v1.0 - Version stable: multi-PC, détection UI-DETR-1, 3 modes exécution
- Frontend v4 accessible sur réseau local (192.168.1.40) - Ports ouverts: 3002 (frontend), 5001 (backend), 5004 (dashboard) - Ollama GPU fonctionnel - Self-healing interactif - Dashboard confiance Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
24
core/execution/__init__.py
Normal file
24
core/execution/__init__.py
Normal file
@@ -0,0 +1,24 @@
|
||||
"""
|
||||
Action Execution Module
|
||||
|
||||
Provides classes for executing workflow actions automatically.
|
||||
"""
|
||||
|
||||
from .action_executor import ActionExecutor
|
||||
from .target_resolver import TargetResolver, ResolvedTarget
|
||||
from .error_handler import ErrorHandler, ErrorType, RecoveryStrategy
|
||||
|
||||
# Import tardif pour éviter import circulaire avec pipeline
|
||||
def _get_execution_loop():
|
||||
from .execution_loop import ExecutionLoop, ExecutionMode, ExecutionState, create_execution_loop
|
||||
return ExecutionLoop, ExecutionMode, ExecutionState, create_execution_loop
|
||||
|
||||
__all__ = [
|
||||
'ActionExecutor',
|
||||
'TargetResolver',
|
||||
'ResolvedTarget',
|
||||
'ErrorHandler',
|
||||
'ErrorType',
|
||||
'RecoveryStrategy',
|
||||
# ExecutionLoop accessible via import direct du module
|
||||
]
|
||||
1172
core/execution/action_executor.py
Normal file
1172
core/execution/action_executor.py
Normal file
File diff suppressed because it is too large
Load Diff
366
core/execution/computation_cache.py
Normal file
366
core/execution/computation_cache.py
Normal file
@@ -0,0 +1,366 @@
|
||||
"""
|
||||
ComputationCache - Cache intelligent pour calculs redondants
|
||||
|
||||
Tâche 5.4: Optimiser les calculs redondants dans TargetResolver.
|
||||
Cache les calculs de distance, alignement et relations spatiales.
|
||||
|
||||
Auteur : Dom, Alice Kiro - 20 décembre 2024
|
||||
"""
|
||||
|
||||
import logging
|
||||
import time
|
||||
from typing import Dict, Tuple, Any, Optional, Callable
|
||||
from dataclasses import dataclass
|
||||
from functools import lru_cache
|
||||
import hashlib
|
||||
|
||||
logger = logging.getLogger(__name__)


@dataclass
class ComputationCacheStats:
    """Counters describing cache effectiveness."""
    hits: int = 0                      # lookups served from cache
    misses: int = 0                    # lookups that had to compute
    total_time_saved_ms: float = 0.0   # compute time avoided thanks to cache hits
    cache_size: int = 0                # legacy field, never updated (kept for compatibility)


class ComputationCache:
    """
    Cache for redundant spatial computations (Task 5.4).

    Avoids recomputing distances, alignment scores, spatial relations and
    bbox operations while TargetResolver resolves multiple anchors.

    Bug fix: ``total_time_saved_ms`` previously accumulated the time *spent*
    computing on cache misses, so it reported work done rather than work
    avoided.  The compute time of each entry is now remembered on the miss
    and credited to the statistic each time that entry is later served from
    the cache.

    Author: Dom, Alice Kiro - 20 December 2024
    """

    def __init__(self, max_size: int = 1000):
        """
        Initialize the computation cache.

        Args:
            max_size: Maximum number of entries kept per specialized cache.
        """
        self.max_size = max_size

        # One specialized cache per computation family.
        self._distance_cache: Dict[str, float] = {}
        self._alignment_cache: Dict[str, float] = {}
        self._spatial_relation_cache: Dict[str, bool] = {}
        self._bbox_operation_cache: Dict[str, Any] = {}

        # Compute cost per key, used to credit time saved on later hits.
        # Keys are unique across caches (each family appends its own
        # suffix in _make_key), so a single dict is sufficient.
        self._compute_time_ms: Dict[str, float] = {}

        self._stats = ComputationCacheStats()

        logger.debug(f"ComputationCache initialized (max_size={max_size})")

    def _make_key(self, *args) -> str:
        """
        Build a cache key from arbitrary arguments.

        Objects exposing ``element_id`` contribute that id; tuples/lists are
        normalized; everything else is stringified.  Long keys are digested
        with md5 (cache identity only — not security sensitive).

        Args:
            *args: Values to combine into the key.

        Returns:
            A unique cache key.
        """
        key_parts = []
        for arg in args:
            if hasattr(arg, 'element_id'):
                key_parts.append(arg.element_id)
            elif isinstance(arg, (tuple, list)):
                key_parts.append(str(tuple(arg)))
            else:
                key_parts.append(str(arg))

        key_str = '|'.join(key_parts)

        if len(key_str) > 100:
            return hashlib.md5(key_str.encode()).hexdigest()
        return key_str

    def _get_or_compute(self,
                        cache: Dict[str, Any],
                        key: str,
                        compute_func: Callable[[], Any]) -> Any:
        """
        Serve ``key`` from ``cache``, computing (and timing) it on a miss.

        On a hit the compute time recorded for the entry is credited to
        ``total_time_saved_ms``.  On a miss the value is computed, timed,
        stored, and the cache is trimmed to ``max_size`` (FIFO eviction).
        """
        if key in cache:
            self._stats.hits += 1
            # Credit the compute time this hit avoided (0.0 if unknown).
            self._stats.total_time_saved_ms += self._compute_time_ms.get(key, 0.0)
            return cache[key]

        self._stats.misses += 1
        start_time = time.perf_counter()
        value = compute_func()
        self._compute_time_ms[key] = (time.perf_counter() - start_time) * 1000

        cache[key] = value
        self._ensure_cache_size(cache)
        return value

    def get_distance(self,
                     elem1_id: str,
                     elem2_id: str,
                     compute_func: Callable[[], float]) -> float:
        """
        Get the distance between two elements, with caching.

        Args:
            elem1_id: Id of the first element.
            elem2_id: Id of the second element.
            compute_func: Computes the distance on a cache miss.

        Returns:
            The cached or freshly computed distance.
        """
        # Symmetric key: distance(A, B) == distance(B, A).
        key = self._make_key(min(elem1_id, elem2_id), max(elem1_id, elem2_id), 'dist')
        return self._get_or_compute(self._distance_cache, key, compute_func)

    def get_alignment_score(self,
                            elem_id: str,
                            anchor_id: str,
                            hint_type: str,
                            compute_func: Callable[[], float]) -> float:
        """
        Get the alignment score between an element and an anchor, with caching.

        Args:
            elem_id: Id of the element.
            anchor_id: Id of the anchor.
            hint_type: Hint type (below, right_of, etc.).
            compute_func: Computes the score on a cache miss.

        Returns:
            The alignment score.
        """
        key = self._make_key(elem_id, anchor_id, hint_type, 'align')
        return self._get_or_compute(self._alignment_cache, key, compute_func)

    def get_spatial_relation(self,
                             elem_id: str,
                             anchor_id: str,
                             relation_type: str,
                             compute_func: Callable[[], bool]) -> bool:
        """
        Get a spatial relation between an element and an anchor, with caching.

        Args:
            elem_id: Id of the element.
            anchor_id: Id of the anchor.
            relation_type: Relation type (below, above, etc.).
            compute_func: Computes the relation on a cache miss.

        Returns:
            True when the relation holds.
        """
        key = self._make_key(elem_id, anchor_id, relation_type, 'spatial')
        return self._get_or_compute(self._spatial_relation_cache, key, compute_func)

    def get_bbox_operation(self,
                           operation: str,
                           *bbox_ids,
                           compute_func: Callable[[], Any]) -> Any:
        """
        Get the result of a bbox operation, with caching.

        Args:
            operation: Operation kind (intersection, union, contains, etc.).
            *bbox_ids: Ids of the bboxes involved.
            compute_func: Computes the result on a cache miss.

        Returns:
            The operation result.
        """
        key = self._make_key(operation, *bbox_ids, 'bbox_op')
        return self._get_or_compute(self._bbox_operation_cache, key, compute_func)

    def _ensure_cache_size(self, cache: Dict) -> None:
        """
        Trim ``cache`` to at most ``max_size`` entries.

        Simple FIFO eviction: dicts preserve insertion order, so the first
        keys are the oldest.  Evicted keys also drop their recorded compute
        time so the timing dict cannot grow unbounded.

        Args:
            cache: Cache to check.
        """
        if len(cache) > self.max_size:
            keys_to_remove = list(cache.keys())[:len(cache) - self.max_size]
            for key in keys_to_remove:
                del cache[key]
                self._compute_time_ms.pop(key, None)

    def clear(self) -> None:
        """Empty every cache (hit/miss counters are intentionally kept)."""
        self._distance_cache.clear()
        self._alignment_cache.clear()
        self._spatial_relation_cache.clear()
        self._bbox_operation_cache.clear()
        self._compute_time_ms.clear()

        logger.debug("ComputationCache cleared")

    def get_stats(self) -> Dict[str, Any]:
        """Return hit/miss counters, hit rate, time saved and cache sizes."""
        total_cache_size = (
            len(self._distance_cache) +
            len(self._alignment_cache) +
            len(self._spatial_relation_cache) +
            len(self._bbox_operation_cache)
        )

        total_requests = self._stats.hits + self._stats.misses
        hit_rate = (self._stats.hits / total_requests * 100) if total_requests > 0 else 0.0

        return {
            'hits': self._stats.hits,
            'misses': self._stats.misses,
            'hit_rate_percent': round(hit_rate, 2),
            'total_time_saved_ms': round(self._stats.total_time_saved_ms, 2),
            'cache_sizes': {
                'distance': len(self._distance_cache),
                'alignment': len(self._alignment_cache),
                'spatial_relation': len(self._spatial_relation_cache),
                'bbox_operation': len(self._bbox_operation_cache),
                'total': total_cache_size
            },
            'max_size': self.max_size
        }
|
||||
|
||||
|
||||
# Fonctions utilitaires avec cache LRU intégré
|
||||
|
||||
@lru_cache(maxsize=512)
def cached_bbox_center(bbox_tuple: Tuple[int, int, int, int]) -> Tuple[float, float]:
    """
    Compute the center of a bbox, memoized with an LRU cache.

    Args:
        bbox_tuple: (x, y, w, h)

    Returns:
        (center_x, center_y)
    """
    x, y, w, h = bbox_tuple
    center_x = x + w / 2
    center_y = y + h / 2
    return (float(center_x), float(center_y))
|
||||
|
||||
|
||||
@lru_cache(maxsize=512)
def cached_bbox_area(bbox_tuple: Tuple[int, int, int, int]) -> float:
    """
    Compute the area of a bbox, memoized with an LRU cache.

    Args:
        bbox_tuple: (x, y, w, h)

    Returns:
        Area in pixels
    """
    _, _, width, height = bbox_tuple
    return float(width * height)
|
||||
|
||||
|
||||
@lru_cache(maxsize=512)
def cached_bbox_iou(bbox1: Tuple[int, int, int, int],
                    bbox2: Tuple[int, int, int, int]) -> float:
    """
    Compute the intersection-over-union of two bboxes, memoized via LRU.

    Args:
        bbox1: (x, y, w, h)
        bbox2: (x, y, w, h)

    Returns:
        IoU in [0, 1]
    """
    ax, ay, aw, ah = bbox1
    bx, by, bw, bh = bbox2

    # Overlap rectangle (may be empty).
    left = max(ax, bx)
    top = max(ay, by)
    right = min(ax + aw, bx + bw)
    bottom = min(ay + ah, by + bh)

    # No overlap at all.
    if right < left or bottom < top:
        return 0.0

    inter = (right - left) * (bottom - top)
    union = aw * ah + bw * bh - inter

    return float(inter / union) if union > 0 else 0.0
|
||||
|
||||
|
||||
@lru_cache(maxsize=512)
def cached_euclidean_distance(point1: Tuple[float, float],
                              point2: Tuple[float, float]) -> float:
    """
    Compute the Euclidean distance between two points, memoized via LRU.

    Args:
        point1: (x1, y1)
        point2: (x2, y2)

    Returns:
        Euclidean distance
    """
    ax, ay = point1
    bx, by = point2
    dx = bx - ax
    dy = by - ay
    return float((dx * dx + dy * dy) ** 0.5)
|
||||
|
||||
|
||||
def clear_all_lru_caches() -> None:
    """Clear the LRU caches of every memoized utility function above."""
    for cached_fn in (
        cached_bbox_center,
        cached_bbox_area,
        cached_bbox_iou,
        cached_euclidean_distance,
    ):
        cached_fn.cache_clear()
    logger.debug("All LRU caches cleared")
|
||||
718
core/execution/execution_robustness.py
Normal file
718
core/execution/execution_robustness.py
Normal file
@@ -0,0 +1,718 @@
|
||||
"""
|
||||
ExecutionRobustness - Robustesse d'exécution avec retry et récupération
|
||||
|
||||
Ce module ajoute:
|
||||
- Retry avec backoff exponentiel
|
||||
- Attente d'élément avec re-détection
|
||||
- Récupération d'état après échec
|
||||
- Gestion d'écran inconnu
|
||||
- Diagnostics détaillés d'échec
|
||||
"""
|
||||
|
||||
import logging
|
||||
import time
|
||||
from typing import Optional, Dict, Any, Callable, List, Tuple
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Dataclasses
|
||||
# =============================================================================
|
||||
|
||||
@dataclass
class RetryConfig:
    """Settings for retries with exponential backoff."""
    max_retries: int = 3              # retries after the initial attempt
    base_delay_ms: float = 1000.0     # first backoff delay, in ms
    max_delay_ms: float = 30000.0     # upper bound on any single delay, in ms
    exponential_base: float = 2.0     # growth factor between consecutive delays
    jitter_factor: float = 0.1        # random jitter fraction in [0, 1]
|
||||
|
||||
|
||||
@dataclass
class WaitConfig:
    """Settings for element-wait polling."""
    timeout_ms: float = 10000.0      # total wait budget
    poll_interval_ms: float = 500.0  # delay between re-detection attempts
    min_confidence: float = 0.7      # minimum detection confidence to accept
|
||||
|
||||
|
||||
@dataclass
class RecoveryConfig:
    """Settings for post-failure state recovery."""
    enable_state_recovery: bool = True    # master switch for recovery attempts
    max_recovery_attempts: int = 3        # re-matching attempts before giving up
    recovery_timeout_ms: float = 30000.0  # overall recovery budget, in ms
|
||||
|
||||
|
||||
@dataclass
class RetryResult:
    """Outcome of an operation run under retry."""
    success: bool                           # True when the call eventually succeeded
    attempts: int                           # total invocations performed
    total_delay_ms: float                   # cumulative backoff slept, in ms
    last_error: Optional[Exception] = None  # final exception when unsuccessful
    result: Any = None                      # return value when successful
    delays_used: List[float] = field(default_factory=list)  # delay before each retry
|
||||
|
||||
|
||||
@dataclass
class WaitResult:
    """Outcome of waiting for an element."""
    found: bool                  # True when an element was accepted
    element: Any = None          # the detected element, when found
    confidence: float = 0.0      # confidence of the accepted detection
    wait_time_ms: float = 0.0    # elapsed time when the wait ended
    detection_attempts: int = 0  # number of detection polls performed
|
||||
|
||||
|
||||
@dataclass
class RecoveryResult:
    """Outcome of a state-recovery attempt."""
    recovered: bool                    # True when a known node was re-matched
    new_node_id: Optional[str] = None  # node the execution re-anchored to
    recovery_path: List[str] = field(default_factory=list)  # path back to the expected node
    message: str = ""                  # human-readable summary
|
||||
|
||||
|
||||
@dataclass
class FailureDiagnostics:
    """Detailed record of a single execution failure."""
    failure_type: str                      # FailureType value string
    timestamp: datetime                    # when the failure was recorded
    screenshot_path: Optional[str] = None  # screenshot captured at failure time
    match_scores: Dict[str, float] = field(default_factory=dict)      # score per candidate node
    attempted_strategies: List[str] = field(default_factory=list)     # strategies tried before failing
    context: Dict[str, Any] = field(default_factory=dict)             # free-form extra context
    recommendations: List[str] = field(default_factory=list)          # actionable hints

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-friendly dict (timestamp as ISO-8601)."""
        return {
            "failure_type": self.failure_type,
            "timestamp": self.timestamp.isoformat(),
            "screenshot_path": self.screenshot_path,
            "match_scores": self.match_scores,
            "attempted_strategies": self.attempted_strategies,
            "context": self.context,
            "recommendations": self.recommendations,
        }
|
||||
|
||||
|
||||
class FailureType(Enum):
    """Categories of execution failure."""
    ACTION_FAILED = "action_failed"
    ELEMENT_NOT_FOUND = "element_not_found"
    STATE_MISMATCH = "state_mismatch"
    UNKNOWN_SCREEN = "unknown_screen"
    TIMEOUT = "timeout"
    NETWORK_ERROR = "network_error"
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Gestionnaire de Retry
|
||||
# =============================================================================
|
||||
|
||||
class RetryManager:
    """
    Retry helper implementing exponential backoff with jitter.

    Delay formula: base_delay * (exponential_base ** (attempt - 1)),
    plus random jitter, capped at ``max_delay_ms``.

    Example:
        >>> manager = RetryManager()
        >>> result = manager.execute_with_retry(my_function, args)
    """

    def __init__(self, config: Optional[RetryConfig] = None):
        """
        Initialize the manager.

        Args:
            config: Retry settings; defaults to ``RetryConfig()``.
        """
        self.config = config or RetryConfig()
        logger.info(f"RetryManager initialisé (max_retries={self.config.max_retries})")

    def execute_with_retry(
        self,
        func: Callable,
        *args,
        on_retry: Optional[Callable[[int, Exception], None]] = None,
        **kwargs
    ) -> RetryResult:
        """
        Run ``func`` until it succeeds or retries are exhausted.

        The initial call plus ``max_retries`` retries gives at most
        ``max_retries + 1`` invocations, separated by exponentially
        growing, jittered delays.

        Args:
            func: Callable to invoke.
            *args: Positional arguments forwarded to ``func``.
            on_retry: Optional callback invoked as ``on_retry(attempt, error)``
                before each retry delay.
            **kwargs: Keyword arguments forwarded to ``func``.

        Returns:
            RetryResult describing the outcome, including delays used.
        """
        total_delay = 0.0
        delays_used: List[float] = []
        last_error: Optional[Exception] = None
        max_attempts = self.config.max_retries + 1

        for attempt in range(1, max_attempts + 1):
            try:
                value = func(*args, **kwargs)
            except Exception as err:
                last_error = err
                logger.warning(f"Tentative {attempt} échouée: {err}")
            else:
                return RetryResult(
                    success=True,
                    attempts=attempt,
                    total_delay_ms=total_delay,
                    result=value,
                    delays_used=delays_used
                )

            # No retries left after the final attempt.
            if attempt >= max_attempts:
                break

            # Exponential backoff before the next try.
            delay = self.compute_delay(attempt)
            delays_used.append(delay)
            total_delay += delay

            if on_retry:
                try:
                    on_retry(attempt, last_error)
                except Exception as cb_error:
                    logger.warning(f"Erreur callback retry: {cb_error}")

            time.sleep(delay / 1000.0)

        return RetryResult(
            success=False,
            attempts=max_attempts,
            total_delay_ms=total_delay,
            last_error=last_error,
            delays_used=delays_used
        )

    def compute_delay(self, attempt: int) -> float:
        """
        Compute the backoff delay after a given attempt.

        Formula: base_delay * (exponential_base ** (attempt - 1)) + jitter,
        capped at ``max_delay_ms``.

        Args:
            attempt: Attempt number (1-based).

        Returns:
            Delay in milliseconds.
        """
        import random

        raw = self.config.base_delay_ms * (
            self.config.exponential_base ** (attempt - 1)
        )
        # Additive random jitter, proportional to the raw delay.
        jittered = raw + raw * self.config.jitter_factor * random.random()
        return min(jittered, self.config.max_delay_ms)

    def get_expected_delays(self) -> List[float]:
        """
        Return the jitter-free delay (ms) planned for each retry.

        Returns:
            List of delays in ms, one per configured retry.
        """
        base = self.config.base_delay_ms
        growth = self.config.exponential_base
        cap = self.config.max_delay_ms
        return [
            min(base * (growth ** (attempt - 1)), cap)
            for attempt in range(1, self.config.max_retries + 1)
        ]
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Gestionnaire d'Attente d'Élément
|
||||
# =============================================================================
|
||||
|
||||
class ElementWaiter:
    """
    Waits for a UI element by polling a detection function.

    Re-runs detection every ``poll_interval_ms`` until an element is found
    with sufficient confidence or ``timeout_ms`` elapses.

    Example:
        >>> waiter = ElementWaiter()
        >>> result = waiter.wait_for_element(detector, target_spec)
    """

    def __init__(self, config: Optional[WaitConfig] = None):
        """
        Initialize the waiter.

        Args:
            config: Wait settings; defaults to ``WaitConfig()``.
        """
        self.config = config or WaitConfig()
        logger.info(f"ElementWaiter initialisé (timeout={self.config.timeout_ms}ms)")

    def wait_for_element(
        self,
        detect_func: Callable[[], Optional[Any]],
        confidence_func: Optional[Callable[[Any], float]] = None,
        on_poll: Optional[Callable[[int], None]] = None
    ) -> WaitResult:
        """
        Poll ``detect_func`` until an element is found or the timeout expires.

        Args:
            detect_func: Returns the element, or None when not detected.
            confidence_func: Optional scorer; when given, an element is only
                accepted if its score reaches ``config.min_confidence``.
            on_poll: Optional callback invoked with the attempt number before
                each detection try.

        Returns:
            WaitResult — found element with confidence, or a timeout record.
        """
        started = time.time()
        attempt = 0

        while True:
            attempt += 1
            elapsed_ms = (time.time() - started) * 1000

            # Give up once the total budget is spent.
            if elapsed_ms >= self.config.timeout_ms:
                return WaitResult(
                    found=False,
                    wait_time_ms=elapsed_ms,
                    detection_attempts=attempt
                )

            if on_poll:
                try:
                    on_poll(attempt)
                except Exception as e:
                    logger.warning(f"Erreur callback poll: {e}")

            try:
                candidate = detect_func()

                if candidate is not None:
                    # Score the candidate; accept unconditionally when no
                    # confidence function was provided.
                    score = confidence_func(candidate) if confidence_func else 1.0

                    if score >= self.config.min_confidence:
                        return WaitResult(
                            found=True,
                            element=candidate,
                            confidence=score,
                            wait_time_ms=elapsed_ms,
                            detection_attempts=attempt
                        )

            except Exception as e:
                logger.debug(f"Erreur détection (tentative {attempt}): {e}")

            # Pause before the next detection attempt.
            time.sleep(self.config.poll_interval_ms / 1000.0)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Gestionnaire de Récupération d'État
|
||||
# =============================================================================
|
||||
|
||||
class StateRecoveryManager:
    """
    Recovers execution state after a failure.

    Re-matches the current screen against the workflow graph and, when a
    known node is recognized, computes a path back to the expected node.

    Example:
        >>> recovery = StateRecoveryManager(pipeline)
        >>> result = recovery.attempt_recovery(workflow_id, screenshot)
    """

    def __init__(
        self,
        pipeline: Any,
        config: Optional[RecoveryConfig] = None
    ):
        """
        Initialize the manager.

        Args:
            pipeline: WorkflowPipeline used for state matching.
            config: Recovery settings; defaults to ``RecoveryConfig()``.
        """
        self.pipeline = pipeline
        self.config = config or RecoveryConfig()
        logger.info("StateRecoveryManager initialisé")

    def attempt_recovery(
        self,
        workflow_id: str,
        screenshot_path: str,
        expected_node_id: Optional[str] = None
    ) -> RecoveryResult:
        """
        Try to re-anchor execution after a failure.

        Args:
            workflow_id: Workflow identifier.
            screenshot_path: Path of the current screenshot.
            expected_node_id: Node we expected to be on, if known.

        Returns:
            RecoveryResult with the recognized node (and a recovery path when
            it differs from the expected node), or a failure record.
        """
        if not self.config.enable_state_recovery:
            return RecoveryResult(
                recovered=False,
                message="State recovery disabled"
            )

        logger.info(f"Tentative de récupération pour workflow {workflow_id}")

        for attempt in range(self.config.max_recovery_attempts):
            try:
                # Re-match the current screen against the workflow graph.
                match = self.pipeline.match_current_state(
                    screenshot_path,
                    workflow_id=workflow_id
                )

                if match and match.get("confidence", 0) > 0.5:
                    candidate_node = match["node_id"]

                    # Only accept nodes that actually exist in the workflow.
                    workflow = self.pipeline.load_workflow(workflow_id)
                    if workflow and any(n.node_id == candidate_node for n in workflow.nodes):

                        # Plan a route back when we landed somewhere else.
                        path: List[str] = []
                        if expected_node_id and candidate_node != expected_node_id:
                            path = self._find_recovery_path(
                                workflow, candidate_node, expected_node_id
                            )

                        return RecoveryResult(
                            recovered=True,
                            new_node_id=candidate_node,
                            recovery_path=path,
                            message=f"Récupéré vers node {candidate_node}"
                        )

                # Give the UI a moment to settle before re-matching.
                time.sleep(1.0)

            except Exception as e:
                logger.warning(f"Erreur récupération (tentative {attempt + 1}): {e}")

        return RecoveryResult(
            recovered=False,
            message="Échec de récupération après toutes les tentatives"
        )

    def _find_recovery_path(
        self,
        workflow: Any,
        from_node: str,
        to_node: str
    ) -> List[str]:
        """Breadth-first search for a node path along workflow edges."""
        from collections import deque

        # Adjacency list: node -> direct successors, in edge order.
        adjacency: Dict[str, List[str]] = {}
        for edge in getattr(workflow, 'edges', []):
            adjacency.setdefault(edge.from_node, []).append(edge.to_node)

        frontier = deque([(from_node, [from_node])])
        seen = {from_node}

        while frontier:
            node, path = frontier.popleft()

            if node == to_node:
                return path

            for successor in adjacency.get(node, []):
                if successor not in seen:
                    seen.add(successor)
                    frontier.append((successor, path + [successor]))

        # No route between the two nodes.
        return []
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Gestionnaire de Diagnostics
|
||||
# =============================================================================
|
||||
|
||||
class DiagnosticsManager:
    """
    Collects and persists detailed failure diagnostics.

    Each failure becomes a FailureDiagnostics record with actionable
    recommendations, kept in memory and saved as a JSON file.

    Example:
        >>> diagnostics = DiagnosticsManager()
        >>> report = diagnostics.create_failure_report(...)
    """

    def __init__(self, logs_dir: str = "data/diagnostics"):
        """
        Initialize the manager.

        Args:
            logs_dir: Directory where diagnostic JSON files are written.
        """
        self.logs_dir = Path(logs_dir)
        self.logs_dir.mkdir(parents=True, exist_ok=True)
        self._failure_history: List[FailureDiagnostics] = []
        logger.info(f"DiagnosticsManager initialisé: {logs_dir}")

    def create_failure_report(
        self,
        failure_type: FailureType,
        screenshot_path: Optional[str] = None,
        match_scores: Optional[Dict[str, float]] = None,
        attempted_strategies: Optional[List[str]] = None,
        context: Optional[Dict[str, Any]] = None
    ) -> FailureDiagnostics:
        """
        Build, record and persist a failure report.

        Args:
            failure_type: Category of the failure.
            screenshot_path: Screenshot captured at failure time, if any.
            match_scores: Matching score per candidate node.
            attempted_strategies: Strategies tried before failing.
            context: Free-form extra context.

        Returns:
            The FailureDiagnostics record, including recommendations.
        """
        report = FailureDiagnostics(
            failure_type=failure_type.value,
            timestamp=datetime.now(),
            screenshot_path=screenshot_path,
            match_scores=match_scores or {},
            attempted_strategies=attempted_strategies or [],
            context=context or {},
            recommendations=self._generate_recommendations(
                failure_type, match_scores, context
            )
        )

        # Keep in memory and persist to disk.
        self._failure_history.append(report)
        self._save_diagnostics(report)

        logger.info(f"Diagnostic créé: {failure_type.value}")
        return report

    def _generate_recommendations(
        self,
        failure_type: FailureType,
        match_scores: Optional[Dict[str, float]],
        context: Optional[Dict[str, Any]]
    ) -> List[str]:
        """Derive actionable hints from the failure category."""
        hints: List[str] = []

        if failure_type == FailureType.ELEMENT_NOT_FOUND:
            hints.append("Vérifier que l'élément cible est visible à l'écran")
            hints.append("Augmenter le timeout d'attente")
            hints.append("Vérifier les sélecteurs de l'élément")

        elif failure_type == FailureType.STATE_MISMATCH:
            hints.append("L'écran actuel ne correspond pas à l'état attendu")
            if match_scores:
                best_match = max(match_scores.items(), key=lambda x: x[1])
                hints.append(f"Meilleur match: {best_match[0]} ({best_match[1]:.2%})")
            hints.append("Considérer l'ajout d'une variante pour ce nouvel état")

        elif failure_type == FailureType.UNKNOWN_SCREEN:
            hints.append("Écran non reconnu dans le workflow")
            hints.append("Vérifier si une popup ou modal bloque l'écran")
            hints.append("Considérer l'entraînement avec ce nouvel écran")

        elif failure_type == FailureType.ACTION_FAILED:
            hints.append("L'action n'a pas pu être exécutée")
            hints.append("Vérifier que l'élément cible est cliquable")
            hints.append("Vérifier les permissions de l'application")

        elif failure_type == FailureType.TIMEOUT:
            hints.append("Opération expirée")
            hints.append("Augmenter les timeouts de configuration")
            hints.append("Vérifier la réactivité de l'application")

        return hints

    def _save_diagnostics(self, diagnostics: FailureDiagnostics) -> None:
        """Write one diagnostics record to a timestamped JSON file."""
        import json

        filename = f"failure_{diagnostics.timestamp.strftime('%Y%m%d_%H%M%S')}.json"
        filepath = self.logs_dir / filename

        with open(filepath, 'w') as f:
            json.dump(diagnostics.to_dict(), f, indent=2)

    def get_failure_history(self) -> List[FailureDiagnostics]:
        """Return every recorded failure, oldest first."""
        return self._failure_history

    def get_failure_stats(self) -> Dict[str, int]:
        """Count recorded failures per failure type."""
        counts: Dict[str, int] = {}
        for record in self._failure_history:
            counts[record.failure_type] = counts.get(record.failure_type, 0) + 1
        return counts
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Classe principale de robustesse
|
||||
# =============================================================================
|
||||
|
||||
class ExecutionRobustness:
    """
    Facade aggregating every execution-robustness feature.

    Combines retry-with-backoff, element waiting, state recovery and
    failure diagnostics behind a single entry point,
    :meth:`execute_with_robustness`.

    Example:
        >>> robustness = ExecutionRobustness(pipeline)
        >>> result = robustness.execute_with_robustness(action_func)
    """

    def __init__(
        self,
        pipeline: Any,
        retry_config: Optional[RetryConfig] = None,
        wait_config: Optional[WaitConfig] = None,
        recovery_config: Optional[RecoveryConfig] = None
    ):
        """
        Initialize execution robustness.

        Args:
            pipeline: WorkflowPipeline instance, also passed to state recovery.
            retry_config: Retry configuration (manager defaults when None).
            wait_config: Element-wait configuration.
            recovery_config: State-recovery configuration.
        """
        self.pipeline = pipeline
        self.retry_manager = RetryManager(retry_config)
        self.element_waiter = ElementWaiter(wait_config)
        self.state_recovery = StateRecoveryManager(pipeline, recovery_config)
        self.diagnostics = DiagnosticsManager()

        logger.info("ExecutionRobustness initialisé")

    def execute_with_robustness(
        self,
        action_func: Callable,
        workflow_id: str,
        screenshot_path: str,
        *args,
        **kwargs
    ) -> Tuple[bool, Any, Optional[FailureDiagnostics]]:
        """
        Execute an action with every robustness protection enabled.

        Flow: retry the action first; on definitive failure, build a
        diagnostics report, then attempt a workflow-state recovery.

        Args:
            action_func: Action callable to execute.
            workflow_id: ID of the workflow being executed.
            screenshot_path: Path of the current screenshot.
            *args, **kwargs: Forwarded to ``action_func``.

        Returns:
            Tuple ``(success, result, diagnostics)``. ``diagnostics`` is
            None on success. NOTE: even when state recovery succeeds, the
            first element is still False — recovery only repositions the
            workflow (the RecoveryResult is returned as ``result``); it
            does not re-run the failed action.
        """
        # First line of defense: retry with backoff.
        retry_result = self.retry_manager.execute_with_retry(
            action_func, *args, **kwargs
        )

        if retry_result.success:
            return True, retry_result.result, None

        # Definitive failure - build a diagnostics report for later analysis.
        diagnostics = self.diagnostics.create_failure_report(
            failure_type=FailureType.ACTION_FAILED,
            screenshot_path=screenshot_path,
            context={
                "attempts": retry_result.attempts,
                "total_delay_ms": retry_result.total_delay_ms,
                "error": str(retry_result.last_error)
            }
        )

        # Try to recover a known workflow state from the current screen.
        recovery_result = self.state_recovery.attempt_recovery(
            workflow_id, screenshot_path
        )

        if recovery_result.recovered:
            logger.info(f"Récupération réussie: {recovery_result.new_node_id}")
            return False, recovery_result, diagnostics

        return False, None, diagnostics
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Fonctions utilitaires
|
||||
# =============================================================================
|
||||
|
||||
def create_robustness(
    pipeline: Any,
    max_retries: int = 3,
    base_delay_ms: float = 1000.0
) -> ExecutionRobustness:
    """Build an :class:`ExecutionRobustness` with a custom retry policy.

    Args:
        pipeline: WorkflowPipeline instance.
        max_retries: Maximum number of retry attempts.
        base_delay_ms: Base backoff delay in milliseconds.

    Returns:
        A configured ExecutionRobustness instance.
    """
    return ExecutionRobustness(
        pipeline,
        retry_config=RetryConfig(
            max_retries=max_retries,
            base_delay_ms=base_delay_ms,
        ),
    )
|
||||
1060
core/execution/memory_cache.py
Normal file
1060
core/execution/memory_cache.py
Normal file
File diff suppressed because it is too large
Load Diff
833
core/execution/recovery_strategies.py
Normal file
833
core/execution/recovery_strategies.py
Normal file
@@ -0,0 +1,833 @@
|
||||
"""
|
||||
Recovery Strategies - Stratégies de récupération pour ErrorHandler
|
||||
|
||||
Ce module implémente les stratégies de récupération spécialisées pour différents types d'erreurs:
|
||||
- SpatialFallbackStrategy pour TargetNotFoundError
|
||||
- SemanticVariantStrategy pour UIElementChangedError
|
||||
- RetryWithBackoffStrategy pour NetworkError
|
||||
- DataNormalizationStrategy pour ValidationError
|
||||
|
||||
Chaque stratégie implémente l'interface BaseRecoveryStrategy et fournit une logique
|
||||
de récupération spécialisée pour son type d'erreur.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import time
|
||||
import re
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Dict, Any, Optional, List, Tuple
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime, timedelta
|
||||
from enum import Enum
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RecoveryStrategyType(Enum):
    """Identifier for each available recovery strategy."""
    SPATIAL_FALLBACK = "spatial_fallback"        # alternative spatial search criteria
    SEMANTIC_VARIANT = "semantic_variant"        # text variants (case, synonyms, partial)
    RETRY_WITH_BACKOFF = "retry_with_backoff"    # exponential backoff for network errors
    DATA_NORMALIZATION = "data_normalization"    # type/format normalization for bad data
|
||||
|
||||
|
||||
@dataclass
class RecoveryContext:
    """Context handed to a recovery strategy for one attempt."""
    error_type: str                      # e.g. "TargetNotFoundError" or snake_case alias
    error_message: str                   # original error text
    original_data: Dict[str, Any]        # payload that triggered the error (target, text_pattern, ...)
    attempt_number: int                  # 1-based attempt counter; drives strategy selection
    max_attempts: int                    # cap after which strategies escalate
    timestamp: datetime                  # when this attempt started
    additional_context: Dict[str, Any]   # extra data, e.g. 'screen_state'
|
||||
|
||||
|
||||
@dataclass
class RecoveryResult:
    """Outcome of a single recovery attempt."""
    success: bool
    should_retry: bool
    strategy_used: RecoveryStrategyType
    recovery_data: Dict[str, Any]
    message: str
    escalation_reason: Optional[str] = None
    duration_ms: float = 0.0

    @classmethod
    def success_with_retry(cls, strategy: RecoveryStrategyType, data: Dict[str, Any],
                           message: str) -> 'RecoveryResult':
        """Build a successful result that recommends retrying the action."""
        return cls(True, True, strategy, data, message)

    @classmethod
    def success_no_retry(cls, strategy: RecoveryStrategyType, data: Dict[str, Any],
                         message: str) -> 'RecoveryResult':
        """Build a successful result that needs no retry."""
        return cls(True, False, strategy, data, message)

    @classmethod
    def failure_with_escalation(cls, strategy: RecoveryStrategyType, reason: str,
                                message: str) -> 'RecoveryResult':
        """Build a failed result that escalates with the given reason."""
        return cls(False, False, strategy, {}, message, escalation_reason=reason)
|
||||
|
||||
|
||||
class BaseRecoveryStrategy(ABC):
    """Abstract base for all recovery strategies.

    Subclasses must set ``self.strategy_type`` in ``__init__`` and
    implement :meth:`can_handle` and :meth:`recover`.
    """

    def __init__(self, max_attempts: int = 3):
        # Cap on recovery attempts before the strategy escalates.
        self.max_attempts = max_attempts
        # Set by subclasses to their RecoveryStrategyType member.
        self.strategy_type = None

    @abstractmethod
    def can_handle(self, error_type: str, context: Dict[str, Any]) -> bool:
        """Return True if this strategy can handle the given error type."""
        pass

    @abstractmethod
    def recover(self, context: RecoveryContext) -> RecoveryResult:
        """Run one recovery attempt and return its result."""
        pass

    def _log_recovery_attempt(self, context: RecoveryContext, result: RecoveryResult):
        """Log one recovery attempt at INFO level."""
        logger.info(
            f"Recovery attempt {context.attempt_number}/{context.max_attempts} "
            f"using {self.strategy_type.value}: {result.message}"
        )
|
||||
|
||||
|
||||
class SpatialFallbackStrategy(BaseRecoveryStrategy):
    """
    Spatial fallback strategy for TargetNotFoundError.

    Uses alternative spatial criteria when a target element is not found:
    - search within an expanded area
    - search by relative position (right, below, etc.)
    - visual-similarity search over a wider zone
    """

    def __init__(self, max_attempts: int = 3, expand_factor: float = 1.5):
        super().__init__(max_attempts)
        self.strategy_type = RecoveryStrategyType.SPATIAL_FALLBACK
        # Multiplier applied to the original bbox when expanding the search area.
        self.expand_factor = expand_factor

    def can_handle(self, error_type: str, context: Dict[str, Any]) -> bool:
        """Handles TargetNotFoundError (class name or snake_case alias)."""
        return error_type in ["TargetNotFoundError", "target_not_found"]

    def recover(self, context: RecoveryContext) -> RecoveryResult:
        """
        Spatial-fallback recovery.

        Strategies applied in order, one per attempt number:
        1. search within an expanded area
        2. search by relative position
        3. relaxed visual-similarity search

        Returns:
            RecoveryResult carrying the strategy parameters in
            ``recovery_data``; escalates when required inputs are missing
            or attempts run out.
        """
        start_time = time.time()

        try:
            # Both the target description and the screen state are required.
            target_info = context.original_data.get('target', {})
            screen_state = context.additional_context.get('screen_state')

            if not target_info or not screen_state:
                return RecoveryResult.failure_with_escalation(
                    self.strategy_type,
                    "Missing target info or screen state",
                    "Cannot perform spatial fallback without target and screen data"
                )

            # Strategy 1: expanded area
            if context.attempt_number == 1:
                recovery_data = self._expand_search_area(target_info, screen_state)
                message = f"Expanded search area by factor {self.expand_factor}"

            # Strategy 2: relative position
            elif context.attempt_number == 2:
                recovery_data = self._search_relative_position(target_info, screen_state)
                message = "Searching by relative position (nearby elements)"

            # Strategy 3: relaxed visual similarity
            elif context.attempt_number == 3:
                recovery_data = self._visual_similarity_fallback(target_info, screen_state)
                message = "Using visual similarity fallback with relaxed criteria"

            else:
                return RecoveryResult.failure_with_escalation(
                    self.strategy_type,
                    f"Max spatial fallback attempts reached ({self.max_attempts})",
                    "All spatial fallback strategies exhausted"
                )

            duration_ms = (time.time() - start_time) * 1000

            result = RecoveryResult.success_with_retry(
                self.strategy_type,
                recovery_data,
                message
            )
            result.duration_ms = duration_ms

            self._log_recovery_attempt(context, result)
            return result

        except Exception as e:
            duration_ms = (time.time() - start_time) * 1000
            logger.error(f"Spatial fallback strategy failed: {e}")

            result = RecoveryResult.failure_with_escalation(
                self.strategy_type,
                f"Strategy execution error: {e}",
                f"Spatial fallback failed with exception: {e}"
            )
            result.duration_ms = duration_ms
            return result

    def _expand_search_area(self, target_info: Dict[str, Any], screen_state) -> Dict[str, Any]:
        """Build an expanded bounding box around the original target bbox.

        The box is grown by ``expand_factor`` and kept centered; x/y are
        clamped at 0 so the box never leaves the screen on the top/left.
        """
        original_bbox = target_info.get('bbox', {})
        if not original_bbox:
            return {'strategy': 'expand_area', 'success': False, 'reason': 'No bbox available'}

        # Grow the bbox by the expansion factor, shifting the origin to keep it centered.
        expanded_bbox = {
            'x': max(0, original_bbox.get('x', 0) - int(original_bbox.get('width', 0) * (self.expand_factor - 1) / 2)),
            'y': max(0, original_bbox.get('y', 0) - int(original_bbox.get('height', 0) * (self.expand_factor - 1) / 2)),
            'width': int(original_bbox.get('width', 0) * self.expand_factor),
            'height': int(original_bbox.get('height', 0) * self.expand_factor)
        }

        return {
            'strategy': 'expand_area',
            'original_bbox': original_bbox,
            'expanded_bbox': expanded_bbox,
            'expand_factor': self.expand_factor
        }

    def _search_relative_position(self, target_info: Dict[str, Any], screen_state) -> Dict[str, Any]:
        """Describe a relative-position search around nearby reference elements.

        NOTE(review): this only returns the search parameters — the actual
        element lookup is presumably performed by the caller; confirm.
        """
        # Look for nearby elements that could serve as anchors.
        target_text = target_info.get('text_pattern', '')
        target_role = target_info.get('role', '')

        relative_positions = ['right', 'below', 'above', 'left']

        return {
            'strategy': 'relative_position',
            'target_text': target_text,
            'target_role': target_role,
            'search_positions': relative_positions,
            'search_radius': 100  # pixels
        }

    def _visual_similarity_fallback(self, target_info: Dict[str, Any], screen_state) -> Dict[str, Any]:
        """Describe a visual-similarity search with relaxed matching criteria."""
        return {
            'strategy': 'visual_similarity',
            'relaxed_threshold': 0.6,  # lower matching threshold
            'use_partial_matching': True,
            'ignore_color_variations': True,
            'target_info': target_info
        }
|
||||
|
||||
|
||||
class SemanticVariantStrategy(BaseRecoveryStrategy):
    """
    Semantic recovery strategy for UIElementChangedError.

    Tries semantic variants of the expected text when a UI element changed:
    - linguistic variants (synonyms)
    - format variants (case, whitespace, punctuation)
    - contextual variants (partial text, keywords)

    Variant lists are deduplicated with an order-preserving scheme so the
    retry sequence is deterministic across runs (previously ``set()`` was
    used, whose iteration order varies with PYTHONHASHSEED).
    """

    def __init__(self, max_attempts: int = 3):
        super().__init__(max_attempts)
        self.strategy_type = RecoveryStrategyType.SEMANTIC_VARIANT

        # Common synonym dictionary (lower-case keys).
        self.synonyms = {
            'submit': ['send', 'confirm', 'ok', 'apply', 'save'],
            'cancel': ['close', 'abort', 'dismiss', 'back'],
            'delete': ['remove', 'erase', 'clear'],
            'edit': ['modify', 'change', 'update'],
            'search': ['find', 'lookup', 'query'],
            'login': ['sign in', 'connect', 'authenticate'],
            'logout': ['sign out', 'disconnect', 'exit']
        }

    def can_handle(self, error_type: str, context: Dict[str, Any]) -> bool:
        """Handles UI-changed errors (class name or snake_case aliases)."""
        return error_type in ["UIElementChangedError", "ui_element_changed", "ui_changed"]

    def recover(self, context: RecoveryContext) -> RecoveryResult:
        """
        Recovery through semantic variants.

        Strategies applied in order, one per attempt number:
        1. format variants (case, whitespace)
        2. synonyms and linguistic variants
        3. partial matching and keywords

        Expects ``text_pattern`` in ``context.original_data``; escalates
        without it or once attempts are exhausted.
        """
        start_time = time.time()

        try:
            # The original text is mandatory for variant generation.
            original_text = context.original_data.get('text_pattern', '')
            if not original_text:
                return RecoveryResult.failure_with_escalation(
                    self.strategy_type,
                    "No text pattern available",
                    "Cannot generate semantic variants without original text"
                )

            # Pick the generator matching the current attempt number.
            if context.attempt_number == 1:
                variants = self._generate_format_variants(original_text)
                message = f"Generated {len(variants)} format variants"

            elif context.attempt_number == 2:
                variants = self._generate_semantic_variants(original_text)
                message = f"Generated {len(variants)} semantic variants"

            elif context.attempt_number == 3:
                variants = self._generate_partial_variants(original_text)
                message = f"Generated {len(variants)} partial matching variants"

            else:
                return RecoveryResult.failure_with_escalation(
                    self.strategy_type,
                    f"Max semantic variant attempts reached ({self.max_attempts})",
                    "All semantic variant strategies exhausted"
                )

            duration_ms = (time.time() - start_time) * 1000

            recovery_data = {
                'original_text': original_text,
                'variants': variants,
                'strategy_type': f'attempt_{context.attempt_number}'
            }

            result = RecoveryResult.success_with_retry(
                self.strategy_type,
                recovery_data,
                message
            )
            result.duration_ms = duration_ms

            self._log_recovery_attempt(context, result)
            return result

        except Exception as e:
            duration_ms = (time.time() - start_time) * 1000
            logger.error(f"Semantic variant strategy failed: {e}")

            result = RecoveryResult.failure_with_escalation(
                self.strategy_type,
                f"Strategy execution error: {e}",
                f"Semantic variant generation failed: {e}"
            )
            result.duration_ms = duration_ms
            return result

    @staticmethod
    def _dedupe(variants: List[str]) -> List[str]:
        """Drop duplicates while preserving generation order.

        dict.fromkeys keeps insertion order (guaranteed since Python 3.7),
        unlike set(), so the variant sequence is reproducible.
        """
        return list(dict.fromkeys(variants))

    def _generate_format_variants(self, text: str) -> List[str]:
        """Generate case/whitespace/punctuation variants of *text*."""
        variants = [
            # Case variants
            text.lower(),
            text.upper(),
            text.title(),
            text.capitalize(),
            # Whitespace and punctuation variants
            text.strip(),
            text.replace(' ', ''),
            text.replace('-', ' '),
            text.replace('_', ' '),
            re.sub(r'[^\w\s]', '', text),  # strip punctuation
            re.sub(r'\s+', ' ', text)      # collapse whitespace
        ]
        # Remove empties and duplicates, keeping a stable order.
        return [v for v in self._dedupe(variants) if v]

    def _generate_semantic_variants(self, text: str) -> List[str]:
        """Generate synonym-based variants (all lower-cased)."""
        variants = []
        text_lower = text.lower()

        # Synonym substitutions
        for word, synonyms in self.synonyms.items():
            if word in text_lower:
                for synonym in synonyms:
                    variants.append(text_lower.replace(word, synonym))

        # Common abbreviation/expansion swaps
        common_replacements = {
            'btn': 'button',
            'button': 'btn',
            '&': 'and',
            'and': '&',
            'ok': 'okay',
            'okay': 'ok'
        }

        for old, new in common_replacements.items():
            if old in text_lower:
                variants.append(text_lower.replace(old, new))

        return self._dedupe(variants)

    def _generate_partial_variants(self, text: str) -> List[str]:
        """Generate partial-match variants: words, word spans, prefixes/suffixes."""
        variants = []
        words = text.split()

        if len(words) > 1:
            # Individual words
            variants.extend(words)

            # Contiguous word combinations
            for i in range(len(words)):
                for j in range(i + 1, len(words) + 1):
                    variants.append(' '.join(words[i:j]))

        # Prefixes and suffixes
        if len(text) > 3:
            variants.extend([
                text[:len(text) // 2],  # first half
                text[len(text) // 2:],  # second half
                text[:3],               # first 3 characters
                text[-3:]               # last 3 characters
            ])

        # Keep fragments longer than one character, deterministically ordered.
        return [v for v in self._dedupe(variants) if len(v) > 1]
|
||||
|
||||
|
||||
class RetryWithBackoffStrategy(BaseRecoveryStrategy):
    """
    Exponential-backoff retry strategy for NetworkError.

    Implements smart retries with growing delays for network failures:
    - exponential backoff with jitter
    - detection of network error categories
    - delay adaptation based on the error type

    Side effect: :meth:`recover` blocks with ``time.sleep`` for the
    computed delay before returning.
    """

    def __init__(self, max_attempts: int = 5, base_delay: float = 1.0, max_delay: float = 60.0):
        super().__init__(max_attempts)
        self.strategy_type = RecoveryStrategyType.RETRY_WITH_BACKOFF
        self.base_delay = base_delay  # seconds, delay of the first attempt
        self.max_delay = max_delay    # seconds, cap on the computed delay

    def can_handle(self, error_type: str, context: Dict[str, Any]) -> bool:
        """Handles network-related errors (class names or snake_case aliases)."""
        network_errors = [
            "NetworkError", "ConnectionError", "TimeoutError", "HTTPError",
            "network_error", "connection_error", "timeout_error", "http_error"
        ]
        return error_type in network_errors

    def recover(self, context: RecoveryContext) -> RecoveryResult:
        """
        Retry-with-backoff recovery.

        Computes the appropriate delay, sleeps for it, then recommends a
        retry. Escalates once ``attempt_number`` exceeds ``max_attempts``.
        """
        start_time = time.time()

        try:
            if context.attempt_number > self.max_attempts:
                return RecoveryResult.failure_with_escalation(
                    self.strategy_type,
                    f"Max retry attempts reached ({self.max_attempts})",
                    "Network retry limit exceeded"
                )

            # Exponential backoff, capped at max_delay.
            delay = min(
                self.base_delay * (2 ** (context.attempt_number - 1)),
                self.max_delay
            )

            # Add jitter (±25%) to avoid synchronized retry storms.
            import random
            jitter = delay * 0.25 * (random.random() - 0.5)
            final_delay = max(0.1, delay + jitter)

            # Categorize the error to annotate the recovery data.
            error_analysis = self._analyze_network_error(context.error_message)

            duration_ms = (time.time() - start_time) * 1000

            recovery_data = {
                'delay_seconds': final_delay,
                'attempt_number': context.attempt_number,
                'base_delay': self.base_delay,
                'calculated_delay': delay,
                'jitter': jitter,
                'error_analysis': error_analysis
            }

            message = f"Retry #{context.attempt_number} after {final_delay:.1f}s delay ({error_analysis['category']})"

            result = RecoveryResult.success_with_retry(
                self.strategy_type,
                recovery_data,
                message
            )
            result.duration_ms = duration_ms

            self._log_recovery_attempt(context, result)

            # Block for the computed delay before handing control back.
            logger.info(f"Waiting {final_delay:.1f}s before retry...")
            time.sleep(final_delay)

            return result

        except Exception as e:
            duration_ms = (time.time() - start_time) * 1000
            logger.error(f"Retry with backoff strategy failed: {e}")

            result = RecoveryResult.failure_with_escalation(
                self.strategy_type,
                f"Strategy execution error: {e}",
                f"Backoff calculation failed: {e}"
            )
            result.duration_ms = duration_ms
            return result

    def _analyze_network_error(self, error_message: str) -> Dict[str, Any]:
        """Categorize a network error message to adapt the retry policy.

        Returns a dict with ``category``, ``severity``, ``recommendation``
        and the original message. Matching is keyword-based on the
        lower-cased message; first match wins.
        """
        error_lower = error_message.lower()

        if any(term in error_lower for term in ['timeout', 'timed out']):
            category = 'timeout'
            severity = 'medium'
            recommendation = 'Increase timeout and retry'

        elif any(term in error_lower for term in ['connection refused', 'connection failed']):
            category = 'connection_refused'
            severity = 'high'
            recommendation = 'Service may be down, longer backoff recommended'

        elif any(term in error_lower for term in ['dns', 'name resolution']):
            category = 'dns_error'
            severity = 'high'
            recommendation = 'DNS issue, check network connectivity'

        elif any(term in error_lower for term in ['ssl', 'certificate', 'tls']):
            category = 'ssl_error'
            severity = 'high'
            recommendation = 'SSL/TLS issue, may need manual intervention'

        elif any(term in error_lower for term in ['500', '502', '503', '504']):
            category = 'server_error'
            severity = 'medium'
            recommendation = 'Server error, retry with backoff'

        elif any(term in error_lower for term in ['401', '403']):
            category = 'auth_error'
            severity = 'high'
            recommendation = 'Authentication issue, check credentials'

        else:
            category = 'unknown_network'
            severity = 'medium'
            recommendation = 'Generic network error, standard retry'

        return {
            'category': category,
            'severity': severity,
            'recommendation': recommendation,
            'original_message': error_message
        }
|
||||
|
||||
|
||||
class DataNormalizationStrategy(BaseRecoveryStrategy):
    """
    Data-normalization strategy for ValidationError.

    Normalizes and converts data to resolve validation failures:
    - type conversion
    - format normalization
    - data cleaning
    - automatic validation and correction
    """

    def __init__(self, max_attempts: int = 3):
        super().__init__(max_attempts)
        self.strategy_type = RecoveryStrategyType.DATA_NORMALIZATION

    def can_handle(self, error_type: str, context: Dict[str, Any]) -> bool:
        """Handles validation-related errors (class names or snake_case aliases)."""
        validation_errors = [
            "ValidationError", "ValueError", "TypeError", "FormatError",
            "validation_error", "value_error", "type_error", "format_error"
        ]
        return error_type in validation_errors

    def recover(self, context: RecoveryContext) -> RecoveryResult:
        """
        Data-normalization recovery.

        Strategies applied in order, one per attempt number:
        1. automatic type conversion
        2. format normalization
        3. data cleaning and correction

        Expects ``invalid_data`` (and optionally ``expected_type``,
        ``field_name``) in ``context.original_data``.
        """
        start_time = time.time()

        try:
            invalid_data = context.original_data.get('invalid_data')
            expected_type = context.original_data.get('expected_type')
            field_name = context.original_data.get('field_name', 'unknown')

            # Only None means "nothing to normalize"; falsy-but-valid values
            # such as 0 or '' are still processed.
            if invalid_data is None:
                return RecoveryResult.failure_with_escalation(
                    self.strategy_type,
                    "No invalid data provided",
                    "Cannot normalize data without input"
                )

            # Pick the normalization pass matching the attempt number.
            if context.attempt_number == 1:
                normalized_data = self._type_conversion(invalid_data, expected_type)
                message = f"Applied type conversion for field '{field_name}'"

            elif context.attempt_number == 2:
                normalized_data = self._format_normalization(invalid_data, expected_type)
                message = f"Applied format normalization for field '{field_name}'"

            elif context.attempt_number == 3:
                normalized_data = self._data_cleaning(invalid_data, expected_type)
                message = f"Applied data cleaning for field '{field_name}'"

            else:
                return RecoveryResult.failure_with_escalation(
                    self.strategy_type,
                    f"Max normalization attempts reached ({self.max_attempts})",
                    "All data normalization strategies exhausted"
                )

            duration_ms = (time.time() - start_time) * 1000

            recovery_data = {
                'original_data': invalid_data,
                'normalized_data': normalized_data,
                'field_name': field_name,
                'expected_type': expected_type,
                'normalization_type': f'attempt_{context.attempt_number}'
            }

            result = RecoveryResult.success_with_retry(
                self.strategy_type,
                recovery_data,
                message
            )
            result.duration_ms = duration_ms

            self._log_recovery_attempt(context, result)
            return result

        except Exception as e:
            duration_ms = (time.time() - start_time) * 1000
            logger.error(f"Data normalization strategy failed: {e}")

            result = RecoveryResult.failure_with_escalation(
                self.strategy_type,
                f"Strategy execution error: {e}",
                f"Data normalization failed: {e}"
            )
            result.duration_ms = duration_ms
            return result

    def _type_conversion(self, data: Any, expected_type: Optional[str]) -> Any:
        """Best-effort conversion of *data* to *expected_type*.

        Supported targets: 'int', 'float', 'str', 'bool', 'datetime'.
        Returns the original data unchanged when no conversion applies or
        the conversion fails.
        """
        if expected_type is None:
            return data

        try:
            if expected_type == 'int':
                if isinstance(data, str):
                    # Keep digits, sign and decimal point only.
                    cleaned = re.sub(r'[^\d.-]', '', data)
                    return int(float(cleaned)) if cleaned else 0
                return int(data)

            elif expected_type == 'float':
                if isinstance(data, str):
                    cleaned = re.sub(r'[^\d.-]', '', data)
                    return float(cleaned) if cleaned else 0.0
                return float(data)

            elif expected_type == 'str':
                return str(data)

            elif expected_type == 'bool':
                if isinstance(data, str):
                    # Common truthy spellings.
                    return data.lower() in ['true', '1', 'yes', 'on', 'enabled']
                return bool(data)

            elif expected_type == 'datetime':
                from datetime import datetime
                if isinstance(data, str):
                    # Try several common date formats; first match wins.
                    formats = [
                        '%Y-%m-%d %H:%M:%S',
                        '%Y-%m-%d',
                        '%d/%m/%Y',
                        '%m/%d/%Y',
                        '%Y-%m-%dT%H:%M:%S'
                    ]
                    for fmt in formats:
                        try:
                            return datetime.strptime(data, fmt)
                        except ValueError:
                            continue
                return data

        except (ValueError, TypeError) as e:
            # Conversion failed - fall through and return the input as-is.
            logger.warning(f"Type conversion failed: {e}")

        return data

    def _format_normalization(self, data: Any, expected_type: Optional[str]) -> Any:
        """Normalize string formats (email, phone, url, bbox/coordinates)."""
        if not isinstance(data, str):
            return data

        # Generic string normalization.
        normalized = data.strip()

        # Type-specific normalization.
        if expected_type == 'email':
            normalized = normalized.lower()

        elif expected_type == 'phone':
            # Keep digits and '+' only.
            normalized = re.sub(r'[^\d+]', '', normalized)

        elif expected_type == 'url':
            if not normalized.startswith(('http://', 'https://')):
                normalized = 'https://' + normalized

        elif expected_type in ['bbox', 'coordinates']:
            # Normalize coordinates to the "(x,y,w,h)" format.
            numbers = re.findall(r'-?\d+\.?\d*', normalized)
            if len(numbers) >= 4:
                normalized = f"({numbers[0]},{numbers[1]},{numbers[2]},{numbers[3]})"

        return normalized

    def _data_cleaning(self, data: Any, expected_type: Optional[str]) -> Any:
        """Clean a string: strip control chars, collapse whitespace, fix mojibake."""
        if isinstance(data, str):
            # Generic cleanup.
            cleaned = data.strip()

            # Remove control characters.
            cleaned = re.sub(r'[\x00-\x1f\x7f-\x9f]', '', cleaned)

            # Collapse runs of whitespace.
            cleaned = re.sub(r'\s+', ' ', cleaned)

            # Type-specific fixes.
            if expected_type == 'text':
                # Repair common UTF-8/latin-1 mojibake sequences.
                replacements = {
                    '’': "'",
                    '“': '"',
                    'â€': '"',
                    '…': '...'
                }
                for old, new in replacements.items():
                    cleaned = cleaned.replace(old, new)

            return cleaned

        return data
|
||||
|
||||
|
||||
# Factory pour créer les stratégies
|
||||
class RecoveryStrategyFactory:
    """Factory producing the appropriate recovery-strategy instances."""

    @staticmethod
    def create_strategies() -> List[BaseRecoveryStrategy]:
        """Instantiate every available recovery strategy."""
        return [
            SpatialFallbackStrategy(),
            SemanticVariantStrategy(),
            RetryWithBackoffStrategy(),
            DataNormalizationStrategy(),
        ]

    @staticmethod
    def get_strategy_for_error(error_type: str, context: Dict[str, Any]) -> Optional[BaseRecoveryStrategy]:
        """Return the first strategy able to handle *error_type*, or None."""
        return next(
            (candidate
             for candidate in RecoveryStrategyFactory.create_strategies()
             if candidate.can_handle(error_type, context)),
            None,
        )
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Manual smoke test for the recovery strategies
    logging.basicConfig(level=logging.INFO)

    # Exercise SpatialFallbackStrategy on a missing-target error
    spatial_strategy = SpatialFallbackStrategy()
    context = RecoveryContext(
        error_type="TargetNotFoundError",
        error_message="Target not found",
        original_data={'target': {'bbox': {'x': 100, 'y': 100, 'width': 50, 'height': 20}}},
        attempt_number=1,
        max_attempts=3,
        timestamp=datetime.now(),
        additional_context={'screen_state': 'mock_state'}
    )

    result = spatial_strategy.recover(context)
    print(f"Spatial strategy result: {result}")

    # Exercise SemanticVariantStrategy by reusing the same context,
    # switched to a changed-element error with a text pattern to match
    semantic_strategy = SemanticVariantStrategy()
    context.error_type = "UIElementChangedError"
    context.original_data = {'text_pattern': 'Submit Button'}

    result = semantic_strategy.recover(context)
    print(f"Semantic strategy result: {result}")
|
||||
399
core/execution/screen_signature.py
Normal file
399
core/execution/screen_signature.py
Normal file
@@ -0,0 +1,399 @@
|
||||
"""
|
||||
Screen Signature - Génération de signatures d'écran pour apprentissage persistant
|
||||
|
||||
Fiche #18 - Utilitaire pour générer des signatures stables d'écrans
|
||||
permettant de reconnaître des layouts similaires entre sessions.
|
||||
|
||||
Auteur: Dom, Alice Kiro - 22 décembre 2025
|
||||
"""
|
||||
|
||||
import hashlib
|
||||
import logging
|
||||
from typing import List, Optional, Dict, Any
|
||||
from dataclasses import dataclass
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class LayoutElement:
    """Simplified UI element used to build a layout signature."""
    role: str  # lower-cased element role (e.g. 'button'); 'unknown' if absent
    bbox: tuple  # (x, y, w, h) normalized to the 0-1 screen range
    area: float  # normalized area (w * h)
    text_length: int = 0  # length of the stripped label, 0 when no label
|
||||
|
||||
|
||||
def screen_signature(
    screen_state,
    ui_elements: List,
    mode: str = "layout"
) -> str:
    """
    Generate a stable signature for a screen.

    Supported modes:
    - "layout": based on the spatial arrangement of UI elements
    - "content": based on textual content and element roles
    - "hybrid": combination of layout + content

    Args:
        screen_state: Current ScreenState
        ui_elements: Detected UI elements
        mode: Signature mode ("layout", "content", "hybrid")

    Returns:
        Hexadecimal (MD5) signature

    Raises:
        ValueError: when *mode* is not one of the supported values.
    """
    if mode == "hybrid":
        combined = "|".join((
            _layout_signature(screen_state, ui_elements),
            _content_signature(screen_state, ui_elements),
        ))
        return hashlib.md5(combined.encode('utf-8')).hexdigest()
    if mode == "layout":
        return _layout_signature(screen_state, ui_elements)
    if mode == "content":
        return _content_signature(screen_state, ui_elements)
    raise ValueError(f"Unknown signature mode: {mode}")
|
||||
|
||||
|
||||
def _layout_signature(screen_state, ui_elements: List) -> str:
|
||||
"""
|
||||
Signature basée sur la disposition des éléments.
|
||||
|
||||
Utilise:
|
||||
- Positions relatives des éléments (normalisées)
|
||||
- Tailles relatives
|
||||
- Rôles des éléments
|
||||
- Structure hiérarchique approximative
|
||||
|
||||
Résistant aux petits changements de position mais sensible
|
||||
aux changements de layout majeurs.
|
||||
"""
|
||||
if not ui_elements:
|
||||
return hashlib.md5(b"empty_layout").hexdigest()
|
||||
|
||||
# Obtenir la résolution d'écran pour normalisation
|
||||
try:
|
||||
screen_width = screen_state.window.screen_resolution[0]
|
||||
screen_height = screen_state.window.screen_resolution[1]
|
||||
except (AttributeError, IndexError):
|
||||
screen_width, screen_height = 1920, 1080 # Fallback
|
||||
|
||||
# Convertir les éléments en format simplifié
|
||||
layout_elements = []
|
||||
|
||||
for elem in ui_elements:
|
||||
try:
|
||||
# Extraire bbox (format XYWH)
|
||||
if hasattr(elem, 'bbox'):
|
||||
bbox = elem.bbox
|
||||
if hasattr(bbox, 'to_tuple'):
|
||||
x, y, w, h = bbox.to_tuple()
|
||||
else:
|
||||
x, y, w, h = bbox
|
||||
else:
|
||||
continue # Skip si pas de bbox
|
||||
|
||||
# Normaliser les coordonnées (0-1)
|
||||
norm_x = x / screen_width
|
||||
norm_y = y / screen_height
|
||||
norm_w = w / screen_width
|
||||
norm_h = h / screen_height
|
||||
|
||||
# Calculer l'aire normalisée
|
||||
area = norm_w * norm_h
|
||||
|
||||
# Extraire le rôle
|
||||
role = getattr(elem, 'role', '') or getattr(elem, 'type', '') or 'unknown'
|
||||
|
||||
# Longueur du texte (approximative)
|
||||
label = getattr(elem, 'label', '') or ''
|
||||
text_length = len(label.strip()) if label else 0
|
||||
|
||||
layout_elements.append(LayoutElement(
|
||||
role=role.lower(),
|
||||
bbox=(norm_x, norm_y, norm_w, norm_h),
|
||||
area=area,
|
||||
text_length=text_length
|
||||
))
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error processing element for layout signature: {e}")
|
||||
continue
|
||||
|
||||
if not layout_elements:
|
||||
return hashlib.md5(b"no_valid_elements").hexdigest()
|
||||
|
||||
# Trier par position (top-left à bottom-right) pour stabilité
|
||||
layout_elements.sort(key=lambda e: (e.bbox[1], e.bbox[0])) # y puis x
|
||||
|
||||
# Construire la signature
|
||||
signature_parts = []
|
||||
|
||||
# 1. Nombre total d'éléments par rôle
|
||||
role_counts = {}
|
||||
for elem in layout_elements:
|
||||
role_counts[elem.role] = role_counts.get(elem.role, 0) + 1
|
||||
|
||||
signature_parts.append(f"roles:{','.join(f'{r}:{c}' for r, c in sorted(role_counts.items()))}")
|
||||
|
||||
# 2. Grille approximative (diviser l'écran en 4x4)
|
||||
grid_signature = _compute_grid_signature(layout_elements)
|
||||
signature_parts.append(f"grid:{grid_signature}")
|
||||
|
||||
# 3. Éléments dominants (les plus gros)
|
||||
dominant_elements = sorted(layout_elements, key=lambda e: e.area, reverse=True)[:5]
|
||||
dominant_sig = []
|
||||
for elem in dominant_elements:
|
||||
# Position approximative (arrondie)
|
||||
x, y, w, h = elem.bbox
|
||||
grid_x = int(x * 4) # 0-3
|
||||
grid_y = int(y * 4) # 0-3
|
||||
size_class = "L" if elem.area > 0.1 else "M" if elem.area > 0.01 else "S"
|
||||
dominant_sig.append(f"{elem.role}@{grid_x},{grid_y}:{size_class}")
|
||||
|
||||
signature_parts.append(f"dominant:{','.join(dominant_sig)}")
|
||||
|
||||
# Combiner et hasher
|
||||
signature_string = "|".join(signature_parts)
|
||||
return hashlib.md5(signature_string.encode('utf-8')).hexdigest()
|
||||
|
||||
|
||||
def _content_signature(screen_state, ui_elements: List) -> str:
    """
    Signature based on textual content and element roles.

    Uses:
    - Detected texts (normalized)
    - Element roles
    - Window title
    - Salient keywords

    Robust to position changes but sensitive to content changes.

    Returns:
        Hexadecimal MD5 digest of the collected content parts.
    """
    signature_parts = []

    # 1. Window title, normalized to drop timestamps, versions, etc.
    try:
        window_title = screen_state.window.window_title or ""
        normalized_title = _normalize_text_for_signature(window_title)
        if normalized_title:
            signature_parts.append(f"title:{normalized_title}")
    except AttributeError:
        # screen_state (or its window) is missing -> skip the title part
        pass

    # 2. Texts carried by the UI elements themselves
    ui_texts = []
    role_text_pairs = []

    for elem in ui_elements:
        try:
            # Extract the element's label text
            label = getattr(elem, 'label', '') or ''
            if label and len(label.strip()) > 0:
                normalized_text = _normalize_text_for_signature(label)
                if normalized_text:
                    ui_texts.append(normalized_text)

                    # Pair the text with its role for a stronger signal
                    role = getattr(elem, 'role', '') or 'unknown'
                    role_text_pairs.append(f"{role}:{normalized_text}")
        except Exception:
            continue

    # 3. OCR-detected texts (fragments of <= 2 chars are ignored)
    try:
        detected_texts = screen_state.perception.detected_text or []
        for text in detected_texts:
            if isinstance(text, str) and len(text.strip()) > 2:
                normalized_text = _normalize_text_for_signature(text)
                if normalized_text:
                    ui_texts.append(normalized_text)
    except AttributeError:
        pass

    # Assemble the signature (sorted for stability, capped for size)
    if ui_texts:
        ui_texts.sort()
        signature_parts.append(f"texts:{','.join(ui_texts[:10])}")  # cap at 10

    if role_text_pairs:
        role_text_pairs.sort()
        signature_parts.append(f"role_texts:{','.join(role_text_pairs[:8])}")  # cap at 8

    # 4. Salient keywords (buttons, links, headings, ...)
    keywords = _extract_keywords(ui_elements)
    if keywords:
        signature_parts.append(f"keywords:{','.join(sorted(keywords))}")

    if not signature_parts:
        return hashlib.md5(b"no_content").hexdigest()

    # Combine and hash
    signature_string = "|".join(signature_parts)
    return hashlib.md5(signature_string.encode('utf-8')).hexdigest()
|
||||
|
||||
|
||||
def _compute_grid_signature(layout_elements: List[LayoutElement]) -> str:
|
||||
"""
|
||||
Calculer une signature de grille 4x4.
|
||||
|
||||
Divise l'écran en 16 cellules et compte les éléments par cellule.
|
||||
"""
|
||||
grid = [[0 for _ in range(4)] for _ in range(4)]
|
||||
|
||||
for elem in layout_elements:
|
||||
x, y, w, h = elem.bbox
|
||||
|
||||
# Centre de l'élément
|
||||
center_x = x + w / 2
|
||||
center_y = y + h / 2
|
||||
|
||||
# Cellule de grille
|
||||
grid_x = min(3, int(center_x * 4))
|
||||
grid_y = min(3, int(center_y * 4))
|
||||
|
||||
grid[grid_y][grid_x] += 1
|
||||
|
||||
# Convertir en string compacte
|
||||
grid_str = ""
|
||||
for row in grid:
|
||||
for count in row:
|
||||
grid_str += str(min(9, count)) # Limiter à 9
|
||||
|
||||
return grid_str
|
||||
|
||||
|
||||
def _normalize_text_for_signature(text: str) -> str:
|
||||
"""
|
||||
Normaliser un texte pour signature stable.
|
||||
|
||||
Enlève:
|
||||
- Timestamps
|
||||
- Numéros de version
|
||||
- Espaces multiples
|
||||
- Caractères spéciaux
|
||||
- Casse
|
||||
"""
|
||||
if not text:
|
||||
return ""
|
||||
|
||||
import re
|
||||
|
||||
# Convertir en minuscules
|
||||
text = text.lower().strip()
|
||||
|
||||
# Enlever timestamps communs
|
||||
text = re.sub(r'\d{1,2}:\d{2}(:\d{2})?', '', text) # HH:MM ou HH:MM:SS
|
||||
text = re.sub(r'\d{1,2}/\d{1,2}/\d{2,4}', '', text) # Dates
|
||||
text = re.sub(r'\d{4}-\d{2}-\d{2}', '', text) # Dates ISO
|
||||
|
||||
# Enlever numéros de version
|
||||
text = re.sub(r'v?\d+\.\d+(\.\d+)?', '', text)
|
||||
|
||||
# Enlever numéros génériques
|
||||
text = re.sub(r'\b\d+\b', '', text)
|
||||
|
||||
# Normaliser espaces
|
||||
text = re.sub(r'\s+', ' ', text)
|
||||
|
||||
# Garder seulement lettres, espaces et quelques caractères
|
||||
text = re.sub(r'[^a-z\s\-_]', '', text)
|
||||
|
||||
# Enlever mots très courts ou communs
|
||||
words = text.split()
|
||||
filtered_words = []
|
||||
|
||||
stop_words = {'the', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with', 'by'}
|
||||
|
||||
for word in words:
|
||||
if len(word) >= 3 and word not in stop_words:
|
||||
filtered_words.append(word)
|
||||
|
||||
result = ' '.join(filtered_words).strip()
|
||||
|
||||
# Limiter la longueur
|
||||
if len(result) > 50:
|
||||
result = result[:50]
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def _extract_keywords(ui_elements: List) -> List[str]:
|
||||
"""
|
||||
Extraire des mots-clés importants des éléments UI.
|
||||
|
||||
Se concentre sur:
|
||||
- Boutons avec texte significatif
|
||||
- Liens
|
||||
- Titres/headers
|
||||
- Labels de formulaires
|
||||
"""
|
||||
keywords = set()
|
||||
|
||||
important_roles = {'button', 'link', 'heading', 'label', 'tab', 'menuitem'}
|
||||
|
||||
for elem in ui_elements:
|
||||
try:
|
||||
role = getattr(elem, 'role', '') or ''
|
||||
label = getattr(elem, 'label', '') or ''
|
||||
|
||||
if role.lower() in important_roles and label:
|
||||
normalized = _normalize_text_for_signature(label)
|
||||
if normalized and len(normalized) >= 3:
|
||||
# Prendre le premier mot significatif
|
||||
first_word = normalized.split()[0] if normalized.split() else ""
|
||||
if len(first_word) >= 3:
|
||||
keywords.add(first_word)
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
return list(keywords)
|
||||
|
||||
|
||||
def compare_signatures(sig1: str, sig2: str) -> float:
    """
    Compare two signatures.

    MD5 digests only support exact matching, so the score is binary;
    a future version could compare pre-hash components for partial
    similarity.

    Args:
        sig1: First signature
        sig2: Second signature

    Returns:
        Similarity score (0.0 = different, 1.0 = identical)
    """
    return 1.0 if sig1 == sig2 else 0.0
|
||||
|
||||
|
||||
def signature_stats(signatures: List[str]) -> Dict[str, Any]:
    """
    Compute statistics over a collection of signatures.

    Args:
        signatures: List of signature strings (possibly empty)

    Returns:
        Dict with keys "total", "unique", "duplicates" and
        "uniqueness_ratio". The schema is identical for empty and
        non-empty inputs (ratio is 0.0 when there are no signatures).
    """
    if not signatures:
        # Same keys as the non-empty branch so callers never need to
        # special-case an empty input (previously the ratio key was
        # missing here).
        return {"total": 0, "unique": 0, "duplicates": 0, "uniqueness_ratio": 0.0}

    unique_count = len(set(signatures))

    return {
        "total": len(signatures),
        "unique": unique_count,
        "duplicates": len(signatures) - unique_count,
        # Safe division: the empty case returned above
        "uniqueness_ratio": unique_count / len(signatures),
    }
|
||||
101
core/execution/spatial_index.py
Normal file
101
core/execution/spatial_index.py
Normal file
@@ -0,0 +1,101 @@
|
||||
# core/execution/spatial_index.py
|
||||
"""
|
||||
Index spatial par grille pour optimisation des requêtes géométriques UI.
|
||||
|
||||
Auteur : Dom, Alice Kiro - 19 décembre 2024
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Dict, Iterable, List, Optional, Set, Tuple
|
||||
|
||||
from ..models.ui_element import UIElement
|
||||
|
||||
|
||||
def _right(b):
|
||||
return b[0] + b[2]
|
||||
|
||||
|
||||
def _bottom(b):
|
||||
return b[1] + b[3]
|
||||
|
||||
|
||||
def _intersects(a, b) -> bool:
|
||||
ax1, ay1, aw, ah = a
|
||||
bx1, by1, bw, bh = b
|
||||
ax2, ay2 = _right(a), _bottom(a)
|
||||
bx2, by2 = _right(b), _bottom(b)
|
||||
return not (ax2 <= bx1 or bx2 <= ax1 or ay2 <= by1 or by2 <= ay1)
|
||||
|
||||
|
||||
def _contains_point(b, x, y) -> bool:
|
||||
return (b[0] <= x <= _right(b)) and (b[1] <= y <= _bottom(b))
|
||||
|
||||
|
||||
@dataclass
class SpatialIndexGrid:
    """
    Simple grid-based spatial index (well suited to UI rectangles).

    - build: O(n)
    - query_bbox / query_point: ~O(k) over the touched cells

    Author: Dom, Alice Kiro - 19 December 2024
    """
    cell_size: int = 160

    _cells: Dict[Tuple[int, int], List[UIElement]] = field(default_factory=dict)
    _by_id: Dict[str, UIElement] = field(default_factory=dict)
    _built: bool = False

    def build(self, elements: List[UIElement]) -> "SpatialIndexGrid":
        """Populate the index from a list of UI elements."""
        self._cells = {}
        self._by_id = {}
        for element in elements:
            self._by_id[element.element_id] = element
            # Register the element in every grid cell its bbox touches
            for cell_key in self._cells_for_bbox(element.bbox):
                self._cells.setdefault(cell_key, []).append(element)
        self._built = True
        return self

    def _cells_for_bbox(self, bbox) -> Iterable[Tuple[int, int]]:
        """Yield every grid cell touched by *bbox* (XYWH)."""
        x, y, w, h = bbox
        size = self.cell_size
        first_col, first_row = int(x // size), int(y // size)
        last_col, last_row = int((x + w) // size), int((y + h) // size)

        for row in range(first_row, last_row + 1):
            for col in range(first_col, last_col + 1):
                yield (col, row)

    def query_bbox(self, bbox) -> List[UIElement]:
        """Return every indexed element whose bbox intersects *bbox*."""
        if not self._built:
            return []
        visited: Set[str] = set()
        matches: List[UIElement] = []
        for cell_key in self._cells_for_bbox(bbox):
            for element in self._cells.get(cell_key, []):
                # An element can span several cells: dedupe by id
                if element.element_id in visited:
                    continue
                visited.add(element.element_id)
                if _intersects(element.bbox, bbox):
                    matches.append(element)
        return matches

    def query_point(self, x: int, y: int) -> List[UIElement]:
        """Return every indexed element whose bbox contains point (x, y)."""
        if not self._built:
            return []
        size = self.cell_size
        candidates = self._cells.get((int(x // size), int(y // size)), [])
        return [e for e in candidates if _contains_point(e.bbox, x, y)]
|
||||
23
core/execution/target_memory.py
Normal file
23
core/execution/target_memory.py
Normal file
@@ -0,0 +1,23 @@
|
||||
# core/execution/target_memory.py
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Optional, Dict, Any
|
||||
|
||||
@dataclass
class TargetFingerprint:
    """Lightweight snapshot of a UI element used to re-identify targets."""
    role: str
    etype: str
    label: str
    bbox: tuple  # XYWH
    element_id: str

    @staticmethod
    def from_element(e) -> "TargetFingerprint":
        """Build a fingerprint from any element-like object, tolerating missing attributes."""
        def text_attr(name: str) -> str:
            # Missing or falsy text attributes collapse to ""
            return getattr(e, name, "") or ""

        return TargetFingerprint(
            role=text_attr("role"),
            etype=text_attr("type"),
            label=text_attr("label"),
            bbox=getattr(e, "bbox", (0, 0, 0, 0)),
            element_id=getattr(e, "element_id", ""),
        )
|
||||
3495
core/execution/target_resolver.py
Normal file
3495
core/execution/target_resolver.py
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user