v1.0 - Version stable: multi-PC, détection UI-DETR-1, 3 modes exécution
- Frontend v4 accessible sur réseau local (192.168.1.40) - Ports ouverts: 3002 (frontend), 5001 (backend), 5004 (dashboard) - Ollama GPU fonctionnel - Self-healing interactif - Dashboard confiance Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
19
core/healing/__init__.py
Normal file
19
core/healing/__init__.py
Normal file
@@ -0,0 +1,19 @@
|
||||
"""
|
||||
Self-Healing Workflows Module
|
||||
|
||||
This module provides automatic recovery capabilities for RPA workflows,
|
||||
enabling them to adapt and recover from common failures.
|
||||
"""
|
||||
|
||||
from .healing_engine import SelfHealingEngine, RecoveryContext, RecoveryResult
|
||||
from .learning_repository import LearningRepository, RecoveryPattern
|
||||
from .confidence_scorer import ConfidenceScorer
|
||||
|
||||
__all__ = [
|
||||
'SelfHealingEngine',
|
||||
'RecoveryContext',
|
||||
'RecoveryResult',
|
||||
'LearningRepository',
|
||||
'RecoveryPattern',
|
||||
'ConfidenceScorer',
|
||||
]
|
||||
172
core/healing/confidence_scorer.py
Normal file
172
core/healing/confidence_scorer.py
Normal file
@@ -0,0 +1,172 @@
|
||||
"""Calculate confidence scores for recovery actions."""
|
||||
|
||||
import math

from typing import Dict, Any, Optional
from difflib import SequenceMatcher

import numpy as np

from .models import RecoveryContext
|
||||
|
||||
|
||||
class ConfidenceScorer:
    """Calculate confidence scores for recovery actions.

    Scores blend a per-strategy base confidence, historical success rates,
    and contextual factors; they are used to decide whether an automatic
    recovery action is safe to execute.
    """

    def __init__(self):
        """Initialize the confidence scorer."""
        # Base confidence per known recovery strategy; unknown strategies
        # fall back to 0.3 in calculate_recovery_confidence().
        self.base_confidence = {
            'semantic_variant': 0.8,
            'spatial_fallback': 0.6,
            'timing_adaptation': 0.7,
            'format_transformation': 0.5
        }

    def calculate_element_similarity_score(
        self,
        original: str,
        candidate: str,
        original_pos: Optional[tuple] = None,
        candidate_pos: Optional[tuple] = None
    ) -> float:
        """
        Calculate similarity between original and candidate elements.

        Args:
            original: Original element identifier
            candidate: Candidate element identifier
            original_pos: Original position (x, y)
            candidate_pos: Candidate position (x, y)

        Returns:
            Similarity score (0.0 to 1.0)
        """
        text_similarity = self._text_similarity(original, candidate)

        # Blend in spatial similarity only when both positions are known;
        # otherwise the score is purely textual.
        if original_pos and candidate_pos:
            position_similarity = self._position_similarity(original_pos, candidate_pos)
            return text_similarity * 0.6 + position_similarity * 0.4
        return text_similarity

    def calculate_recovery_confidence(
        self,
        strategy: str,
        context: 'RecoveryContext',
        historical_success_rate: float = 0.0
    ) -> float:
        """
        Calculate overall confidence for a recovery strategy.

        Args:
            strategy: Recovery strategy name
            context: Recovery context
            historical_success_rate: Historical success rate for this pattern

        Returns:
            Confidence score (0.0 to 1.0)
        """
        # Base confidence for strategy (0.3 for unknown strategies).
        base_confidence = self.base_confidence.get(strategy, 0.3)

        # Scale by history: proven patterns earn up to the full base
        # confidence; patterns with no history take a 30% penalty.
        if historical_success_rate > 0:
            adjusted_confidence = base_confidence * (0.5 + 0.5 * historical_success_rate)
        else:
            adjusted_confidence = base_confidence * 0.7  # Penalty for no history

        # Adjust based on context factors (retries, metadata quality).
        context_factor = self._calculate_context_factor(context)

        final_confidence = adjusted_confidence * context_factor

        # Clamp to the valid [0.0, 1.0] range.
        return max(0.0, min(1.0, final_confidence))

    def _text_similarity(self, text1: str, text2: str) -> float:
        """Calculate text similarity using sequence matching."""
        # Missing text cannot be compared meaningfully.
        if not text1 or not text2:
            return 0.0

        # Case- and surrounding-whitespace-insensitive comparison.
        text1 = text1.lower().strip()
        text2 = text2.lower().strip()

        # Fast path for exact (normalized) match.
        if text1 == text2:
            return 1.0

        return SequenceMatcher(None, text1, text2).ratio()

    def _position_similarity(self, pos1: tuple, pos2: tuple) -> float:
        """
        Calculate position similarity based on distance.

        Args:
            pos1: Position (x, y)
            pos2: Position (x, y)

        Returns:
            Similarity score (0.0 to 1.0)
        """
        # Euclidean distance; math.hypot is numerically stable and avoids
        # pulling numpy in for plain scalar arithmetic.
        distance = math.hypot(pos1[0] - pos2[0], pos1[1] - pos2[1])

        # Convert distance to similarity (closer = higher score) using
        # exponential decay: 1.0 at the same point, ~0.37 at 100 px.
        return math.exp(-distance / 100.0)

    def _calculate_context_factor(self, context: 'RecoveryContext') -> float:
        """
        Calculate context factor based on various context attributes.

        Args:
            context: Recovery context

        Returns:
            Context factor (0.5 to 1.5)
        """
        factor = 1.0

        # Each retry beyond the first reduces confidence by 10%.
        if context.attempt_count > 1:
            factor *= (1.0 - 0.1 * (context.attempt_count - 1))

        # Richer metadata slightly boosts confidence.
        if context.metadata.get('element_type'):
            factor *= 1.1

        if context.metadata.get('application'):
            factor *= 1.05

        # Ensure reasonable bounds.
        return max(0.5, min(1.5, factor))

    def is_safe_to_proceed(
        self,
        confidence: float,
        threshold: float,
        involves_data_modification: bool = False
    ) -> bool:
        """
        Determine if it's safe to proceed with a recovery action.

        Args:
            confidence: Confidence score
            threshold: Safety threshold
            involves_data_modification: Whether action modifies data

        Returns:
            True if safe to proceed
        """
        # Data-modifying actions require at least 0.8 confidence.
        if involves_data_modification:
            threshold = max(threshold, 0.8)

        return confidence >= threshold
|
||||
343
core/healing/execution_integration.py
Normal file
343
core/healing/execution_integration.py
Normal file
@@ -0,0 +1,343 @@
|
||||
"""Integration of self-healing with execution loop."""
|
||||
|
||||
import logging
|
||||
from typing import Optional, Dict, Any
|
||||
from pathlib import Path
|
||||
|
||||
from core.healing.healing_engine import SelfHealingEngine
|
||||
from core.healing.recovery_logger import RecoveryLogger
|
||||
from core.healing.models import RecoveryContext, RecoveryResult
|
||||
from core.execution.action_executor import ExecutionResult, ExecutionStatus
|
||||
|
||||
# Analytics integration
|
||||
try:
|
||||
from core.analytics.analytics_system import get_analytics_system
|
||||
ANALYTICS_AVAILABLE = True
|
||||
except ImportError:
|
||||
ANALYTICS_AVAILABLE = False
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SelfHealingIntegration:
    """
    Integration layer between self-healing engine and execution loop.

    This class provides methods to integrate self-healing capabilities
    into the existing execution loop without major refactoring.
    """

    def __init__(
        self,
        storage_path: Optional[Path] = None,
        log_path: Optional[Path] = None,
        enabled: bool = True
    ):
        """
        Initialize self-healing integration.

        Args:
            storage_path: Path for storing learned patterns
            log_path: Path for recovery logs
            enabled: Whether self-healing is enabled
        """
        self.enabled = enabled

        if enabled:
            self.healing_engine = SelfHealingEngine(storage_path=storage_path)
            self.recovery_logger = RecoveryLogger(log_path=log_path)
            logger.info("Self-healing integration initialized")
        else:
            # When disabled, every public method short-circuits on
            # self.enabled before touching these None attributes.
            self.healing_engine = None
            self.recovery_logger = None
            logger.info("Self-healing integration disabled")

        # Analytics integration is best-effort: a failure here must not
        # prevent self-healing itself from working.
        self._analytics = None
        if ANALYTICS_AVAILABLE:
            try:
                self._analytics = get_analytics_system()
                logger.info("Analytics integrated with self-healing")
            except Exception as e:
                logger.warning(f"Analytics integration failed: {e}")

    def handle_execution_failure(
        self,
        action_info: Dict[str, Any],
        execution_result: ExecutionResult,
        workflow_id: str,
        node_id: str,
        screenshot_path: str,
        attempt_count: int = 1
    ) -> Optional[RecoveryResult]:
        """
        Handle an execution failure and attempt recovery.

        Args:
            action_info: Information about the failed action
            execution_result: Result of the failed execution
            workflow_id: ID of the workflow
            node_id: ID of the current node
            screenshot_path: Path to screenshot at time of failure
            attempt_count: Number of attempts so far

        Returns:
            RecoveryResult if recovery attempted, None if disabled
        """
        if not self.enabled:
            return None

        # Create recovery context
        context = self._create_recovery_context(
            action_info=action_info,
            execution_result=execution_result,
            workflow_id=workflow_id,
            node_id=node_id,
            screenshot_path=screenshot_path,
            attempt_count=attempt_count
        )

        # Attempt recovery
        logger.info(f"Attempting recovery for failed action: {action_info.get('action')}")
        result = self.healing_engine.attempt_recovery(context)

        # Log the recovery attempt
        self.recovery_logger.log_recovery_attempt(context, result)

        # Notify analytics about recovery attempt (best-effort)
        if self._analytics:
            try:
                self._analytics.collectors.metrics.record_recovery_attempt(
                    workflow_id=workflow_id,
                    node_id=node_id,
                    failure_reason=context.failure_reason,
                    recovery_success=result.success,
                    strategy_used=result.strategy_used if result.success else None,
                    # BUG FIX: RecoveryResult exposes `confidence_score`;
                    # reading `result.confidence` raised AttributeError on
                    # every successful recovery with analytics enabled.
                    confidence=result.confidence_score if result.success else 0.0
                )
            except Exception as e:
                logger.warning(f"Analytics recovery notification failed: {e}")

        return result

    def update_workflow_from_recovery(
        self,
        workflow_id: str,
        node_id: str,
        edge_id: str,
        recovery_result: RecoveryResult
    ) -> bool:
        """
        Update workflow definition based on successful recovery.

        Args:
            workflow_id: ID of the workflow
            node_id: ID of the node
            edge_id: ID of the edge
            recovery_result: Successful recovery result

        Returns:
            True if workflow updated successfully
        """
        if not self.enabled or not recovery_result.success:
            return False

        try:
            # Extract learned pattern
            if recovery_result.learned_pattern:
                logger.info(
                    f"Updating workflow {workflow_id} with learned pattern: "
                    f"{recovery_result.learned_pattern}"
                )

            # TODO: Integrate with workflow storage to update definition
            # This would update the workflow's edge or node with new information
            # For now, just log the update

            return True
        except Exception as e:
            logger.error(f"Failed to update workflow: {e}")
            return False

    def get_recovery_suggestions(
        self,
        action_info: Dict[str, Any],
        workflow_id: str,
        node_id: str,
        screenshot_path: str
    ) -> list:
        """
        Get recovery suggestions for a potential failure.

        Args:
            action_info: Information about the action
            workflow_id: ID of the workflow
            node_id: ID of the node
            screenshot_path: Path to current screenshot

        Returns:
            List of recovery suggestions
        """
        if not self.enabled:
            return []

        # Create a dummy context for getting suggestions; attempt_count=0
        # marks this as a pre-failure query rather than a real failure.
        context = RecoveryContext(
            original_action=action_info.get('action', 'unknown'),
            target_element=action_info.get('target', 'unknown'),
            failure_reason='potential_failure',
            screenshot_path=screenshot_path,
            workflow_id=workflow_id,
            node_id=node_id,
            attempt_count=0
        )

        return self.healing_engine.get_recovery_suggestions(context)

    def get_statistics(self) -> Dict[str, Any]:
        """
        Get self-healing statistics.

        Returns:
            Dictionary with statistics (always includes an 'enabled' flag)
        """
        if not self.enabled:
            return {"enabled": False}

        stats = self.recovery_logger.get_recovery_statistics()
        stats["enabled"] = True

        return stats

    def get_insights(self) -> list:
        """
        Get insights from recovery patterns.

        Returns:
            List of insight strings (empty when disabled)
        """
        if not self.enabled:
            return []

        return self.recovery_logger.generate_insights()

    def check_alerts(self) -> list:
        """
        Check for alerts that need administrator attention.

        Returns:
            List of alert dictionaries (empty when disabled)
        """
        if not self.enabled:
            return []

        return self.recovery_logger.check_for_alerts()

    def prune_patterns(
        self,
        max_age_days: int = 90,
        min_confidence: float = 0.3
    ):
        """
        Prune outdated recovery patterns.

        Args:
            max_age_days: Maximum age for patterns
            min_confidence: Minimum confidence threshold
        """
        if not self.enabled:
            return

        self.healing_engine.prune_learned_patterns(max_age_days, min_confidence)
        logger.info(f"Pruned patterns older than {max_age_days} days")

    def _create_recovery_context(
        self,
        action_info: Dict[str, Any],
        execution_result: ExecutionResult,
        workflow_id: str,
        node_id: str,
        screenshot_path: str,
        attempt_count: int
    ) -> RecoveryContext:
        """Create a recovery context from execution failure."""
        # Determine failure reason from execution result
        failure_reason = self._determine_failure_reason(execution_result)

        # Extract target element
        target_element = action_info.get('target', 'unknown')

        # Extract metadata
        metadata = {
            'action_type': action_info.get('action', 'unknown'),
            'execution_status': execution_result.status.value,
            'error_message': execution_result.message,
            'element_type': action_info.get('element_type', 'unknown')
        }

        # Add input value if available
        if 'value' in action_info:
            metadata['input_value'] = action_info['value']

        return RecoveryContext(
            original_action=action_info.get('action', 'unknown'),
            target_element=target_element,
            failure_reason=failure_reason,
            screenshot_path=screenshot_path,
            workflow_id=workflow_id,
            node_id=node_id,
            attempt_count=attempt_count,
            metadata=metadata
        )

    def _determine_failure_reason(self, execution_result: ExecutionResult) -> str:
        """Determine failure reason from execution result.

        Maps ExecutionStatus (and, for generic failures, substrings of the
        error message) to the failure-reason strings used as pattern keys.
        """
        if execution_result.status == ExecutionStatus.TARGET_NOT_FOUND:
            return 'element_not_found'
        elif execution_result.status == ExecutionStatus.TIMEOUT:
            return 'timeout'
        elif execution_result.status == ExecutionStatus.FAILED:
            # Try to infer from message
            message = execution_result.message.lower()
            if 'validation' in message or 'invalid' in message:
                return 'validation_failed'
            elif 'timeout' in message:
                return 'timeout'
            elif 'not found' in message:
                return 'element_not_found'
            else:
                return 'execution_failed'
        else:
            return 'unknown_failure'
|
||||
|
||||
|
||||
# Global instance for easy access; created lazily by
# get_self_healing_integration() and shared process-wide.
_global_integration: Optional[SelfHealingIntegration] = None
|
||||
|
||||
|
||||
def get_self_healing_integration(
    storage_path: Optional[Path] = None,
    log_path: Optional[Path] = None,
    enabled: bool = True
) -> SelfHealingIntegration:
    """Return the process-wide SelfHealingIntegration, creating it on first use.

    Args:
        storage_path: Path for storing learned patterns
        log_path: Path for recovery logs
        enabled: Whether self-healing is enabled

    Returns:
        The shared SelfHealingIntegration instance
    """
    global _global_integration

    # Lazy singleton: only the first call's arguments matter; subsequent
    # calls return the already-built instance unchanged.
    if _global_integration is not None:
        return _global_integration

    _global_integration = SelfHealingIntegration(
        storage_path=storage_path, log_path=log_path, enabled=enabled
    )
    return _global_integration
|
||||
195
core/healing/learning_repository.py
Normal file
195
core/healing/learning_repository.py
Normal file
@@ -0,0 +1,195 @@
|
||||
"""Repository for storing and retrieving learned recovery patterns."""
|
||||
|
||||
import json
|
||||
import hashlib
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional
|
||||
from datetime import datetime, timedelta
|
||||
from .models import RecoveryPattern, RecoveryContext, RecoveryResult
|
||||
|
||||
|
||||
class LearningRepository:
    """Repository for storing and retrieving learned recovery patterns."""

    def __init__(self, storage_path: Path):
        """
        Initialize the learning repository.

        Args:
            storage_path: Path to store learned patterns
        """
        self.storage_path = Path(storage_path)
        self.storage_path.mkdir(parents=True, exist_ok=True)
        # All patterns live in one JSON file, keyed by pattern_id.
        self.patterns_file = self.storage_path / 'patterns.json'
        # In-memory cache, kept in sync with patterns.json by _save_patterns().
        self.patterns: Dict[str, RecoveryPattern] = {}
        self._load_patterns()

    def store_pattern(self, context: RecoveryContext, result: RecoveryResult):
        """
        Store a recovery pattern from a recovery attempt.

        Updates the counters of an existing pattern with the same key, or
        creates a new pattern; always persists to disk afterwards.

        Args:
            context: Recovery context
            result: Recovery result
        """
        pattern_key = self._generate_pattern_key(context)

        if pattern_key in self.patterns:
            # Update existing pattern
            # NOTE(review): the key ignores the strategy (see
            # _generate_pattern_key), so an attempt using a different
            # strategy updates this pattern's counters -- confirm that
            # aggregation is intended.
            pattern = self.patterns[pattern_key]
            if result.success:
                pattern.success_count += 1
            else:
                pattern.failure_count += 1
            pattern.last_used = datetime.now()
            # Update confidence based on success rate
            pattern.confidence_score = pattern.success_rate
        else:
            # Create new pattern
            pattern = RecoveryPattern(
                pattern_id=pattern_key,
                original_failure=context.failure_reason,
                recovery_strategy=result.strategy_used,
                success_count=1 if result.success else 0,
                failure_count=0 if result.success else 1,
                confidence_score=result.confidence_score,
                context_metadata=self._extract_context_metadata(context),
                created_at=datetime.now(),
                last_used=datetime.now()
            )
            self.patterns[pattern_key] = pattern

        # Persist after every change so patterns survive restarts.
        self._save_patterns()

    def get_matching_patterns(self, context: RecoveryContext) -> List[RecoveryPattern]:
        """
        Get patterns that match the current failure context.

        Args:
            context: Recovery context

        Returns:
            List of matching patterns sorted by success rate and recency
        """
        matching = []
        for pattern in self.patterns.values():
            if self._pattern_matches_context(pattern, context):
                matching.append(pattern)

        # Sort by success rate (primary) and recency (secondary)
        return sorted(
            matching,
            key=lambda p: (p.success_rate, p.last_used.timestamp()),
            reverse=True
        )

    def prune_outdated_patterns(
        self,
        max_age_days: int = 90,
        min_confidence: float = 0.3,
        min_success_rate: float = 0.2
    ):
        """
        Remove outdated or low-confidence patterns.

        A pattern is removed if ANY of the three thresholds is violated.

        Args:
            max_age_days: Maximum age in days for patterns
            min_confidence: Minimum confidence score
            min_success_rate: Minimum success rate
        """
        cutoff_date = datetime.now() - timedelta(days=max_age_days)

        # Collect first, delete after, to avoid mutating while iterating.
        patterns_to_remove = []
        for pattern_id, pattern in self.patterns.items():
            if (pattern.last_used < cutoff_date or
                    pattern.confidence_score < min_confidence or
                    pattern.success_rate < min_success_rate):
                patterns_to_remove.append(pattern_id)

        for pattern_id in patterns_to_remove:
            del self.patterns[pattern_id]

        # Only hit the disk when something actually changed.
        if patterns_to_remove:
            self._save_patterns()

    def get_pattern_by_id(self, pattern_id: str) -> Optional[RecoveryPattern]:
        """Get a specific pattern by ID (None when absent)."""
        return self.patterns.get(pattern_id)

    def get_all_patterns(self) -> List[RecoveryPattern]:
        """Get all stored patterns."""
        return list(self.patterns.values())

    def _generate_pattern_key(self, context: RecoveryContext) -> str:
        """Generate a unique key for a recovery pattern.

        The key is a truncated MD5 of failure reason + action + element type.
        MD5 is used purely as a stable, non-cryptographic fingerprint here.
        """
        # Create key from failure reason, action type, and element type
        key_parts = [
            context.failure_reason,
            context.original_action,
            context.metadata.get('element_type', 'unknown')
        ]
        key_string = '|'.join(key_parts)
        return hashlib.md5(key_string.encode()).hexdigest()[:16]

    def _extract_context_metadata(self, context: RecoveryContext) -> Dict:
        """Extract relevant metadata from context for persistence."""
        return {
            'original_action': context.original_action,
            'target_element': context.target_element,
            'element_type': context.metadata.get('element_type', 'unknown'),
            'application': context.metadata.get('application', 'unknown'),
            'workflow_id': context.workflow_id
        }

    def _pattern_matches_context(
        self,
        pattern: RecoveryPattern,
        context: RecoveryContext
    ) -> bool:
        """Check if a pattern matches the current context.

        Requires equal failure reason and action type; element type must
        match only when both sides specify one.
        """
        # Match on failure reason
        if pattern.original_failure != context.failure_reason:
            return False

        # Match on action type
        if pattern.context_metadata.get('original_action') != context.original_action:
            return False

        # Match on element type if available
        pattern_element_type = pattern.context_metadata.get('element_type')
        context_element_type = context.metadata.get('element_type')
        if pattern_element_type and context_element_type:
            if pattern_element_type != context_element_type:
                return False

        return True

    def _load_patterns(self):
        """Load patterns from storage.

        Missing file is normal on first run; other errors are reported but
        deliberately non-fatal (the repository starts empty).
        """
        if not self.patterns_file.exists():
            return

        try:
            with open(self.patterns_file, 'r') as f:
                data = json.load(f)

            for pattern_id, pattern_data in data.items():
                self.patterns[pattern_id] = RecoveryPattern.from_dict(pattern_data)
        except Exception as e:
            # NOTE(review): consider using the logging module instead of
            # print for these persistence errors.
            print(f"Error loading patterns: {e}")

    def _save_patterns(self):
        """Save patterns to storage using a write-then-rename atomic update."""
        try:
            data = {
                pattern_id: pattern.to_dict()
                for pattern_id, pattern in self.patterns.items()
            }

            # Atomic write: write to a temp file, then rename over the
            # target so a crash never leaves a half-written patterns.json.
            temp_file = self.patterns_file.with_suffix('.tmp')
            with open(temp_file, 'w') as f:
                json.dump(data, f, indent=2)
            temp_file.replace(self.patterns_file)
        except Exception as e:
            # NOTE(review): consider using the logging module instead of
            # print for these persistence errors.
            print(f"Error saving patterns: {e}")
|
||||
120
core/healing/models.py
Normal file
120
core/healing/models.py
Normal file
@@ -0,0 +1,120 @@
|
||||
"""Data models for self-healing workflows."""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Optional, Dict, Any
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
@dataclass
class RecoveryContext:
    """Context describing one failure for which recovery is attempted."""
    original_action: str
    target_element: str
    failure_reason: str
    screenshot_path: str
    workflow_id: str
    node_id: str
    attempt_count: int
    max_attempts: int = 3
    confidence_threshold: float = 0.7
    metadata: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization."""
        # A (non-slots) dataclass instance's __dict__ holds exactly the
        # fields, in declaration order, so a shallow copy reproduces the
        # hand-written mapping one field at a time.
        return dict(vars(self))
|
||||
|
||||
|
||||
@dataclass
class RecoveryResult:
    """Outcome of a single recovery attempt."""
    success: bool
    strategy_used: str
    new_element: Optional[str] = None
    confidence_score: float = 0.0
    execution_time: float = 0.0
    learned_pattern: Optional[Dict] = None
    requires_user_input: bool = False
    error_message: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization."""
        # Dataclass fields are the instance's entire __dict__, in
        # declaration order, so a shallow copy is equivalent to listing
        # each field explicitly.
        return dict(vars(self))
|
||||
|
||||
|
||||
@dataclass
class RecoveryPattern:
    """A learned recovery pattern with its usage statistics."""
    pattern_id: str
    original_failure: str
    recovery_strategy: str
    success_count: int
    failure_count: int
    confidence_score: float
    context_metadata: Dict[str, Any]
    created_at: datetime
    last_used: datetime

    @property
    def success_rate(self) -> float:
        """Fraction of successful uses; 0.0 when the pattern was never used."""
        attempts = self.success_count + self.failure_count
        if attempts == 0:
            return 0.0
        return self.success_count / attempts

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization."""
        payload = dict(vars(self))
        # Timestamps are serialized as ISO-8601 strings for JSON storage.
        payload['created_at'] = self.created_at.isoformat()
        payload['last_used'] = self.last_used.isoformat()
        return payload

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'RecoveryPattern':
        """Create from dictionary (inverse of to_dict)."""
        # Pick fields explicitly so unexpected keys in stored data are
        # ignored rather than raising.
        return cls(
            pattern_id=data['pattern_id'],
            original_failure=data['original_failure'],
            recovery_strategy=data['recovery_strategy'],
            success_count=data['success_count'],
            failure_count=data['failure_count'],
            confidence_score=data['confidence_score'],
            context_metadata=data['context_metadata'],
            created_at=datetime.fromisoformat(data['created_at']),
            last_used=datetime.fromisoformat(data['last_used'])
        )
|
||||
|
||||
|
||||
@dataclass
class RecoverySuggestion:
    """A suggested recovery action."""
    strategy: str        # recovery strategy name (e.g. 'semantic_variant')
    confidence: float    # confidence score, 0.0 to 1.0
    description: str     # human-readable summary of the suggested action
    estimated_time: float  # estimated execution time -- presumably seconds; verify against producer
    metadata: Dict[str, Any] = field(default_factory=dict)  # extra strategy-specific details
|
||||
286
core/healing/recovery_logger.py
Normal file
286
core/healing/recovery_logger.py
Normal file
@@ -0,0 +1,286 @@
|
||||
"""Logging and monitoring for self-healing operations."""
|
||||
|
||||
import json
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional
|
||||
from datetime import datetime
|
||||
from .models import RecoveryContext, RecoveryResult
|
||||
|
||||
|
||||
class RecoveryLogger:
|
||||
"""Logger for self-healing recovery operations."""
|
||||
|
||||
def __init__(self, log_path: Optional[Path] = None):
|
||||
"""
|
||||
Initialize recovery logger.
|
||||
|
||||
Args:
|
||||
log_path: Path for storing recovery logs
|
||||
"""
|
||||
self.log_path = log_path or Path('logs/healing')
|
||||
self.log_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Setup file logger
|
||||
self.logger = logging.getLogger('healing')
|
||||
self.logger.setLevel(logging.INFO)
|
||||
|
||||
# File handler
|
||||
log_file = self.log_path / 'recovery.log'
|
||||
handler = logging.FileHandler(log_file)
|
||||
handler.setFormatter(logging.Formatter(
|
||||
'%(asctime)s - %(levelname)s - %(message)s'
|
||||
))
|
||||
self.logger.addHandler(handler)
|
||||
|
||||
# Metrics storage
|
||||
self.metrics_file = self.log_path / 'metrics.json'
|
||||
self.metrics = self._load_metrics()
|
||||
|
||||
def log_recovery_attempt(
|
||||
self,
|
||||
context: RecoveryContext,
|
||||
result: RecoveryResult
|
||||
):
|
||||
"""
|
||||
Log a recovery attempt with full details.
|
||||
|
||||
Args:
|
||||
context: Recovery context
|
||||
result: Recovery result
|
||||
"""
|
||||
log_entry = {
|
||||
'timestamp': datetime.now().isoformat(),
|
||||
'workflow_id': context.workflow_id,
|
||||
'node_id': context.node_id,
|
||||
'original_action': context.original_action,
|
||||
'target_element': context.target_element,
|
||||
'failure_reason': context.failure_reason,
|
||||
'attempt_count': context.attempt_count,
|
||||
'strategy_used': result.strategy_used,
|
||||
'success': result.success,
|
||||
'confidence_score': result.confidence_score,
|
||||
'execution_time': result.execution_time,
|
||||
'new_element': result.new_element,
|
||||
'requires_user_input': result.requires_user_input,
|
||||
'error_message': result.error_message
|
||||
}
|
||||
|
||||
# Log to file
|
||||
if result.success:
|
||||
self.logger.info(f"Recovery SUCCESS: {json.dumps(log_entry)}")
|
||||
else:
|
||||
self.logger.warning(f"Recovery FAILED: {json.dumps(log_entry)}")
|
||||
|
||||
# Update metrics
|
||||
self._update_metrics(context, result)
|
||||
|
||||
def log_user_intervention(
|
||||
self,
|
||||
context: RecoveryContext,
|
||||
user_action: str,
|
||||
details: Dict
|
||||
):
|
||||
"""
|
||||
Log user intervention in recovery process.
|
||||
|
||||
Args:
|
||||
context: Recovery context
|
||||
user_action: Action taken by user
|
||||
details: Additional details
|
||||
"""
|
||||
log_entry = {
|
||||
'timestamp': datetime.now().isoformat(),
|
||||
'workflow_id': context.workflow_id,
|
||||
'node_id': context.node_id,
|
||||
'user_action': user_action,
|
||||
'details': details
|
||||
}
|
||||
|
||||
self.logger.info(f"User intervention: {json.dumps(log_entry)}")
|
||||
|
||||
def get_recovery_statistics(
|
||||
self,
|
||||
workflow_id: Optional[str] = None
|
||||
) -> Dict:
|
||||
"""
|
||||
Get recovery statistics.
|
||||
|
||||
Args:
|
||||
workflow_id: Optional workflow ID to filter by
|
||||
|
||||
Returns:
|
||||
Dictionary with statistics
|
||||
"""
|
||||
metrics = self.metrics.copy()
|
||||
|
||||
if workflow_id and workflow_id in metrics.get('by_workflow', {}):
|
||||
return metrics['by_workflow'][workflow_id]
|
||||
|
||||
return metrics
|
||||
|
||||
def generate_insights(self) -> List[str]:
    """Summarize recovery metrics as human-readable insight strings.

    Returns:
        List of insight strings (may be empty when no data exists).
    """
    insights: List[str] = []
    stats = self.metrics

    # Overall success rate - only meaningful once attempts exist.
    attempts = stats.get('total_attempts', 0)
    if attempts > 0:
        rate = (stats.get('successful_recoveries', 0) / attempts) * 100
        insights.append(f"Overall recovery success rate: {rate:.1f}%")

    # Highlight the strategy with the best success rate so far.
    per_strategy = stats.get('strategy_performance', {})
    if per_strategy:
        name, perf = max(
            per_strategy.items(),
            key=lambda item: item[1].get('success_rate', 0)
        )
        insights.append(
            f"Best performing strategy: {name} "
            f"({perf.get('success_rate', 0):.1f}% success)"
        )

    # Rough estimate of manual work avoided by automatic recoveries.
    saved = stats.get('time_saved_hours', 0)
    if saved > 0:
        insights.append(f"Estimated time saved: {saved:.1f} hours")

    # Surface workflows that keep failing despite recovery attempts.
    flaky = self._detect_repeated_failures()
    if flaky:
        insights.append(
            f"Warning: {len(flaky)} workflows have repeated failures"
        )

    return insights
def check_for_alerts(self) -> List[Dict]:
    """Detect conditions that need administrator attention.

    Returns:
        List of alert dicts, each with 'severity', 'type', a 'message'
        and type-specific context fields.
    """
    alerts: List[Dict] = []

    # High severity: workflows whose recovery failed 5 or more times.
    for wf_id, failure_count in self._detect_repeated_failures().items():
        if failure_count < 5:
            continue
        alerts.append({
            'severity': 'high',
            'type': 'repeated_failures',
            'workflow_id': wf_id,
            'count': failure_count,
            'message': f'Workflow {wf_id} has {failure_count} repeated failures'
        })

    # Medium severity: strategies under 50% success with enough samples
    # (>= 10 attempts) to make the rate meaningful.
    for name, perf in self.metrics.get('strategy_performance', {}).items():
        rate = perf.get('success_rate', 0)
        if perf.get('attempts', 0) >= 10 and rate < 50:
            alerts.append({
                'severity': 'medium',
                'type': 'low_success_rate',
                'strategy': name,
                'success_rate': rate,
                'message': f'Strategy {name} has low success rate: {rate:.1f}%'
            })

    return alerts
def _update_metrics(self, context: RecoveryContext, result: RecoveryResult):
    """Fold one recovery outcome into the aggregate metrics and persist.

    Updates the global counters, per-strategy success rates and
    per-workflow counters, then saves the metrics file.

    Args:
        context: Recovery context describing the failed node.
        result: Outcome of the recovery attempt.
    """
    self.metrics['total_attempts'] = self.metrics.get('total_attempts', 0) + 1

    if result.success:
        self.metrics['successful_recoveries'] = \
            self.metrics.get('successful_recoveries', 0) + 1
        # Each automatic recovery is assumed to replace ~5 minutes of
        # manual intervention.
        self.metrics['time_saved_hours'] = (
            self.metrics.get('time_saved_hours', 0.0) + (5.0 / 60.0)
        )

    # Per-strategy counters and derived success rate.
    per_strategy = self.metrics.setdefault('strategy_performance', {})
    perf = per_strategy.setdefault(result.strategy_used, {
        'attempts': 0,
        'successes': 0,
        'success_rate': 0.0
    })
    perf['attempts'] += 1
    if result.success:
        perf['successes'] += 1
    perf['success_rate'] = (perf['successes'] / perf['attempts']) * 100

    # Per-workflow counters.
    per_workflow = self.metrics.setdefault('by_workflow', {})
    wf = per_workflow.setdefault(context.workflow_id, {
        'attempts': 0,
        'successes': 0,
        'failures': 0
    })
    wf['attempts'] += 1
    if result.success:
        wf['successes'] += 1
    else:
        wf['failures'] += 1

    self._save_metrics()
def _detect_repeated_failures(self) -> Dict[str, int]:
|
||||
"""Detect workflows with repeated failures."""
|
||||
repeated = {}
|
||||
by_workflow = self.metrics.get('by_workflow', {})
|
||||
|
||||
for workflow_id, metrics in by_workflow.items():
|
||||
failures = metrics.get('failures', 0)
|
||||
if failures >= 3:
|
||||
repeated[workflow_id] = failures
|
||||
|
||||
return repeated
|
||||
|
||||
def _load_metrics(self) -> Dict:
|
||||
"""Load metrics from storage."""
|
||||
if not self.metrics_file.exists():
|
||||
return {}
|
||||
|
||||
try:
|
||||
with open(self.metrics_file, 'r') as f:
|
||||
return json.load(f)
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error loading metrics: {e}")
|
||||
return {}
|
||||
|
||||
def _save_metrics(self):
|
||||
"""Save metrics to storage."""
|
||||
try:
|
||||
with open(self.metrics_file, 'w') as f:
|
||||
json.dump(self.metrics, f, indent=2)
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error saving metrics: {e}")
|
||||
15
core/healing/strategies/__init__.py
Normal file
15
core/healing/strategies/__init__.py
Normal file
@@ -0,0 +1,15 @@
|
||||
"""Recovery strategies for self-healing workflows."""
|
||||
|
||||
from .base_strategy import RecoveryStrategy
|
||||
from .semantic_variants import SemanticVariantStrategy
|
||||
from .spatial_fallback import SpatialFallbackStrategy
|
||||
from .timing_adaptation import TimingAdaptationStrategy
|
||||
from .format_transformation import FormatTransformationStrategy
|
||||
|
||||
__all__ = [
|
||||
'RecoveryStrategy',
|
||||
'SemanticVariantStrategy',
|
||||
'SpatialFallbackStrategy',
|
||||
'TimingAdaptationStrategy',
|
||||
'FormatTransformationStrategy',
|
||||
]
|
||||
50
core/healing/strategies/base_strategy.py
Normal file
50
core/healing/strategies/base_strategy.py
Normal file
@@ -0,0 +1,50 @@
|
||||
"""Base class for recovery strategies."""
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Optional
|
||||
from ..models import RecoveryContext, RecoveryResult
|
||||
|
||||
|
||||
class RecoveryStrategy(ABC):
    """Base interface that every recovery strategy implements.

    Concrete strategies override attempt_recovery() and usually narrow
    can_handle() to the failure reasons they support.
    """

    def __init__(self):
        # Human-readable strategy name, handy for logs and metrics.
        self.name = self.__class__.__name__

    @abstractmethod
    def attempt_recovery(self, context: RecoveryContext) -> RecoveryResult:
        """Attempt to recover from a workflow failure.

        Args:
            context: Recovery context with failure information.

        Returns:
            RecoveryResult describing the outcome of the attempt.
        """

    def can_handle(self, context: RecoveryContext) -> bool:
        """Return True when this strategy applies to the given failure.

        The base implementation accepts every context; subclasses
        restrict it to their supported failure reasons.
        """
        return True

    def get_priority(self, context: RecoveryContext) -> float:
        """Return a scheduling priority for this strategy.

        Args:
            context: Recovery context.

        Returns:
            Priority score in [0.0, 1.0]; higher values run first.
            The base default is a neutral 0.5.
        """
        return 0.5
222
core/healing/strategies/format_transformation.py
Normal file
222
core/healing/strategies/format_transformation.py
Normal file
@@ -0,0 +1,222 @@
|
||||
"""Format transformation recovery strategy."""
|
||||
|
||||
import re
|
||||
import time
|
||||
from typing import List, Optional
|
||||
from datetime import datetime
|
||||
from .base_strategy import RecoveryStrategy
|
||||
from ..models import RecoveryContext, RecoveryResult
|
||||
|
||||
|
||||
class FormatTransformationStrategy(RecoveryStrategy):
    """Transform input formats to match validation requirements.

    Recovers from 'validation_failed' / 'format_error' failures by
    re-emitting the rejected input value (date, phone number or free
    text) in an alternative format and proposing that value.
    """

    def __init__(self):
        """Initialize format transformation strategy."""
        super().__init__()

        # Date format patterns, used for both parsing and re-emitting.
        self.date_formats = [
            '%Y-%m-%d',   # 2024-11-30
            '%d/%m/%Y',   # 30/11/2024
            '%m/%d/%Y',   # 11/30/2024
            '%d-%m-%Y',   # 30-11-2024
            '%Y/%m/%d',   # 2024/11/30
            '%d.%m.%Y',   # 30.11.2024
            '%B %d, %Y',  # November 30, 2024
            '%d %B %Y',   # 30 November 2024
        ]

        # Phone re-formatters, tried in order until one changes the value.
        self.phone_formats = [
            lambda p: p,                           # Original
            lambda p: re.sub(r'\D', '', p),        # Digits only
            lambda p: "+" + re.sub(r"\D", "", p),  # +digits
            lambda p: self._format_phone_us(p),    # (123) 456-7890
            lambda p: self._format_phone_intl(p),  # +1-123-456-7890
        ]

    def attempt_recovery(self, context: RecoveryContext) -> RecoveryResult:
        """
        Try to transform the input format to match validation.

        Args:
            context: Recovery context; metadata['input_value'] must hold
                the rejected value.

        Returns:
            RecoveryResult with outcome; on success, new_element carries
            the reformatted value.
        """
        start_time = time.time()

        # Only handle format validation failures
        if context.failure_reason not in ['validation_failed', 'format_error']:
            return RecoveryResult(
                success=False,
                strategy_used='format_transformation',
                error_message='Strategy only handles format/validation failures'
            )

        # Get input value
        input_value = context.metadata.get('input_value', '')
        if not input_value:
            return RecoveryResult(
                success=False,
                strategy_used='format_transformation',
                error_message='No input value provided in context'
            )

        # Detect input type and try transformations
        input_type = self._detect_input_type(input_value, context)

        if input_type == 'date':
            result = self._try_date_formats(input_value, context)
        elif input_type == 'phone':
            result = self._try_phone_formats(input_value, context)
        elif input_type == 'text':
            result = self._try_text_adaptations(input_value, context)
        else:
            result = None

        execution_time = time.time() - start_time

        if result:
            return RecoveryResult(
                success=True,
                strategy_used='format_transformation',
                new_element=result['formatted_value'],
                confidence_score=result['confidence'],
                execution_time=execution_time,
                learned_pattern={
                    'input_type': input_type,
                    'original_format': input_value,
                    'new_format': result['formatted_value'],
                    'transformation': result['transformation']
                }
            )

        return RecoveryResult(
            success=False,
            strategy_used='format_transformation',
            execution_time=execution_time,
            error_message=f'Could not find valid format transformation for: {input_value}'
        )

    def can_handle(self, context: RecoveryContext) -> bool:
        """Check if this strategy can handle the failure."""
        return context.failure_reason in ['validation_failed', 'format_error', 'input_rejected']

    def _detect_input_type(self, value: str, context: RecoveryContext) -> str:
        """Detect the input type: 'date', 'phone' or 'text'."""
        # Explicit metadata wins over heuristics
        if 'input_type' in context.metadata:
            return context.metadata['input_type']

        # Fall back to pattern-based detection
        if self._looks_like_date(value):
            return 'date'
        elif self._looks_like_phone(value):
            return 'phone'
        else:
            return 'text'

    def _looks_like_date(self, value: str) -> bool:
        """Check if value contains a date-like pattern."""
        date_patterns = [
            r'\d{4}[-/]\d{1,2}[-/]\d{1,2}',  # YYYY-MM-DD
            r'\d{1,2}[-/]\d{1,2}[-/]\d{4}',  # DD-MM-YYYY or MM-DD-YYYY
            r'\d{1,2}\s+\w+\s+\d{4}',        # DD Month YYYY
        ]
        return any(re.search(pattern, value) for pattern in date_patterns)

    def _looks_like_phone(self, value: str) -> bool:
        """Check if value looks like a phone number (7-15 digits)."""
        digits = re.sub(r'\D', '', value)
        return len(digits) >= 7 and len(digits) <= 15

    def _try_date_formats(self, value: str, context: RecoveryContext) -> Optional[dict]:
        """Re-emit a parsed date in an alternative format.

        Returns:
            Dict with 'formatted_value', 'confidence' and 'transformation',
            or None when the value cannot be parsed as a date.
        """
        # Parse with the first format that accepts the value
        parsed_date = None
        for fmt in self.date_formats:
            try:
                parsed_date = datetime.strptime(value, fmt)
                break
            except ValueError:
                continue

        if not parsed_date:
            return None

        # Try different output formats
        for fmt in self.date_formats:
            formatted = parsed_date.strftime(fmt)
            # In a real implementation this format would be validated
            # against the target field; for now assume the first format
            # that differs from the input works.
            if formatted != value:
                return {
                    'formatted_value': formatted,
                    'confidence': 0.85,
                    'transformation': f'date_format:{fmt}'
                }

        return None

    def _try_phone_formats(self, value: str, context: RecoveryContext) -> Optional[dict]:
        """Try alternative phone number formats."""
        for i, formatter in enumerate(self.phone_formats):
            try:
                formatted = formatter(value)
                if formatted != value:
                    return {
                        'formatted_value': formatted,
                        'confidence': 0.75,
                        'transformation': f'phone_format:{i}'
                    }
            # FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception so a
            # broken formatter is skipped but shutdown signals propagate.
            except Exception:
                continue

        return None

    def _try_text_adaptations(self, value: str, context: RecoveryContext) -> Optional[dict]:
        """Try text adaptations: truncation, then whitespace cleanup."""
        # Check if there's a max length constraint
        max_length = context.metadata.get('max_length')

        if max_length and len(value) > max_length:
            # Try truncation
            truncated = value[:max_length]
            return {
                'formatted_value': truncated,
                'confidence': 0.6,
                'transformation': f'truncate:{max_length}'
            }

        # Collapse runs of whitespace
        cleaned = ' '.join(value.split())
        if cleaned != value:
            return {
                'formatted_value': cleaned,
                'confidence': 0.7,
                'transformation': 'clean_whitespace'
            }

        return None

    def _format_phone_us(self, phone: str) -> str:
        """Format phone number as US format: (123) 456-7890."""
        digits = re.sub(r'\D', '', phone)
        if len(digits) == 10:
            return f"({digits[:3]}) {digits[3:6]}-{digits[6:]}"
        # Not a 10-digit number: leave unchanged
        return phone

    def _format_phone_intl(self, phone: str) -> str:
        """Format phone number as international: +1-123-456-7890."""
        digits = re.sub(r'\D', '', phone)
        if len(digits) == 10:
            return f"+1-{digits[:3]}-{digits[3:6]}-{digits[6:]}"
        elif len(digits) == 11 and digits[0] == '1':
            return f"+{digits[0]}-{digits[1:4]}-{digits[4:7]}-{digits[7:]}"
        # Not a recognizable NANP number: leave unchanged
        return phone
154
core/healing/strategies/semantic_variants.py
Normal file
154
core/healing/strategies/semantic_variants.py
Normal file
@@ -0,0 +1,154 @@
|
||||
"""Semantic variant recovery strategy."""
|
||||
|
||||
import re
|
||||
from typing import List, Dict, Optional
|
||||
from .base_strategy import RecoveryStrategy
|
||||
from ..models import RecoveryContext, RecoveryResult
|
||||
|
||||
|
||||
class SemanticVariantStrategy(RecoveryStrategy):
    """Find semantic variants (synonyms) of missing UI element labels."""

    def __init__(self):
        """Initialize semantic variant strategy."""
        super().__init__()

        # Predefined semantic mappings (English and French)
        self.variant_mappings = {
            'submit': ['send', 'ok', 'confirm', 'apply', 'save', 'envoyer', 'valider', 'soumettre'],
            'cancel': ['close', 'abort', 'back', 'dismiss', 'annuler', 'fermer', 'retour'],
            'login': ['sign in', 'log in', 'connect', 'connexion', 'se connecter', 'authentifier'],
            'logout': ['sign out', 'log out', 'disconnect', 'déconnexion', 'se déconnecter'],
            'search': ['find', 'lookup', 'query', 'chercher', 'rechercher', 'trouver'],
            'delete': ['remove', 'trash', 'erase', 'supprimer', 'effacer', 'retirer'],
            'edit': ['modify', 'change', 'update', 'modifier', 'changer', 'éditer'],
            'add': ['create', 'new', 'insert', 'ajouter', 'créer', 'nouveau'],
            'next': ['continue', 'forward', 'suivant', 'continuer', 'avancer'],
            'previous': ['back', 'backward', 'précédent', 'retour', 'arrière'],
            'yes': ['ok', 'confirm', 'accept', 'oui', 'confirmer', 'accepter'],
            'no': ['cancel', 'decline', 'reject', 'non', 'refuser', 'décliner'],
        }

        # Reverse index: variant text -> canonical keys that list it.
        self.reverse_mapping = {}
        for key, variants in self.variant_mappings.items():
            for variant in variants:
                self.reverse_mapping.setdefault(variant, []).append(key)

    def attempt_recovery(self, context: RecoveryContext) -> RecoveryResult:
        """
        Try to locate the element under a synonym of its label.

        Args:
            context: Recovery context

        Returns:
            RecoveryResult with outcome
        """
        import time
        start_time = time.time()

        # Pull the human-readable label out of the element identifier.
        label = self._extract_text_from_element(context.target_element)
        if not label:
            return RecoveryResult(
                success=False,
                strategy_used='semantic_variant',
                error_message='Could not extract text from element'
            )

        # Try each known synonym until one is found on screen.
        for candidate in self._get_semantic_variants(label):
            # Placeholder lookup - real implementation queries the UI detector.
            found = self._find_element_by_text(candidate, context)
            if not found:
                continue
            return RecoveryResult(
                success=True,
                strategy_used='semantic_variant',
                new_element=found,
                confidence_score=self._calculate_semantic_confidence(label, candidate),
                execution_time=time.time() - start_time,
                learned_pattern={
                    'original_text': label,
                    'found_variant': candidate
                }
            )

        return RecoveryResult(
            success=False,
            strategy_used='semantic_variant',
            execution_time=time.time() - start_time,
            error_message=f'No semantic variants found for: {label}'
        )

    def can_handle(self, context: RecoveryContext) -> bool:
        """Check if this strategy can handle the failure."""
        return context.failure_reason in ['element_not_found', 'element_changed']

    def _extract_text_from_element(self, element: str) -> str:
        """Strip element-kind prefixes ("button:", "link:", ...) from an id."""
        # Defensive: identifiers are expected to be strings, but fall
        # back to str() for anything else.
        if not isinstance(element, str):
            return str(element)
        stripped = re.sub(r'^(button|link|input|text):', '', element, flags=re.IGNORECASE)
        return stripped.strip()

    def _get_semantic_variants(self, text: str) -> List[str]:
        """Collect known synonyms for *text* (case-insensitive)."""
        key = text.lower().strip()
        collected = set()

        # Canonical term -> its listed synonyms
        collected.update(self.variant_mappings.get(key, []))

        # Known synonym -> all synonyms of its canonical term(s)
        for canonical in self.reverse_mapping.get(key, []):
            collected.update(self.variant_mappings[canonical])

        # Never suggest the original text itself
        collected.discard(key)
        return list(collected)

    def _find_element_by_text(self, text: str, context: RecoveryContext) -> Optional[str]:
        """
        Find element by text in screenshot.

        This is a placeholder - real implementation would use UI detector.
        """
        # TODO: Integrate with UIDetector to actually find elements
        return None

    def _calculate_semantic_confidence(self, original: str, variant: str) -> float:
        """Score how trustworthy a variant match is (0.6 - 0.85)."""
        source = original.lower().strip()
        target = variant.lower().strip()

        # Direct mapping: canonical term -> one of its listed synonyms
        direct = self.variant_mappings.get(source)
        if direct is not None and target in direct:
            return 0.85

        # Reverse mapping: the original is itself a known synonym
        if source in self.reverse_mapping:
            return 0.75

        # Anything else is a fuzzy guess
        return 0.6
174
core/healing/strategies/spatial_fallback.py
Normal file
174
core/healing/strategies/spatial_fallback.py
Normal file
@@ -0,0 +1,174 @@
|
||||
"""Spatial fallback recovery strategy."""
|
||||
|
||||
import time
|
||||
from typing import Optional, List, Tuple
|
||||
from .base_strategy import RecoveryStrategy
|
||||
from ..models import RecoveryContext, RecoveryResult
|
||||
|
||||
|
||||
class SpatialFallbackStrategy(RecoveryStrategy):
    """Search in expanded areas around the original element position.

    Recovers 'element_not_found' / 'element_moved' failures by scanning
    progressively larger boxes around the element's last known (x, y).
    """

    def __init__(self):
        """Initialize spatial fallback strategy."""
        super().__init__()
        # Progressively wider search radii around the last known position.
        self.search_radii = [50, 100, 200, 400]  # pixels

    def attempt_recovery(self, context: RecoveryContext) -> RecoveryResult:
        """
        Search in progressively larger areas around original position.

        Args:
            context: Recovery context; position comes from
                metadata['position'] or an "element@(x,y)" identifier.

        Returns:
            RecoveryResult with outcome
        """
        start_time = time.time()

        # Get original position
        original_pos = self._get_original_position(context)
        if not original_pos:
            return RecoveryResult(
                success=False,
                strategy_used='spatial_fallback',
                error_message='Could not determine original element position'
            )

        # Try progressively larger search areas
        for radius in self.search_radii:
            search_area = self._expand_search_area(original_pos, radius)
            elements = self._find_similar_elements_in_area(search_area, context)

            if elements:
                best_match = self._select_best_spatial_match(elements, original_pos)
                confidence = self._calculate_spatial_confidence(best_match, original_pos, radius)
                execution_time = time.time() - start_time

                return RecoveryResult(
                    success=True,
                    strategy_used='spatial_fallback',
                    new_element=best_match['element'],
                    confidence_score=confidence,
                    execution_time=execution_time,
                    learned_pattern={
                        'original_position': original_pos,
                        'found_position': best_match['position'],
                        'search_radius': radius
                    }
                )

        execution_time = time.time() - start_time
        return RecoveryResult(
            success=False,
            strategy_used='spatial_fallback',
            execution_time=execution_time,
            error_message='No similar elements found in expanded search areas'
        )

    def can_handle(self, context: RecoveryContext) -> bool:
        """Check if this strategy can handle the failure."""
        return context.failure_reason in ['element_not_found', 'element_moved']

    def _get_original_position(self, context: RecoveryContext) -> Optional[Tuple[int, int]]:
        """Extract the original element position from the context.

        Looks first in metadata['position'], then parses an
        "element@(x,y)" suffix from the element identifier.

        Returns:
            (x, y) tuple, or None when no position can be determined.
        """
        # Try to get position from metadata
        if 'position' in context.metadata:
            pos = context.metadata['position']
            if isinstance(pos, (list, tuple)) and len(pos) >= 2:
                return (int(pos[0]), int(pos[1]))

        # Try to parse from element string, format: "element@(x,y)"
        if '@(' in context.target_element:
            try:
                pos_str = context.target_element.split('@(')[1].split(')')[0]
                x, y = pos_str.split(',')
                return (int(x.strip()), int(y.strip()))
            # FIX: was a bare `except: pass`, which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to the actual parse
            # failures (bad int, wrong comma count, malformed suffix).
            except (ValueError, IndexError):
                pass

        return None

    def _expand_search_area(
        self,
        center: Tuple[int, int],
        radius: int
    ) -> Tuple[int, int, int, int]:
        """
        Expand the search area around a center point.

        Returns:
            (x1, y1, x2, y2) bounding box, clamped at 0 on the
            top/left screen edges.
        """
        x, y = center
        return (
            max(0, x - radius),
            max(0, y - radius),
            x + radius,
            y + radius
        )

    def _find_similar_elements_in_area(
        self,
        search_area: Tuple[int, int, int, int],
        context: RecoveryContext
    ) -> List[dict]:
        """
        Find similar elements in the search area.

        This is a placeholder - real implementation would use UI detector.
        """
        # TODO: Integrate with UIDetector to find elements in area
        return []

    def _select_best_spatial_match(
        self,
        elements: List[dict],
        original_pos: Tuple[int, int]
    ) -> dict:
        """Pick the element with the best distance/similarity tradeoff."""
        if not elements:
            return None

        # Score each element
        scored_elements = []
        for element in elements:
            distance = self._calculate_distance(original_pos, element['position'])
            similarity = element.get('similarity', 0.5)

            # Combined score (closer and more similar = better)
            score = similarity * (1.0 / (1.0 + distance / 100.0))
            scored_elements.append((score, element))

        # Return element with highest score
        scored_elements.sort(key=lambda x: x[0], reverse=True)
        return scored_elements[0][1]

    def _calculate_spatial_confidence(
        self,
        match: dict,
        original_pos: Tuple[int, int],
        radius: int
    ) -> float:
        """Confidence in [0, 1]: weighted 60% similarity, 40% proximity."""
        distance = self._calculate_distance(original_pos, match['position'])
        similarity = match.get('similarity', 0.5)

        # Distance factor (closer = higher confidence)
        distance_factor = 1.0 - (distance / (radius * 2))
        distance_factor = max(0.0, min(1.0, distance_factor))

        # Combined confidence
        confidence = (similarity * 0.6 + distance_factor * 0.4)

        return max(0.0, min(1.0, confidence))

    def _calculate_distance(
        self,
        pos1: Tuple[int, int],
        pos2: Tuple[int, int]
    ) -> float:
        """Calculate Euclidean distance between two positions."""
        return ((pos1[0] - pos2[0])**2 + (pos1[1] - pos2[1])**2)**0.5
150
core/healing/strategies/timing_adaptation.py
Normal file
150
core/healing/strategies/timing_adaptation.py
Normal file
@@ -0,0 +1,150 @@
|
||||
"""Timing adaptation recovery strategy."""
|
||||
|
||||
import time
|
||||
from typing import Dict
|
||||
from .base_strategy import RecoveryStrategy
|
||||
from ..models import RecoveryContext, RecoveryResult
|
||||
|
||||
|
||||
class TimingAdaptationStrategy(RecoveryStrategy):
    """Adapt wait times and timeouts based on observed performance."""

    def __init__(self):
        """Initialize timing adaptation strategy."""
        super().__init__()
        # Per-element lists of wait times that previously succeeded.
        self.performance_history: Dict[str, list] = {}
        self.min_wait = 0.5           # seconds, lower clamp
        self.max_wait = 30.0          # seconds, upper clamp
        self.adaptation_factor = 1.5  # multiplier applied after a timeout

    def attempt_recovery(self, context: RecoveryContext) -> RecoveryResult:
        """
        Retry a timed-out action with a longer, clamped wait time.

        Args:
            context: Recovery context

        Returns:
            RecoveryResult with outcome
        """
        start_time = time.time()

        # This strategy is timeout-specific.
        if context.failure_reason != 'timeout':
            return RecoveryResult(
                success=False,
                strategy_used='timing_adaptation',
                error_message='Strategy only handles timeout failures'
            )

        # Grow the previous wait time, clamped to the configured maximum.
        current_wait = self._get_current_wait_time(context)
        adapted_wait = min(current_wait * self.adaptation_factor, self.max_wait)

        succeeded = self._retry_with_timing(context, adapted_wait)
        elapsed = time.time() - start_time

        if not succeeded:
            return RecoveryResult(
                success=False,
                strategy_used='timing_adaptation',
                execution_time=elapsed,
                error_message=f'Timeout even with adapted wait time: {adapted_wait}s'
            )

        # Remember the wait that worked so future runs start closer to it.
        self._update_performance_history(context, adapted_wait)

        return RecoveryResult(
            success=True,
            strategy_used='timing_adaptation',
            confidence_score=0.8,
            execution_time=elapsed,
            learned_pattern={
                'original_wait': current_wait,
                'new_wait_time': adapted_wait,
                'element': context.target_element
            }
        )

    def can_handle(self, context: RecoveryContext) -> bool:
        """Check if this strategy can handle the failure."""
        return context.failure_reason == 'timeout'

    def get_optimized_wait_time(self, element_key: str, default: float = 5.0) -> float:
        """
        Return a wait time tuned from recent successful attempts.

        Args:
            element_key: Key identifying the element/action.
            default: Fallback wait time when no history exists.

        Returns:
            Wait time in seconds, clamped to [min_wait, max_wait].
        """
        history = self.performance_history.get(element_key)
        if not history:
            return default

        # Mean of the last 10 successes plus a 20% safety buffer.
        recent = history[-10:]
        padded = (sum(recent) / len(recent)) * 1.2

        return max(self.min_wait, min(padded, self.max_wait))

    def _get_current_wait_time(self, context: RecoveryContext) -> float:
        """Resolve the wait time that was in effect before the timeout."""
        # Explicit metadata wins.
        if 'wait_time' in context.metadata:
            return float(context.metadata['wait_time'])

        # Otherwise, reuse the most recent successful wait for this element.
        history = self.performance_history.get(self._get_element_key(context))
        if history:
            return history[-1]

        # Conservative default.
        return 5.0

    def _retry_with_timing(self, context: RecoveryContext, wait_time: float) -> bool:
        """
        Retry the action with adapted timing.

        This is a placeholder - real implementation would retry the actual action.
        """
        # TODO: Integrate with execution loop to actually retry
        time.sleep(min(wait_time, 1.0))  # Cap at 1s for testing

        # Simulated outcome until the real retry hook exists.
        return wait_time >= 3.0

    def _update_performance_history(self, context: RecoveryContext, wait_time: float):
        """Append a successful wait time, keeping only the last 50 entries."""
        key = self._get_element_key(context)
        entries = self.performance_history.setdefault(key, [])
        entries.append(wait_time)
        if len(entries) > 50:
            self.performance_history[key] = entries[-50:]

    def _get_element_key(self, context: RecoveryContext) -> str:
        """Stable identifier for this workflow/node/element combination."""
        return f"{context.workflow_id}:{context.node_id}:{context.target_element}"
Reference in New Issue
Block a user