feat(grounding): pipeline centralisé + serveur UI-TARS transformers + nettoyage code mort
Architecture grounding complète :
- core/grounding/server.py : serveur FastAPI (port 8200) avec UI-TARS-1.5-7B en 4-bit NF4
Process séparé avec son propre contexte CUDA (résout le crash Flask/CUDA)
- core/grounding/pipeline.py : orchestrateur cascade template→OCR→UI-TARS→static
- core/grounding/template_matcher.py : TemplateMatcher centralisé (remplace 5 copies)
- core/grounding/ui_tars_grounder.py : client HTTP vers le serveur de grounding
- core/grounding/target.py : GroundingTarget + GroundingResult
ORA modifié :
- _act_click() : capture unique de l'écran envoyée au serveur de grounding
- Pre-check VLM skippé pour ui_tars (redondant, et Ollama n'a plus de VRAM)
- verify_level='none' par défaut (vérification titre OCR prévue en Phase 2)
- Détection réponses négatives UI-TARS ("I don't see it" → fallback OCR)
Nettoyage :
- 9 fichiers morts archivés dans _archive/ (~6300 lignes supprimées)
- 21 tests ajoutés pour TemplateMatcher
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -1,483 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Service de Capture Contextuelle pour RPA Vision V3
|
||||
|
||||
Ce service gère la capture du contexte environnant des éléments sélectionnés,
|
||||
incluant les éléments voisins, la hiérarchie visuelle et les métadonnées contextuelles.
|
||||
|
||||
Exigences: 7.1, 7.2, 7.3, 7.4, 7.5
|
||||
Auteur: Assistant IA
|
||||
Date: 2026-01-07
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from typing import Dict, List, Optional, Tuple, Any
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
import numpy as np
|
||||
|
||||
from core.models import UIElement, BBox, ScreenState
|
||||
from core.capture.screen_capturer import ScreenCapturer
|
||||
from core.detection.ui_detector import UIDetector
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@dataclass
class ContextualElement:
    """One element found in the neighbourhood of a target element."""
    element: UIElement
    # Position relative to the target: 'above', 'below', 'left', 'right',
    # 'inside' or 'adjacent'.
    spatial_relationship: str
    distance: float  # distance to the target, in pixels
    relevance_score: float  # contextual relevance score, 0-1
    visual_similarity: float  # visual similarity with the target, 0-1
|
||||
|
||||
@dataclass
class VisualHierarchy:
    """Visual hierarchy (parent / children / siblings) of an element."""
    parent_container: Optional[UIElement] = None
    child_elements: List[UIElement] = field(default_factory=list)
    sibling_elements: List[UIElement] = field(default_factory=list)
    depth_level: int = 0  # number of containers enclosing the element
    container_type: str = "unknown"  # 'form', 'dialog', 'panel', 'page', etc.
|
||||
|
||||
@dataclass
class ContextualMetadata:
    """Enriched contextual metadata captured around a target element."""
    surrounding_elements: List[ContextualElement] = field(default_factory=list)
    visual_hierarchy: Optional[VisualHierarchy] = None
    screen_region: str = "unknown"  # 'header', 'sidebar', 'main', 'footer', etc.
    visual_density: float = 0.0  # element density in the zone, 0-1
    color_palette: List[str] = field(default_factory=list)  # dominant colours
    text_context: List[str] = field(default_factory=list)  # nearby text snippets
    capture_timestamp: datetime = field(default_factory=datetime.now)
|
||||
|
||||
class ContextualCaptureService:
    """Enrich a selected UI element with information about its visual
    environment (requirements 7.1-7.5).

    Given a full :class:`ScreenState`, the service derives for a target
    element: its nearby elements, its visual hierarchy (parent, children,
    siblings), the screen region it sits in, the local element density,
    a dominant colour palette and the surrounding text fragments.
    """

    def __init__(self, screen_capturer: ScreenCapturer, ui_detector: UIDetector):
        """Create the service.

        Args:
            screen_capturer: screen-capture backend.
            ui_detector: UI element detector run on captured frames.
        """
        self.screen_capturer = screen_capturer
        self.ui_detector = ui_detector
        self.context_radius = 200  # pixels around the target considered "context"
        self.max_contextual_elements = 20  # cap on neighbours kept per capture

        logger.info("Service de capture contextuelle initialisé")

    async def capture_element_context(
        self,
        target_element: UIElement,
        screen_state: Optional[ScreenState] = None
    ) -> ContextualMetadata:
        """Capture the full context of a target element.

        Args:
            target_element: element whose context is captured.
            screen_state: current screen state; captured on the fly when omitted.

        Returns:
            Enriched contextual metadata; an empty ``ContextualMetadata``
            on any failure (best-effort contract, never raises).
        """
        try:
            logger.info(f"Capture du contexte pour élément: {target_element.element_type}")

            if screen_state is None:
                screen_state = await self._capture_current_screen()

            neighbours = await self._analyze_surrounding_elements(target_element, screen_state)
            hierarchy = await self._build_visual_hierarchy(target_element, screen_state)
            region = self._determine_screen_region(target_element, screen_state)
            density = self._calculate_visual_density(target_element, screen_state)
            palette = await self._extract_color_palette(target_element, screen_state)
            texts = self._collect_text_context(target_element, screen_state)

            metadata = ContextualMetadata(
                surrounding_elements=neighbours,
                visual_hierarchy=hierarchy,
                screen_region=region,
                visual_density=density,
                color_palette=palette,
                text_context=texts,
                capture_timestamp=datetime.now()
            )

            logger.info(f"Contexte capturé: {len(neighbours)} éléments environnants")
            return metadata

        except Exception as e:
            logger.error(f"Erreur lors de la capture du contexte: {e}")
            return ContextualMetadata()

    async def _capture_current_screen(self) -> ScreenState:
        """Capture the screen and run UI detection on the frame."""
        try:
            frame = await self.screen_capturer.capture_screen()
            return await self.ui_detector.detect_elements(frame)
        except Exception as e:
            logger.error(f"Erreur lors de la capture d'écran: {e}")
            raise

    async def _analyze_surrounding_elements(
        self,
        target_element: UIElement,
        screen_state: ScreenState
    ) -> List[ContextualElement]:
        """Rank the elements surrounding a target element.

        Args:
            target_element: the element of interest.
            screen_state: full screen state to scan.

        Returns:
            Contextual elements within ``context_radius``, sorted by
            decreasing relevance and capped at ``max_contextual_elements``.
        """
        target_bbox = target_element.bounding_box
        centre = self._get_bbox_center(target_bbox)

        neighbours: List[ContextualElement] = []
        for candidate in screen_state.ui_elements:
            if candidate == target_element:
                continue

            dist = self._calculate_distance(
                centre, self._get_bbox_center(candidate.bounding_box)
            )
            if dist > self.context_radius:
                continue  # outside the context zone

            neighbours.append(ContextualElement(
                element=candidate,
                spatial_relationship=self._determine_spatial_relationship(
                    target_bbox, candidate.bounding_box
                ),
                distance=dist,
                relevance_score=self._calculate_relevance_score(
                    target_element, candidate, dist
                ),
                visual_similarity=self._calculate_visual_similarity(
                    target_element, candidate
                )
            ))

        neighbours.sort(key=lambda ce: ce.relevance_score, reverse=True)
        return neighbours[:self.max_contextual_elements]

    async def _build_visual_hierarchy(
        self,
        target_element: UIElement,
        screen_state: ScreenState
    ) -> VisualHierarchy:
        """Derive the visual hierarchy (parent, children, siblings) of an element.

        The parent is the smallest element that fully contains the target;
        children are elements the target contains; siblings are the other
        elements inside the same parent that are not children of the target.
        """
        target_bbox = target_element.bounding_box
        others = [el for el in screen_state.ui_elements if el != target_element]

        # Smallest enclosing element wins as the parent container.
        parent = None
        smallest_area = float('inf')
        for el in others:
            if self._bbox_contains(el.bounding_box, target_bbox):
                area = self._calculate_bbox_area(el.bounding_box)
                if area < smallest_area:
                    smallest_area = area
                    parent = el

        children = [
            el for el in others
            if self._bbox_contains(target_bbox, el.bounding_box)
        ]

        siblings: List[UIElement] = []
        if parent:
            siblings = [
                el for el in others
                if el != parent
                and self._bbox_contains(parent.bounding_box, el.bounding_box)
                and not self._bbox_contains(target_bbox, el.bounding_box)
            ]

        return VisualHierarchy(
            parent_container=parent,
            child_elements=children,
            sibling_elements=siblings,
            depth_level=self._calculate_depth_level(target_element, screen_state),
            container_type=self._determine_container_type(parent) if parent else "unknown"
        )

    def _determine_screen_region(self, target_element: UIElement, screen_state: ScreenState) -> str:
        """Name the screen region holding the element, e.g. ``"header_left"``.

        Falls back to a 1920x1080 screen when no screenshot is attached.
        """
        bbox = target_element.bounding_box
        width = screen_state.screenshot.width if screen_state.screenshot else 1920
        height = screen_state.screenshot.height if screen_state.screenshot else 1080

        cx = (bbox.x + bbox.width / 2) / width
        cy = (bbox.y + bbox.height / 2) / height

        # Vertical band: top 20% / bottom 20% / middle.
        if cy < 0.2:
            row = "header"
        elif cy > 0.8:
            row = "footer"
        else:
            row = "main"

        # Horizontal band: left 20% / right 20% / middle.
        if cx < 0.2:
            column = "left"
        elif cx > 0.8:
            column = "right"
        else:
            column = "center"

        return f"{row}_{column}"

    def _calculate_visual_density(self, target_element: UIElement, screen_state: ScreenState) -> float:
        """Ratio (0-1) of element surface to total surface in the context zone."""
        tb = target_element.bounding_box

        # Analysis window: the target grown by the context radius on each side,
        # clamped to the top-left screen corner.
        zone = BBox(
            x=max(0, tb.x - self.context_radius),
            y=max(0, tb.y - self.context_radius),
            width=tb.width + 2 * self.context_radius,
            height=tb.height + 2 * self.context_radius
        )

        occupied = 0
        for el in screen_state.ui_elements:
            if self._bbox_intersects(el.bounding_box, zone):
                occupied += self._calculate_bbox_area(el.bounding_box)

        zone_area = zone.width * zone.height
        return min(1.0, occupied / zone_area) if zone_area > 0 else 0.0

    async def _extract_color_palette(
        self,
        target_element: UIElement,
        screen_state: ScreenState
    ) -> List[str]:
        """Return the dominant colours around the element as hex strings.

        Placeholder: a full implementation would cluster pixel colours
        (PIL + sklearn); for now a fixed palette is returned.
        """
        try:
            if not screen_state.screenshot:
                return []
            return ["#1976d2", "#dc004e", "#22c55e", "#f59e0b", "#ef4444"]
        except Exception as e:
            logger.warning(f"Erreur lors de l'extraction de couleurs: {e}")
            return []

    def _collect_text_context(self, target_element: UIElement, screen_state: ScreenState) -> List[str]:
        """Collect the text of the elements within the context radius."""
        centre = self._get_bbox_center(target_element.bounding_box)

        snippets = []
        for el in screen_state.ui_elements:
            if el == target_element or not el.text_content:
                continue
            dist = self._calculate_distance(centre, self._get_bbox_center(el.bounding_box))
            if dist <= self.context_radius:
                snippets.append(el.text_content.strip())

        # Drop empty strings and keep at most 10 snippets.
        return [s for s in snippets if s][:10]

    # --- geometry and scoring helpers -----------------------------------

    def _get_bbox_center(self, bbox: BBox) -> Tuple[float, float]:
        """Centre point of a bounding box."""
        return (bbox.x + bbox.width / 2, bbox.y + bbox.height / 2)

    def _calculate_distance(self, point1: Tuple[float, float], point2: Tuple[float, float]) -> float:
        """Euclidean distance between two points."""
        return np.sqrt((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2)

    def _determine_spatial_relationship(self, bbox1: BBox, bbox2: BBox) -> str:
        """Spatial relation of ``bbox2`` relative to ``bbox1``.

        Returns ``inside``/``contains`` for nesting, otherwise the dominant
        direction (``left``/``right``/``above``/``below``).
        """
        if self._bbox_contains(bbox1, bbox2):
            return "inside"
        if self._bbox_contains(bbox2, bbox1):
            return "contains"

        x1, y1 = self._get_bbox_center(bbox1)
        x2, y2 = self._get_bbox_center(bbox2)
        dx, dy = x2 - x1, y2 - y1

        if abs(dx) > abs(dy):
            return "right" if dx > 0 else "left"
        return "below" if dy > 0 else "above"

    def _calculate_relevance_score(
        self,
        target_element: UIElement,
        contextual_element: UIElement,
        distance: float
    ) -> float:
        """Relevance (0-1) of a neighbour: proximity plus type/text bonuses."""
        proximity = max(0, 1 - (distance / self.context_radius))
        same_type_bonus = 0.2 if target_element.element_type == contextual_element.element_type else 0
        text_bonus = 0.1 if contextual_element.text_content else 0
        return min(1.0, proximity + same_type_bonus + text_bonus)

    def _calculate_visual_similarity(self, element1: UIElement, element2: UIElement) -> float:
        """Rough visual similarity (0-1) between two elements."""
        if element1.element_type == element2.element_type:
            return 0.8  # identical widget type dominates any other signal

        # Otherwise fall back to a size-ratio heuristic.
        area1 = self._calculate_bbox_area(element1.bounding_box)
        area2 = self._calculate_bbox_area(element2.bounding_box)
        if area1 > 0 and area2 > 0:
            return (min(area1, area2) / max(area1, area2)) * 0.5

        return 0.1  # minimal similarity floor

    def _bbox_contains(self, container: BBox, contained: BBox) -> bool:
        """True when ``container`` fully encloses ``contained``."""
        return (
            container.x <= contained.x and
            container.y <= contained.y and
            container.x + container.width >= contained.x + contained.width and
            container.y + container.height >= contained.y + contained.height
        )

    def _bbox_intersects(self, bbox1: BBox, bbox2: BBox) -> bool:
        """True when the two boxes overlap."""
        return not (
            bbox1.x + bbox1.width < bbox2.x or
            bbox2.x + bbox2.width < bbox1.x or
            bbox1.y + bbox1.height < bbox2.y or
            bbox2.y + bbox2.height < bbox1.y
        )

    def _calculate_bbox_area(self, bbox: BBox) -> float:
        """Surface of a bounding box."""
        return bbox.width * bbox.height

    def _determine_container_type(self, container: UIElement) -> str:
        """Classify a container element from its declared type or its size."""
        if container.element_type in ["form", "dialog", "modal"]:
            return container.element_type

        # Size-based heuristic for anonymous containers.
        area = self._calculate_bbox_area(container.bounding_box)
        if area > 500000:
            return "page"
        if area > 100000:
            return "panel"
        return "container"

    def _calculate_depth_level(self, target_element: UIElement, screen_state: ScreenState) -> int:
        """Nesting depth: number of elements that fully enclose the target."""
        target_bbox = target_element.bounding_box
        return sum(
            1 for el in screen_state.ui_elements
            if el != target_element and self._bbox_contains(el.bounding_box, target_bbox)
        )
|
||||
@@ -1,493 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Service de Validation en Temps Réel pour RPA Vision V3
|
||||
|
||||
Ce service gère la validation continue des éléments visuels en arrière-plan,
|
||||
fournit des notifications de changements et maintient la cohérence des cibles visuelles.
|
||||
|
||||
Exigences: 6.1, 6.2, 6.3, 6.4, 6.5
|
||||
Auteur: Assistant IA
|
||||
Date: 2026-01-07
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from typing import Dict, List, Optional, Callable, Any
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timedelta
|
||||
from enum import Enum
|
||||
import threading
|
||||
import weakref
|
||||
|
||||
from core.visual.visual_target_manager import VisualTarget, ValidationResult
|
||||
from core.visual.screenshot_validation_manager import ScreenshotValidationManager, ValidationStatus
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class NotificationLevel(Enum):
    """Severity levels for validation notifications."""
    INFO = "info"
    WARNING = "warning"
    ERROR = "error"
    CRITICAL = "critical"
|
||||
|
||||
@dataclass
class ValidationNotification:
    """Notification emitted after validating a visual target."""
    target_signature: str  # signature of the target the notification concerns
    level: NotificationLevel
    message: str
    timestamp: datetime
    validation_result: Optional[ValidationResult] = None
    suggested_actions: List[str] = field(default_factory=list)  # manual remediation hints
    auto_fixable: bool = False  # True when an automatic fix may be attempted
|
||||
|
||||
@dataclass
class ValidationSubscription:
    """Subscription to validation notifications for one target."""
    target_signature: str
    callback: Callable[[ValidationNotification], None]  # invoked for each delivered notification
    # Levels this subscriber wants to receive; defaults to every level.
    notification_levels: List[NotificationLevel] = field(default_factory=lambda: list(NotificationLevel))
    active: bool = True  # flipped to False when delivery fails
    created_at: datetime = field(default_factory=datetime.now)
|
||||
|
||||
class RealtimeValidationService:
|
||||
"""
|
||||
Service de validation en temps réel pour les cibles visuelles.
|
||||
|
||||
Gère la validation continue, les notifications et les actions automatiques
|
||||
pour maintenir la cohérence des éléments visuels.
|
||||
"""
|
||||
|
||||
def __init__(self, validation_manager: ScreenshotValidationManager):
    """Initialise the realtime validation service.

    Args:
        validation_manager: screenshot validation manager used to run
            the actual target validations.
    """
    self.validation_manager = validation_manager

    # Subscription registry, guarded by a re-entrant lock for concurrent access.
    self._subscriptions: Dict[str, List[ValidationSubscription]] = {}
    self._subscription_lock = threading.RLock()

    # Service tuning knobs.
    self.validation_interval = 5.0  # seconds between validation passes
    self.notification_queue_size = 1000
    self.auto_fix_enabled = True
    self.batch_validation_size = 10

    # Notifications awaiting the background processor.
    self._notification_queue: asyncio.Queue = asyncio.Queue(maxsize=self.notification_queue_size)

    # Background tasks and running flag.
    self._service_tasks: List[asyncio.Task] = []
    self._service_running = False

    # Operational counters.
    self.stats = {
        'notifications_sent': 0,
        'auto_fixes_applied': 0,
        'validation_errors': 0,
        'active_subscriptions': 0
    }

    logger.info("Service de validation en temps réel initialisé")
|
||||
|
||||
async def start_service(self):
    """Start the background tasks of the realtime validation service.

    Idempotent: a second call while already running only logs a warning.
    """
    if self._service_running:
        logger.warning("Service déjà en cours d'exécution")
        return

    self._service_running = True

    # One task per background concern: notification delivery, periodic
    # health checks and expired-subscription cleanup.
    self._service_tasks = [
        asyncio.create_task(self._notification_processor()),
        asyncio.create_task(self._periodic_health_check()),
        asyncio.create_task(self._cleanup_expired_subscriptions())
    ]

    logger.info("Service de validation en temps réel démarré")
|
||||
|
||||
async def stop_service(self):
    """Stop the service: cancel and await every background task."""
    if not self._service_running:
        return

    self._service_running = False

    for task in self._service_tasks:
        task.cancel()

    # Wait for the cancellations to land; swallow CancelledError and friends.
    await asyncio.gather(*self._service_tasks, return_exceptions=True)
    self._service_tasks.clear()

    logger.info("Service de validation en temps réel arrêté")
|
||||
|
||||
def subscribe_to_validation(
    self,
    target_signature: str,
    callback: Callable[[ValidationNotification], None],
    notification_levels: Optional[List[NotificationLevel]] = None
) -> str:
    """Subscribe to validation notifications for a target.

    Args:
        target_signature: signature of the target to watch.
        callback: invoked with each matching ValidationNotification.
        notification_levels: levels to receive; every level when None.

    Returns:
        Opaque subscription id, usable with ``unsubscribe_from_validation``.
    """
    levels = list(NotificationLevel) if notification_levels is None else notification_levels

    subscription = ValidationSubscription(
        target_signature=target_signature,
        callback=callback,
        notification_levels=levels
    )

    with self._subscription_lock:
        self._subscriptions.setdefault(target_signature, []).append(subscription)
        self.stats['active_subscriptions'] += 1

    # The id embeds the object identity of the subscription record.
    subscription_id = f"{target_signature}_{id(subscription)}"

    logger.info(f"Nouvel abonnement créé: {subscription_id}")
    return subscription_id
|
||||
|
||||
def unsubscribe_from_validation(self, target_signature: str, subscription_id: str):
    """Cancel a subscription previously created for a target.

    Args:
        target_signature: signature the subscription was registered under.
        subscription_id: id returned by ``subscribe_to_validation``.
    """
    with self._subscription_lock:
        if target_signature not in self._subscriptions:
            return

        current = self._subscriptions[target_signature]
        before = len(current)

        # Match by the identity-based id assigned at creation time.
        kept = [
            sub for sub in current
            if f"{target_signature}_{id(sub)}" != subscription_id
        ]
        self._subscriptions[target_signature] = kept

        removed = before - len(kept)
        self.stats['active_subscriptions'] -= removed

        if removed > 0:
            logger.info(f"Abonnement supprimé: {subscription_id}")
|
||||
|
||||
async def validate_target_with_notification(self, target: VisualTarget) -> ValidationResult:
    """Validate a target and emit the corresponding notifications.

    Args:
        target: visual target to validate.

    Returns:
        The validation result; on failure, an ERROR result describing
        the exception (an ERROR notification is also queued).
    """
    try:
        result = await self.validation_manager.validate_target_now(target)

        # Fan the outcome out to subscribers.
        await self._process_validation_result(target, result)
        return result

    except Exception as e:
        logger.error(f"Erreur lors de la validation avec notification: {e}")
        self.stats['validation_errors'] += 1

        # Tell subscribers the validation itself failed.
        await self._send_notification(ValidationNotification(
            target_signature=target.signature,
            level=NotificationLevel.ERROR,
            message=f"Erreur de validation: {str(e)}",
            timestamp=datetime.now()
        ))

        # Surface the failure to the caller as an ERROR result.
        return ValidationResult(
            target_signature=target.signature,
            status=ValidationStatus.ERROR,
            confidence=0.0,
            timestamp=datetime.now(),
            issues=[f"Erreur de validation: {str(e)}"]
        )
|
||||
|
||||
async def _process_validation_result(self, target: VisualTarget, result: ValidationResult):
    """Translate a validation result into notifications and queue them.

    Emits one status-level notification (moderate-confidence INFO,
    WARNING, or ERROR) plus one WARNING per position/appearance issue.
    """
    pending: List[ValidationNotification] = []

    # Status-driven notification.
    if result.status == ValidationStatus.VALID:
        # Valid but not fully confident: informational heads-up only.
        if result.confidence < 0.9:
            pending.append(ValidationNotification(
                target_signature=target.signature,
                level=NotificationLevel.INFO,
                message=f"Élément validé avec confiance modérée: {result.confidence:.2f}",
                timestamp=datetime.now(),
                validation_result=result
            ))

    elif result.status == ValidationStatus.WARNING:
        pending.append(ValidationNotification(
            target_signature=target.signature,
            level=NotificationLevel.WARNING,
            message=f"Avertissement de validation: confiance {result.confidence:.2f}",
            timestamp=datetime.now(),
            validation_result=result,
            suggested_actions=["Vérifier l'état de l'application", "Mettre à jour la capture"],
            auto_fixable=True
        ))

    elif result.status == ValidationStatus.ERROR:
        pending.append(ValidationNotification(
            target_signature=target.signature,
            level=NotificationLevel.ERROR,
            message="Élément non trouvé ou invalide",
            timestamp=datetime.now(),
            validation_result=result,
            suggested_actions=["Re-sélectionner l'élément", "Vérifier l'application"],
            auto_fixable=False
        ))

    # Issue-specific notifications: position or appearance drift.
    for issue in result.issues:
        lowered = issue.lower()
        if "position" in lowered:
            pending.append(ValidationNotification(
                target_signature=target.signature,
                level=NotificationLevel.WARNING,
                message=f"Changement de position détecté: {issue}",
                timestamp=datetime.now(),
                validation_result=result,
                suggested_actions=["Mettre à jour la position de référence"],
                auto_fixable=True
            ))
        elif "appearance" in lowered:
            pending.append(ValidationNotification(
                target_signature=target.signature,
                level=NotificationLevel.WARNING,
                message=f"Changement d'apparence détecté: {issue}",
                timestamp=datetime.now(),
                validation_result=result,
                suggested_actions=["Mettre à jour l'embedding de référence"],
                auto_fixable=True
            ))

    for notification in pending:
        await self._send_notification(notification)
|
||||
|
||||
async def _send_notification(self, notification: ValidationNotification):
    """Queue a notification for background delivery.

    Drops the notification (with a warning) when the queue is full.

    Bug fix: ``await Queue.put(...)`` never raises ``asyncio.QueueFull`` —
    it waits until space is available — so the original except branch was
    dead code and a full queue blocked the sender instead of dropping.
    ``put_nowait`` is the call that actually raises ``QueueFull``.
    """
    try:
        self._notification_queue.put_nowait(notification)
    except asyncio.QueueFull:
        logger.warning("Queue de notifications pleine - notification ignorée")
|
||||
|
||||
async def _notification_processor(self):
    """Background loop: deliver queued notifications and trigger auto-fixes.

    Runs until the service is stopped; the 1-second timeout lets the loop
    re-check the running flag even when the queue stays empty.
    """
    while self._service_running:
        try:
            notification = await asyncio.wait_for(
                self._notification_queue.get(),
                timeout=1.0
            )

            await self._deliver_notification(notification)

            # Optionally attempt an automatic fix for fixable findings.
            if (self.auto_fix_enabled and
                    notification.auto_fixable and
                    notification.validation_result):
                await self._apply_auto_fix(notification)

        except asyncio.TimeoutError:
            continue  # queue empty: loop around and re-check the flag
        except Exception as e:
            logger.error(f"Erreur dans le processeur de notifications: {e}")
|
||||
|
||||
async def _deliver_notification(self, notification: ValidationNotification):
|
||||
"""Livre une notification aux abonnés appropriés"""
|
||||
target_signature = notification.target_signature
|
||||
|
||||
with self._subscription_lock:
|
||||
subscriptions = self._subscriptions.get(target_signature, [])
|
||||
|
||||
# Filtrer les abonnements actifs et intéressés par ce niveau
|
||||
active_subscriptions = [
|
||||
sub for sub in subscriptions
|
||||
if sub.active and notification.level in sub.notification_levels
|
||||
]
|
||||
|
||||
# Livrer aux abonnés
|
||||
for subscription in active_subscriptions:
|
||||
try:
|
||||
# Utiliser une référence faible pour éviter les fuites mémoire
|
||||
callback = subscription.callback
|
||||
if callback:
|
||||
# Exécuter le callback dans un thread séparé pour éviter le blocage
|
||||
loop = asyncio.get_event_loop()
|
||||
await loop.run_in_executor(None, callback, notification)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Erreur lors de la livraison de notification: {e}")
|
||||
# Désactiver l'abonnement défaillant
|
||||
subscription.active = False
|
||||
|
||||
self.stats['notifications_sent'] += 1
|
||||
|
||||
async def _apply_auto_fix(self, notification: ValidationNotification):
    """Attempt the first viable automatic recovery action for a notification.

    Only actions flagged auto-executable with confidence > 0.7 are tried.
    On the first successful one, an INFO confirmation notification is queued
    and the loop stops.
    """
    try:
        if not notification.validation_result:
            # Nothing to work with — the processor normally filters this out.
            return

        result = notification.validation_result

        for action in result.recovery_actions:
            # 0.7 threshold: only apply corrections the validator is
            # reasonably confident about.
            if action.auto_executable and action.confidence > 0.7:
                success = await self.validation_manager.execute_recovery_action(
                    notification.target_signature, action
                )

                if success:
                    self.stats['auto_fixes_applied'] += 1
                    logger.info(f"Correction automatique appliquée: {action.action_type}")

                    # Tell subscribers the target was auto-corrected.
                    success_notification = ValidationNotification(
                        target_signature=notification.target_signature,
                        level=NotificationLevel.INFO,
                        message=f"Correction automatique appliquée: {action.description}",
                        timestamp=datetime.now()
                    )

                    await self._send_notification(success_notification)
                    # Stop after the first successful fix.
                    break

    except Exception as e:
        logger.error(f"Erreur lors de l'application de correction automatique: {e}")
async def _periodic_health_check(self):
    """Background loop logging queue pressure and subscription health.

    Runs every 30 seconds while the service is up; failures are logged and
    the loop keeps going.
    """
    while self._service_running:
        try:
            await asyncio.sleep(30)  # check every 30 seconds

            # Warn before the queue actually fills (80% of capacity).
            queue_size = self._notification_queue.qsize()
            if queue_size > self.notification_queue_size * 0.8:
                logger.warning(f"Queue de notifications presque pleine: {queue_size}")

            # Count total vs still-active subscriptions under the lock.
            with self._subscription_lock:
                total_subscriptions = sum(len(subs) for subs in self._subscriptions.values())
                active_subscriptions = sum(
                    len([sub for sub in subs if sub.active])
                    for subs in self._subscriptions.values()
                )

            self.stats['active_subscriptions'] = active_subscriptions

            # Periodic stats, only when there is something to report.
            if total_subscriptions > 0:
                logger.debug(f"Santé du service: {active_subscriptions}/{total_subscriptions} "
                             f"abonnements actifs, {queue_size} notifications en queue")

        except Exception as e:
            logger.error(f"Erreur lors de la vérification de santé: {e}")
async def _cleanup_expired_subscriptions(self):
    """Background loop dropping inactive or stale (> 24 h) subscriptions.

    Runs every 5 minutes; targets left with no live subscriber are removed
    entirely from the subscription map.
    """
    while self._service_running:
        try:
            await asyncio.sleep(300)  # clean every 5 minutes

            # Subscriptions created more than 24 h ago are considered stale.
            # NOTE(review): relies on `timedelta` being imported at module
            # level — the visible header only imports `datetime`; confirm.
            cutoff_time = datetime.now() - timedelta(hours=24)

            with self._subscription_lock:
                # Iterate over a snapshot of the keys: entries may be deleted
                # from the dict while iterating.
                for target_signature in list(self._subscriptions.keys()):
                    subscriptions = self._subscriptions[target_signature]

                    # Keep only active, recent subscriptions.
                    active_subscriptions = [
                        sub for sub in subscriptions
                        if sub.active and sub.created_at > cutoff_time
                    ]

                    if active_subscriptions:
                        self._subscriptions[target_signature] = active_subscriptions
                    else:
                        # No live subscriber left for this target.
                        del self._subscriptions[target_signature]

            logger.debug("Nettoyage des abonnements expirés terminé")

        except Exception as e:
            logger.error(f"Erreur lors du nettoyage des abonnements: {e}")
def get_service_statistics(self) -> Dict[str, Any]:
    """Return a point-in-time snapshot of the service's runtime statistics."""
    # Subscription figures are read under the lock; the rest of the snapshot
    # is assembled afterwards from already-captured values.
    with self._subscription_lock:
        monitored = len(self._subscriptions)
        subscription_count = sum(
            len(entries) for entries in self._subscriptions.values()
        )

    snapshot = {
        'service_running': self._service_running,
        'total_targets_monitored': monitored,
        'total_subscriptions': subscription_count,
    }
    for counter in ('active_subscriptions', 'notifications_sent',
                    'auto_fixes_applied', 'validation_errors'):
        snapshot[counter] = self.stats[counter]
    snapshot['notification_queue_size'] = self._notification_queue.qsize()
    snapshot['auto_fix_enabled'] = self.auto_fix_enabled
    return snapshot
def enable_auto_fix(self):
    """Enable automatic corrections for fixable notifications."""
    self.auto_fix_enabled = True
    logger.info("Corrections automatiques activées")
def disable_auto_fix(self):
    """Disable automatic corrections; notifications are still delivered."""
    self.auto_fix_enabled = False
    logger.info("Corrections automatiques désactivées")
async def __aenter__(self):
    """Async context-manager entry: start the service and return self."""
    await self.start_service()
    return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
    """Async context-manager exit: stop the service (exceptions propagate)."""
    await self.stop_service()
@@ -1,642 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Gestionnaire d'Intégration RPA pour RPA Vision V3
|
||||
|
||||
Ce gestionnaire connecte le système visuel 100% avec les composants existants:
|
||||
- FusionEngine pour les embeddings
|
||||
- UIDetector pour la détection d'éléments
|
||||
- TargetResolver pour la résolution visuelle pure
|
||||
- ExecutionLoop pour l'exécution basée sur la vision
|
||||
|
||||
Exigences: 1.5, 3.3, 6.1
|
||||
Auteur: Assistant IA
|
||||
Date: 2026-01-07
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from typing import Dict, List, Optional, Any, Tuple
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime
|
||||
import numpy as np
|
||||
|
||||
from core.visual.visual_target_manager import VisualTarget, VisualTargetManager
|
||||
from core.visual.visual_embedding_manager import VisualEmbeddingManager
|
||||
from core.visual.screenshot_validation_manager import ScreenshotValidationManager
|
||||
from core.visual.visual_performance_optimizer import VisualPerformanceOptimizer
|
||||
|
||||
# Imports des composants RPA Vision V3 existants
|
||||
from core.embedding.fusion_engine import FusionEngine
|
||||
from core.detection.ui_detector import UIDetector
|
||||
from core.execution.target_resolver import TargetResolver
|
||||
from core.execution.execution_loop import ExecutionLoop
|
||||
from core.models import UIElement, ScreenState, BBox
|
||||
from core.capture.screen_capturer import ScreenCapturer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@dataclass
class IntegrationConfig:
    """Tuning knobs for the RPA visual-integration layer."""
    use_visual_only: bool = True          # pure-visual mode (no selectors)
    fallback_to_legacy: bool = False      # allow legacy selector fallback
    confidence_threshold: float = 0.8     # minimum accepted match confidence
    max_retry_attempts: int = 3           # max resolution attempts
    enable_self_healing: bool = True      # attempt auto-recovery on failure
    performance_monitoring: bool = True   # collect performance metrics
@dataclass
class ResolutionResult:
    """Outcome of resolving a visual target against the current screen."""
    success: bool
    target_found: Optional[UIElement] = None   # matched element, when found
    confidence: float = 0.0                    # match confidence in [0, 1]
    resolution_time_ms: float = 0.0            # wall-clock resolution time
    method_used: str = "visual"                # 'visual', 'fallback', 'self_healing'
    attempts_count: int = 1
    error_message: Optional[str] = None        # set when success is False
class RPAIntegrationManager:
    """
    Bridge between the 100% visual subsystem and the existing RPA Vision V3
    stack.

    Orchestrates the new visual components (targets, embeddings, validation,
    performance) together with the legacy FusionEngine / UIDetector /
    ScreenCapturer so workflows can migrate transparently.
    """

    def __init__(
        self,
        visual_target_manager: VisualTargetManager,
        visual_embedding_manager: VisualEmbeddingManager,
        validation_manager: ScreenshotValidationManager,
        performance_optimizer: VisualPerformanceOptimizer,
        fusion_engine: FusionEngine,
        ui_detector: UIDetector,
        screen_capturer: ScreenCapturer,
        config: Optional[IntegrationConfig] = None
    ):
        """
        Wire the new visual components and the existing V3 components together.

        Args:
            visual_target_manager: store/lookup of visual targets
            visual_embedding_manager: embedding computation and matching
            validation_manager: screenshot validation and recovery
            performance_optimizer: cache + background-task optimizer
            fusion_engine: existing fusion engine (legacy fallback path)
            ui_detector: existing UI element detector
            screen_capturer: existing screen capturer
            config: integration settings (defaults to IntegrationConfig())
        """
        # New visual components
        self.visual_target_manager = visual_target_manager
        self.visual_embedding_manager = visual_embedding_manager
        self.validation_manager = validation_manager
        self.performance_optimizer = performance_optimizer

        # Existing RPA Vision V3 components
        self.fusion_engine = fusion_engine
        self.ui_detector = ui_detector
        self.screen_capturer = screen_capturer

        # Configuration
        self.config = config or IntegrationConfig()

        # Built lazily by initialize_integration().
        self.visual_target_resolver = None

        # Integration counters (maintained by the _attempt_* helpers and
        # _update_integration_stats).
        self.integration_stats = {
            'visual_resolutions': 0,
            'fallback_resolutions': 0,
            'self_healing_activations': 0,
            'total_resolution_time_ms': 0.0,
            'average_confidence': 0.0
        }

        logger.info("Gestionnaire d'intégration RPA initialisé en mode 100% visuel")
async def initialize_integration(self):
    """Create the visual TargetResolver adapter and start support services.

    Raises:
        Exception: re-raised after logging if any component fails to start.
    """
    try:
        logger.info("🔗 Initialisation de l'intégration RPA Vision V3...")

        # Adapter that replaces selector-based resolution in TargetResolver.
        self.visual_target_resolver = VisualTargetResolver(
            visual_target_manager=self.visual_target_manager,
            visual_embedding_manager=self.visual_embedding_manager,
            fusion_engine=self.fusion_engine,
            ui_detector=self.ui_detector,
            config=self.config
        )

        # Start the cache/background-task optimizer.
        await self.performance_optimizer.start_optimizer()

        # Currently a no-op placeholder (see _setup_integration_hooks).
        await self._setup_integration_hooks()

        logger.info("✅ Intégration RPA Vision V3 initialisée avec succès")

    except Exception as e:
        logger.error(f"❌ Erreur lors de l'initialisation de l'intégration: {e}")
        raise
async def resolve_visual_target(
    self,
    visual_target: VisualTarget,
    current_screen_state: Optional[ScreenState] = None
) -> ResolutionResult:
    """
    Resolve a visual target on the current screen.

    Resolution cascade: pure visual matching first, then (if enabled)
    legacy fallback, then (if enabled) self-healing recovery.

    Args:
        visual_target: the target to locate
        current_screen_state: pre-captured screen state; captured here if None

    Returns:
        ResolutionResult with timing, confidence and the method that
        succeeded; never raises — errors become a failed result.
    """
    start_time = datetime.now()

    try:
        logger.debug(f"🎯 Résolution de la cible visuelle: {visual_target.signature}")

        # Capture the screen only when the caller did not provide one.
        if current_screen_state is None:
            current_screen_state = await self._capture_current_screen_state()

        # 1) pure visual (embedding) resolution
        result = await self._attempt_visual_resolution(visual_target, current_screen_state)

        # 2) legacy fallback, when enabled
        if not result.success and self.config.fallback_to_legacy:
            logger.warning("Résolution visuelle échouée, tentative de fallback...")
            result = await self._attempt_fallback_resolution(visual_target, current_screen_state)
            result.method_used = "fallback"

        # 3) self-healing recovery, when enabled
        if not result.success and self.config.enable_self_healing:
            logger.warning("Résolution échouée, tentative d'auto-guérison...")
            result = await self._attempt_self_healing_resolution(visual_target, current_screen_state)
            result.method_used = "self_healing"
            if result.success:
                self.integration_stats['self_healing_activations'] += 1

        # Record wall-clock resolution time on the result.
        resolution_time = (datetime.now() - start_time).total_seconds() * 1000
        result.resolution_time_ms = resolution_time

        await self._update_integration_stats(result)

        if result.success:
            logger.debug(f"✅ Cible résolue en {resolution_time:.1f}ms (confiance: {result.confidence:.2f})")
        else:
            logger.warning(f"❌ Échec de résolution après {resolution_time:.1f}ms")

        return result

    except Exception as e:
        resolution_time = (datetime.now() - start_time).total_seconds() * 1000
        logger.error(f"❌ Erreur lors de la résolution: {e}")

        return ResolutionResult(
            success=False,
            resolution_time_ms=resolution_time,
            error_message=str(e)
        )
async def execute_visual_action(
    self,
    visual_target: VisualTarget,
    action_type: str,
    action_parameters: Dict[str, Any]
) -> bool:
    """
    Resolve a visual target, then execute an action on it.

    Args:
        visual_target: target to act on
        action_type: 'click', 'input', 'hover', ...
        action_parameters: action-specific parameters (e.g. 'text', 'delay')

    Returns:
        True when both resolution and execution succeeded; never raises.
    """
    try:
        logger.info(f"🎬 Exécution de l'action {action_type} sur {visual_target.signature}")

        # Locate the element on the current screen first.
        resolution_result = await self.resolve_visual_target(visual_target)

        if not resolution_result.success:
            logger.error(f"Impossible de résoudre la cible pour l'action {action_type}")
            return False

        success = await self._execute_action_on_element(
            resolution_result.target_found,
            action_type,
            action_parameters
        )

        if success:
            # Post-check only for state-changing actions.
            if action_type in ['click', 'input']:
                await self._validate_action_result(visual_target, action_type)

            logger.info(f"✅ Action {action_type} exécutée avec succès")
        else:
            logger.error(f"❌ Échec de l'exécution de l'action {action_type}")

        return success

    except Exception as e:
        logger.error(f"❌ Erreur lors de l'exécution de l'action: {e}")
        return False
async def migrate_legacy_workflow(
    self,
    legacy_workflow: Dict[str, Any]
) -> Dict[str, VisualTarget]:
    """
    Migrate a selector-based (CSS/XPath) workflow to visual targets.

    Args:
        legacy_workflow: workflow dict with a 'nodes' list

    Returns:
        Mapping node_id -> VisualTarget for the nodes that could be
        converted; empty dict on error.
    """
    logger.info("🔄 Migration d'un workflow legacy vers le système visuel")

    migrated_targets = {}

    try:
        for node in legacy_workflow.get('nodes', []):
            node_id = node.get('id')

            # Only nodes that actually carry legacy selectors need migration.
            if self._has_legacy_selectors(node):
                logger.debug(f"Migration du nœud {node_id}")

                visual_target = await self._convert_legacy_to_visual(node)

                if visual_target:
                    migrated_targets[node_id] = visual_target
                    logger.debug(f"✅ Nœud {node_id} migré avec succès")
                else:
                    # Conversion is not fully implemented yet — see
                    # _convert_legacy_to_visual.
                    logger.warning(f"⚠️ Échec de migration du nœud {node_id}")

        logger.info(f"✅ Migration terminée - {len(migrated_targets)} nœuds migrés")
        return migrated_targets

    except Exception as e:
        logger.error(f"❌ Erreur lors de la migration: {e}")
        return {}

# Private helpers
async def _capture_current_screen_state(self) -> ScreenState:
    """Capture the screen and run UI element detection on the screenshot."""
    screenshot = await self.screen_capturer.capture_screen()
    screen_state = await self.ui_detector.detect_elements(screenshot)
    return screen_state
async def _attempt_visual_resolution(
    self,
    visual_target: VisualTarget,
    screen_state: ScreenState
) -> ResolutionResult:
    """Pure embedding-based resolution against the detected UI elements.

    Increments the 'visual_resolutions' counter on success; never raises.
    """
    try:
        # Best embedding match among the elements detected on screen.
        best_match = await self.visual_embedding_manager.find_best_match(
            visual_target.embedding,
            screen_state.ui_elements
        )

        if best_match and best_match.confidence >= self.config.confidence_threshold:
            self.integration_stats['visual_resolutions'] += 1

            return ResolutionResult(
                success=True,
                target_found=best_match.element,
                confidence=best_match.confidence,
                method_used="visual"
            )
        else:
            # Keep the (insufficient) confidence for diagnostics.
            return ResolutionResult(
                success=False,
                confidence=best_match.confidence if best_match else 0.0,
                error_message="Confiance insuffisante ou élément non trouvé"
            )

    except Exception as e:
        return ResolutionResult(
            success=False,
            error_message=f"Erreur de résolution visuelle: {e}"
        )
async def _attempt_fallback_resolution(
    self,
    visual_target: VisualTarget,
    screen_state: ScreenState
) -> ResolutionResult:
    """Legacy fallback: ask the existing FusionEngine to find the element
    from the target's textual description.

    Increments the 'fallback_resolutions' counter on success; never raises.
    """
    try:
        fusion_result = await self.fusion_engine.find_element_by_context(
            screen_state,
            visual_target.metadata.visual_description
        )

        if fusion_result:
            self.integration_stats['fallback_resolutions'] += 1

            return ResolutionResult(
                success=True,
                target_found=fusion_result,
                # Fixed value, deliberately lower than visual matches.
                confidence=0.7,
                method_used="fallback"
            )
        else:
            return ResolutionResult(
                success=False,
                error_message="Fallback legacy échoué"
            )

    except Exception as e:
        return ResolutionResult(
            success=False,
            error_message=f"Erreur de fallback: {e}"
        )
async def _attempt_self_healing_resolution(
    self,
    visual_target: VisualTarget,
    screen_state: ScreenState
) -> ResolutionResult:
    """Validation-driven recovery, then a retry of visual resolution.

    Runs auto-executable recovery actions (confidence > 0.6); after the
    first successful one, reloads the (possibly repaired) target and
    retries a pure visual resolution. Never raises.
    """
    try:
        validation_result = await self.validation_manager.validate_target_now(visual_target)

        if validation_result.recovery_actions:
            for action in validation_result.recovery_actions:
                # Lower bar (0.6) than auto-fix: we are already failing.
                if action.auto_executable and action.confidence > 0.6:
                    success = await self.validation_manager.execute_recovery_action(
                        visual_target.signature, action
                    )

                    if success:
                        # The target may have been updated by the recovery —
                        # reload it before retrying.
                        updated_target = await self.visual_target_manager.get_target_by_signature(
                            visual_target.signature
                        )

                        if updated_target:
                            return await self._attempt_visual_resolution(updated_target, screen_state)

        return ResolutionResult(
            success=False,
            error_message="Auto-guérison échouée"
        )

    except Exception as e:
        return ResolutionResult(
            success=False,
            error_message=f"Erreur d'auto-guérison: {e}"
        )
async def _execute_action_on_element(
    self,
    element: UIElement,
    action_type: str,
    parameters: Dict[str, Any]
) -> bool:
    """Dispatch an action to the coroutine that executes it on the element.

    Unsupported action types and execution errors are logged and reported
    as False — this method never raises.
    """
    try:
        dispatch = {
            "click": self._execute_click_action,
            "input": self._execute_input_action,
            "hover": self._execute_hover_action,
        }
        handler = dispatch.get(action_type)
        if handler is None:
            logger.warning(f"Type d'action non supporté: {action_type}")
            return False
        return await handler(element, parameters)

    except Exception as e:
        logger.error(f"Erreur lors de l'exécution de l'action {action_type}: {e}")
        return False
async def _execute_click_action(self, element: UIElement, parameters: Dict[str, Any]) -> bool:
    """Click the centre of the element's bounding box.

    Supported parameters: 'delay' — seconds to wait after the click
    (default 0.5).
    """
    # Aim at the centre of the bounding box.
    bbox = element.bounding_box
    click_x = bbox.x + bbox.width / 2
    click_y = bbox.y + bbox.height / 2

    # Local import: pyautogui is only needed when actions actually run.
    import pyautogui
    pyautogui.click(click_x, click_y)

    # Let the UI react before the caller continues.
    delay = parameters.get('delay', 0.5)
    await asyncio.sleep(delay)

    return True
async def _execute_input_action(self, element: UIElement, parameters: Dict[str, Any]) -> bool:
    """Focus the element with a click, then type parameters['text'] (if any)."""
    # Focus the field first (empty parameters: default 0.5 s post-click delay).
    await self._execute_click_action(element, {})

    text = parameters.get('text', '')
    if text:
        # Local import: pyautogui is only needed when actions actually run.
        import pyautogui
        pyautogui.write(text)

    return True
async def _execute_hover_action(self, element: UIElement, parameters: Dict[str, Any]) -> bool:
    """Move the pointer to the centre of the element (parameters unused)."""
    bbox = element.bounding_box
    hover_x = bbox.x + bbox.width / 2
    hover_y = bbox.y + bbox.height / 2

    # Local import: pyautogui is only needed when actions actually run.
    import pyautogui
    pyautogui.moveTo(hover_x, hover_y)

    return True
async def _validate_action_result(
    self,
    visual_target: VisualTarget,
    action_type: str
):
    """Re-validate the target after a state-changing action.

    Only logs on failure; does not raise or retry.
    """
    # Give the UI time to settle before re-validating.
    await asyncio.sleep(1.0)

    validation_result = await self.validation_manager.validate_target_now(visual_target)

    if not validation_result.is_valid:
        logger.warning(f"Validation post-action échouée pour {action_type}")
def _has_legacy_selectors(self, node: Dict[str, Any]) -> bool:
|
||||
"""Vérifie si un nœud contient des sélecteurs legacy"""
|
||||
parameters = node.get('parameters', {})
|
||||
|
||||
# Chercher des sélecteurs CSS/XPath
|
||||
legacy_keys = ['css_selector', 'xpath_selector', 'selector', 'target_selector']
|
||||
|
||||
for key in legacy_keys:
|
||||
if key in parameters and parameters[key]:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
async def _convert_legacy_to_visual(self, node: Dict[str, Any]) -> Optional[VisualTarget]:
    """Convert a legacy selector node into a VisualTarget.

    NOT IMPLEMENTED: always returns None, which callers treat as a failed
    migration. A full implementation would (1) locate the element via the
    legacy selector, (2) capture an image of it, (3) compute a visual
    embedding, then (4) build the VisualTarget.
    """
    try:
        # Extracted but currently unused — kept for the future implementation.
        parameters = node.get('parameters', {})

        logger.warning("Conversion legacy->visuel non implémentée complètement")
        return None

    except Exception as e:
        logger.error(f"Erreur lors de la conversion legacy: {e}")
        return None
async def _setup_integration_hooks(self):
    """Placeholder for integration hooks (target-resolution interception,
    performance monitoring, cache synchronisation). Currently a no-op."""
    pass
async def _update_integration_stats(self, result: ResolutionResult):
|
||||
"""Met à jour les statistiques d'intégration"""
|
||||
self.integration_stats['total_resolution_time_ms'] += result.resolution_time_ms
|
||||
|
||||
if result.success:
|
||||
if result.method_used == "visual":
|
||||
self.integration_stats['visual_resolutions'] += 1
|
||||
elif result.method_used == "fallback":
|
||||
self.integration_stats['fallback_resolutions'] += 1
|
||||
|
||||
# Mettre à jour la confiance moyenne
|
||||
current_avg = self.integration_stats['average_confidence']
|
||||
total_resolutions = (self.integration_stats['visual_resolutions'] +
|
||||
self.integration_stats['fallback_resolutions'])
|
||||
|
||||
if total_resolutions > 0:
|
||||
self.integration_stats['average_confidence'] = (
|
||||
(current_avg * (total_resolutions - 1) + result.confidence) / total_resolutions
|
||||
)
|
||||
|
||||
def get_integration_statistics(self) -> Dict[str, Any]:
    """Return aggregated integration metrics plus the active configuration."""
    stats = self.integration_stats
    resolved = stats['visual_resolutions'] + stats['fallback_resolutions']
    # max(1, ...) guards the ratios against division by zero.
    denominator = max(1, resolved)

    return {
        'total_resolutions': resolved,
        'visual_resolutions': stats['visual_resolutions'],
        'fallback_resolutions': stats['fallback_resolutions'],
        'self_healing_activations': stats['self_healing_activations'],
        'visual_success_rate': stats['visual_resolutions'] / denominator * 100,
        'average_resolution_time_ms': stats['total_resolution_time_ms'] / denominator,
        'average_confidence': stats['average_confidence'],
        'config': {
            'visual_only_mode': self.config.use_visual_only,
            'fallback_enabled': self.config.fallback_to_legacy,
            'self_healing_enabled': self.config.enable_self_healing,
            'confidence_threshold': self.config.confidence_threshold,
        },
    }
||||
class VisualTargetResolver:
    """
    Adapter that plugs pure visual resolution into the existing TargetResolver.

    Replaces selector-based lookup with embedding matching against the
    elements detected on the current screen.
    """

    def __init__(
        self,
        visual_target_manager: VisualTargetManager,
        visual_embedding_manager: VisualEmbeddingManager,
        fusion_engine: FusionEngine,
        ui_detector: UIDetector,
        config: IntegrationConfig
    ):
        # Injected dependencies. fusion_engine / ui_detector are kept for
        # parity with the legacy resolver even though resolve_target only
        # uses the embedding manager.
        self.visual_target_manager = visual_target_manager
        self.visual_embedding_manager = visual_embedding_manager
        self.fusion_engine = fusion_engine
        self.ui_detector = ui_detector
        self.config = config

    async def resolve_target(
        self,
        target_signature: str,
        screen_state: ScreenState
    ) -> Optional[UIElement]:
        """
        Resolve a target by its visual signature.

        Args:
            target_signature: signature of the stored visual target
            screen_state: current screen state (detected elements)

        Returns:
            The matched UI element, or None when the signature is unknown,
            the best match is below the confidence threshold, or an error
            occurred (errors are logged, never raised).
        """
        try:
            visual_target = await self.visual_target_manager.get_target_by_signature(target_signature)

            if not visual_target:
                logger.error(f"Cible visuelle non trouvée: {target_signature}")
                return None

            # Embedding match against the detected elements.
            match_result = await self.visual_embedding_manager.find_best_match(
                visual_target.embedding,
                screen_state.ui_elements
            )

            if match_result and match_result.confidence >= self.config.confidence_threshold:
                return match_result.element

            return None

        except Exception as e:
            logger.error(f"Erreur lors de la résolution de cible: {e}")
            return None
@@ -1,582 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Optimiseur de Performance Visuelle pour RPA Vision V3
|
||||
|
||||
Ce module optimise les performances du système visuel pour respecter les exigences:
|
||||
- Traitement des captures < 2s
|
||||
- Réactivité mode sélection < 100ms
|
||||
- Cache intelligent pour captures multiples
|
||||
- Traitement non-bloquant des embeddings
|
||||
|
||||
Exigences: 10.1, 10.2, 10.4, 10.5
|
||||
Auteur: Assistant IA
|
||||
Date: 2026-01-07
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import time
|
||||
from typing import Dict, List, Optional, Any, Callable, Tuple
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timedelta
|
||||
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
|
||||
import threading
|
||||
from collections import OrderedDict
|
||||
import hashlib
|
||||
import numpy as np
|
||||
from pathlib import Path
|
||||
|
||||
from core.visual.visual_target_manager import VisualTarget
|
||||
from core.models import BBox, ScreenState
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@dataclass
class PerformanceMetrics:
    """Live performance counters for the visual subsystem."""
    capture_processing_time: float = 0.0    # screenshot processing time (ms)
    selection_response_time: float = 0.0    # selection-mode response time (ms)
    embedding_processing_time: float = 0.0  # embedding computation time (ms)
    cache_hit_rate: float = 0.0             # cache hit rate (%)
    memory_usage_mb: float = 0.0            # memory usage (MB)
    active_background_tasks: int = 0        # currently running background tasks
||||
@dataclass
class CacheEntry:
    """One entry of the optimizer's cache, with bookkeeping for eviction."""
    key: str
    data: Any
    created_at: datetime
    last_accessed: datetime   # refreshed on each hit (recency ordering)
    access_count: int = 0
    size_bytes: int = 0       # approximate payload size, for the MB budget
||||
@dataclass
class ProcessingTask:
    """A background processing task queued by the optimizer."""
    task_id: str
    task_type: str
    created_at: datetime
    callback: Optional[Callable] = None  # invoked when the task completes
    priority: int = 1  # 1 = high, 2 = normal, 3 = low
||||
class VisualPerformanceOptimizer:
    """
    Performance optimizer for the visual subsystem.

    Provides an in-memory cache, worker pools and background task processing
    so the pipeline meets its latency targets (capture processing < 2 s,
    selection-mode response < 100 ms).
    """

    def __init__(self, max_workers: int = 4, cache_size_mb: int = 100):
        """
        Initialise the performance optimizer.

        Args:
            max_workers: maximum number of workers for parallel processing
            cache_size_mb: maximum cache size in MB
        """
        # Configuration
        self.max_workers = max_workers
        self.cache_size_mb = cache_size_mb
        self.cache_max_entries = 1000

        # Latency targets from the requirements (10.1 / 10.2).
        self.capture_processing_threshold_ms = 2000  # 2 seconds
        self.selection_response_threshold_ms = 100   # 100 milliseconds

        # Cache: OrderedDict preserves insertion/access order for eviction;
        # RLock because cache helpers may re-enter while holding the lock.
        self._cache: OrderedDict[str, CacheEntry] = OrderedDict()
        self._cache_lock = threading.RLock()
        self._cache_size_bytes = 0

        # Worker pools: threads for I/O-ish work, processes for CPU-bound.
        # BUG FIX: `max_workers // 2` is 0 when max_workers < 2, and
        # ProcessPoolExecutor raises ValueError for max_workers <= 0 —
        # clamp to at least one process worker.
        self._thread_pool = ThreadPoolExecutor(max_workers=max_workers)
        self._process_pool = ProcessPoolExecutor(max_workers=max(1, max_workers // 2))

        # Background task management
        self._background_tasks: Dict[str, ProcessingTask] = {}
        self._task_queue = asyncio.PriorityQueue()
        self._task_processor_running = False

        # Performance metrics, guarded by their own lock.
        self.metrics = PerformanceMetrics()
        self._metrics_lock = threading.Lock()

        # Precomputed artefacts reused across captures.
        self._precomputed_embeddings: Dict[str, np.ndarray] = {}
        self._screenshot_thumbnails: Dict[str, bytes] = {}

        logger.info(f"Optimiseur de performance initialisé - Workers: {max_workers}, Cache: {cache_size_mb}MB")
||||
async def start_optimizer(self):
|
||||
"""Démarre l'optimiseur de performance"""
|
||||
if not self._task_processor_running:
|
||||
self._task_processor_running = True
|
||||
asyncio.create_task(self._background_task_processor())
|
||||
logger.info("Optimiseur de performance démarré")
|
||||
|
||||
async def stop_optimizer(self):
|
||||
"""Arrête l'optimiseur de performance"""
|
||||
self._task_processor_running = False
|
||||
|
||||
# Fermer les pools
|
||||
self._thread_pool.shutdown(wait=True)
|
||||
self._process_pool.shutdown(wait=True)
|
||||
|
||||
logger.info("Optimiseur de performance arrêté")
|
||||
|
||||
async def optimize_capture_processing(
|
||||
self,
|
||||
screenshot_data: bytes,
|
||||
processing_func: Callable,
|
||||
cache_key: Optional[str] = None
|
||||
) -> Tuple[Any, float]:
|
||||
"""
|
||||
Optimise le traitement d'une capture d'écran.
|
||||
|
||||
Args:
|
||||
screenshot_data: Données de la capture
|
||||
processing_func: Fonction de traitement
|
||||
cache_key: Clé de cache optionnelle
|
||||
|
||||
Returns:
|
||||
Tuple (résultat, temps_traitement_ms)
|
||||
"""
|
||||
start_time = time.perf_counter()
|
||||
|
||||
try:
|
||||
# Générer une clé de cache si non fournie
|
||||
if cache_key is None:
|
||||
cache_key = self._generate_cache_key(screenshot_data)
|
||||
|
||||
# Vérifier le cache
|
||||
cached_result = self._get_from_cache(cache_key)
|
||||
if cached_result is not None:
|
||||
processing_time = (time.perf_counter() - start_time) * 1000
|
||||
logger.debug(f"Cache hit pour capture - {processing_time:.1f}ms")
|
||||
return cached_result, processing_time
|
||||
|
||||
# Traitement optimisé
|
||||
if len(screenshot_data) > 1024 * 1024: # > 1MB
|
||||
# Traitement en processus séparé pour les grandes images
|
||||
result = await self._process_in_background(
|
||||
processing_func, screenshot_data, priority=1
|
||||
)
|
||||
else:
|
||||
# Traitement en thread pour les petites images
|
||||
loop = asyncio.get_event_loop()
|
||||
result = await loop.run_in_executor(
|
||||
self._thread_pool, processing_func, screenshot_data
|
||||
)
|
||||
|
||||
# Mettre en cache le résultat
|
||||
self._put_in_cache(cache_key, result, len(screenshot_data))
|
||||
|
||||
processing_time = (time.perf_counter() - start_time) * 1000
|
||||
|
||||
# Vérifier le seuil de performance
|
||||
if processing_time > self.capture_processing_threshold_ms:
|
||||
logger.warning(f"Traitement de capture lent: {processing_time:.1f}ms > {self.capture_processing_threshold_ms}ms")
|
||||
|
||||
# Mettre à jour les métriques
|
||||
with self._metrics_lock:
|
||||
self.metrics.capture_processing_time = processing_time
|
||||
|
||||
return result, processing_time
|
||||
|
||||
except Exception as e:
|
||||
processing_time = (time.perf_counter() - start_time) * 1000
|
||||
logger.error(f"Erreur lors du traitement de capture: {e}")
|
||||
raise
|
||||
|
||||
async def optimize_selection_response(
|
||||
self,
|
||||
mouse_position: Tuple[int, int],
|
||||
screen_elements: List[Any],
|
||||
highlight_func: Callable
|
||||
) -> float:
|
||||
"""
|
||||
Optimise la réactivité du mode sélection.
|
||||
|
||||
Args:
|
||||
mouse_position: Position de la souris
|
||||
screen_elements: Éléments à l'écran
|
||||
highlight_func: Fonction de surbrillance
|
||||
|
||||
Returns:
|
||||
Temps de réponse en millisecondes
|
||||
"""
|
||||
start_time = time.perf_counter()
|
||||
|
||||
try:
|
||||
# Pré-filtrer les éléments par proximité
|
||||
nearby_elements = self._filter_nearby_elements(mouse_position, screen_elements)
|
||||
|
||||
# Traitement ultra-rapide en thread
|
||||
loop = asyncio.get_event_loop()
|
||||
await loop.run_in_executor(
|
||||
self._thread_pool, highlight_func, nearby_elements
|
||||
)
|
||||
|
||||
response_time = (time.perf_counter() - start_time) * 1000
|
||||
|
||||
# Vérifier le seuil de performance
|
||||
if response_time > self.selection_response_threshold_ms:
|
||||
logger.warning(f"Réponse sélection lente: {response_time:.1f}ms > {self.selection_response_threshold_ms}ms")
|
||||
|
||||
# Mettre à jour les métriques
|
||||
with self._metrics_lock:
|
||||
self.metrics.selection_response_time = response_time
|
||||
|
||||
return response_time
|
||||
|
||||
except Exception as e:
|
||||
response_time = (time.perf_counter() - start_time) * 1000
|
||||
logger.error(f"Erreur lors de l'optimisation de sélection: {e}")
|
||||
return response_time
|
||||
|
||||
async def process_embedding_async(
|
||||
self,
|
||||
target: VisualTarget,
|
||||
embedding_func: Callable,
|
||||
callback: Optional[Callable] = None
|
||||
) -> str:
|
||||
"""
|
||||
Traite un embedding de manière asynchrone et non-bloquante.
|
||||
|
||||
Args:
|
||||
target: Cible visuelle
|
||||
embedding_func: Fonction de génération d'embedding
|
||||
callback: Fonction de callback optionnelle
|
||||
|
||||
Returns:
|
||||
ID de la tâche
|
||||
"""
|
||||
task_id = f"embedding_{target.signature}_{int(time.time() * 1000)}"
|
||||
|
||||
# Créer la tâche de traitement
|
||||
task = ProcessingTask(
|
||||
task_id=task_id,
|
||||
task_type="embedding",
|
||||
created_at=datetime.now(),
|
||||
callback=callback,
|
||||
priority=2 # Priorité normale
|
||||
)
|
||||
|
||||
# Ajouter à la queue
|
||||
await self._task_queue.put((task.priority, task_id, task, target, embedding_func))
|
||||
|
||||
self._background_tasks[task_id] = task
|
||||
|
||||
with self._metrics_lock:
|
||||
self.metrics.active_background_tasks = len(self._background_tasks)
|
||||
|
||||
logger.debug(f"Tâche d'embedding créée: {task_id}")
|
||||
return task_id
|
||||
|
||||
def precompute_common_embeddings(self, common_elements: List[VisualTarget]):
|
||||
"""
|
||||
Pré-calcule les embeddings des éléments communs.
|
||||
|
||||
Args:
|
||||
common_elements: Liste des éléments communs à pré-calculer
|
||||
"""
|
||||
logger.info(f"Pré-calcul de {len(common_elements)} embeddings communs")
|
||||
|
||||
for target in common_elements:
|
||||
if target.signature not in self._precomputed_embeddings:
|
||||
# Stocker l'embedding pré-calculé
|
||||
self._precomputed_embeddings[target.signature] = target.embedding.copy()
|
||||
|
||||
# Créer une miniature de la capture
|
||||
thumbnail = self._create_thumbnail(target.screenshot)
|
||||
if thumbnail:
|
||||
self._screenshot_thumbnails[target.signature] = thumbnail
|
||||
|
||||
logger.info(f"Pré-calcul terminé - {len(self._precomputed_embeddings)} embeddings en cache")
|
||||
|
||||
def get_cached_embedding(self, signature: str) -> Optional[np.ndarray]:
|
||||
"""
|
||||
Récupère un embedding pré-calculé.
|
||||
|
||||
Args:
|
||||
signature: Signature de la cible
|
||||
|
||||
Returns:
|
||||
Embedding ou None si non trouvé
|
||||
"""
|
||||
return self._precomputed_embeddings.get(signature)
|
||||
|
||||
def get_thumbnail(self, signature: str) -> Optional[bytes]:
|
||||
"""
|
||||
Récupère une miniature de capture.
|
||||
|
||||
Args:
|
||||
signature: Signature de la cible
|
||||
|
||||
Returns:
|
||||
Données de la miniature ou None
|
||||
"""
|
||||
return self._screenshot_thumbnails.get(signature)
|
||||
|
||||
async def optimize_multiple_captures(
|
||||
self,
|
||||
capture_requests: List[Tuple[str, Callable]],
|
||||
batch_size: int = 5
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Optimise le traitement de multiples captures en lot.
|
||||
|
||||
Args:
|
||||
capture_requests: Liste de (cache_key, processing_func)
|
||||
batch_size: Taille des lots de traitement
|
||||
|
||||
Returns:
|
||||
Dictionnaire des résultats par cache_key
|
||||
"""
|
||||
results = {}
|
||||
|
||||
# Traiter par lots
|
||||
for i in range(0, len(capture_requests), batch_size):
|
||||
batch = capture_requests[i:i + batch_size]
|
||||
|
||||
# Traitement parallèle du lot
|
||||
batch_tasks = []
|
||||
for cache_key, processing_func in batch:
|
||||
task = asyncio.create_task(
|
||||
self._process_capture_with_cache(cache_key, processing_func)
|
||||
)
|
||||
batch_tasks.append((cache_key, task))
|
||||
|
||||
# Attendre les résultats du lot
|
||||
for cache_key, task in batch_tasks:
|
||||
try:
|
||||
result = await task
|
||||
results[cache_key] = result
|
||||
except Exception as e:
|
||||
logger.error(f"Erreur lors du traitement de {cache_key}: {e}")
|
||||
results[cache_key] = None
|
||||
|
||||
logger.info(f"Traitement de {len(capture_requests)} captures terminé")
|
||||
return results
|
||||
|
||||
# Méthodes de cache
|
||||
|
||||
def _get_from_cache(self, key: str) -> Optional[Any]:
|
||||
"""Récupère une valeur du cache"""
|
||||
with self._cache_lock:
|
||||
if key in self._cache:
|
||||
entry = self._cache[key]
|
||||
entry.last_accessed = datetime.now()
|
||||
entry.access_count += 1
|
||||
|
||||
# Déplacer en fin (LRU)
|
||||
self._cache.move_to_end(key)
|
||||
|
||||
# Mettre à jour les métriques
|
||||
self._update_cache_hit_rate(True)
|
||||
|
||||
return entry.data
|
||||
|
||||
self._update_cache_hit_rate(False)
|
||||
return None
|
||||
|
||||
def _put_in_cache(self, key: str, data: Any, size_bytes: int):
|
||||
"""Ajoute une valeur au cache"""
|
||||
with self._cache_lock:
|
||||
# Vérifier la taille
|
||||
max_size_bytes = self.cache_size_mb * 1024 * 1024
|
||||
|
||||
# Nettoyer le cache si nécessaire
|
||||
while (self._cache_size_bytes + size_bytes > max_size_bytes or
|
||||
len(self._cache) >= self.cache_max_entries):
|
||||
if not self._cache:
|
||||
break
|
||||
|
||||
# Supprimer l'entrée la moins récemment utilisée
|
||||
oldest_key, oldest_entry = self._cache.popitem(last=False)
|
||||
self._cache_size_bytes -= oldest_entry.size_bytes
|
||||
|
||||
# Ajouter la nouvelle entrée
|
||||
entry = CacheEntry(
|
||||
key=key,
|
||||
data=data,
|
||||
created_at=datetime.now(),
|
||||
last_accessed=datetime.now(),
|
||||
size_bytes=size_bytes
|
||||
)
|
||||
|
||||
self._cache[key] = entry
|
||||
self._cache_size_bytes += size_bytes
|
||||
|
||||
def _update_cache_hit_rate(self, hit: bool):
|
||||
"""Met à jour le taux de succès du cache"""
|
||||
# Implémentation simplifiée - à améliorer avec un historique glissant
|
||||
with self._metrics_lock:
|
||||
if hit:
|
||||
self.metrics.cache_hit_rate = min(100.0, self.metrics.cache_hit_rate + 0.1)
|
||||
else:
|
||||
self.metrics.cache_hit_rate = max(0.0, self.metrics.cache_hit_rate - 0.1)
|
||||
|
||||
# Méthodes utilitaires
|
||||
|
||||
def _generate_cache_key(self, data: bytes) -> str:
|
||||
"""Génère une clé de cache pour des données"""
|
||||
return hashlib.md5(data).hexdigest()
|
||||
|
||||
def _filter_nearby_elements(
|
||||
self,
|
||||
mouse_position: Tuple[int, int],
|
||||
elements: List[Any],
|
||||
radius: int = 50
|
||||
) -> List[Any]:
|
||||
"""Filtre les éléments proches de la souris"""
|
||||
mx, my = mouse_position
|
||||
nearby = []
|
||||
|
||||
for element in elements:
|
||||
if hasattr(element, 'bounding_box'):
|
||||
bbox = element.bounding_box
|
||||
# Calculer la distance au centre de l'élément
|
||||
cx = bbox.x + bbox.width / 2
|
||||
cy = bbox.y + bbox.height / 2
|
||||
distance = ((mx - cx) ** 2 + (my - cy) ** 2) ** 0.5
|
||||
|
||||
if distance <= radius:
|
||||
nearby.append(element)
|
||||
|
||||
return nearby
|
||||
|
||||
def _create_thumbnail(self, screenshot_b64: str, max_size: int = 64) -> Optional[bytes]:
|
||||
"""Crée une miniature d'une capture d'écran"""
|
||||
try:
|
||||
import base64
|
||||
from PIL import Image
|
||||
import io
|
||||
|
||||
# Décoder l'image
|
||||
image_data = base64.b64decode(screenshot_b64)
|
||||
image = Image.open(io.BytesIO(image_data))
|
||||
|
||||
# Redimensionner
|
||||
image.thumbnail((max_size, max_size), Image.Resampling.LANCZOS)
|
||||
|
||||
# Encoder en bytes
|
||||
output = io.BytesIO()
|
||||
image.save(output, format='PNG', optimize=True)
|
||||
return output.getvalue()
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"Erreur lors de la création de miniature: {e}")
|
||||
return None
|
||||
|
||||
async def _process_in_background(
|
||||
self,
|
||||
func: Callable,
|
||||
data: Any,
|
||||
priority: int = 2
|
||||
) -> Any:
|
||||
"""Traite une fonction en arrière-plan"""
|
||||
loop = asyncio.get_event_loop()
|
||||
|
||||
# Utiliser le pool de processus pour les tâches lourdes
|
||||
if priority == 1: # Haute priorité
|
||||
return await loop.run_in_executor(self._process_pool, func, data)
|
||||
else:
|
||||
return await loop.run_in_executor(self._thread_pool, func, data)
|
||||
|
||||
async def _process_capture_with_cache(self, cache_key: str, processing_func: Callable) -> Any:
|
||||
"""Traite une capture avec gestion de cache"""
|
||||
# Vérifier le cache
|
||||
cached_result = self._get_from_cache(cache_key)
|
||||
if cached_result is not None:
|
||||
return cached_result
|
||||
|
||||
# Traiter et mettre en cache
|
||||
result = await self._process_in_background(processing_func, None)
|
||||
self._put_in_cache(cache_key, result, 1024) # Taille estimée
|
||||
|
||||
return result
|
||||
|
||||
async def _background_task_processor(self):
|
||||
"""Processeur de tâches en arrière-plan"""
|
||||
while self._task_processor_running:
|
||||
try:
|
||||
# Attendre une tâche avec timeout
|
||||
priority, task_id, task, *args = await asyncio.wait_for(
|
||||
self._task_queue.get(), timeout=1.0
|
||||
)
|
||||
|
||||
start_time = time.perf_counter()
|
||||
|
||||
# Traiter la tâche
|
||||
if task.task_type == "embedding":
|
||||
target, embedding_func = args
|
||||
result = await self._process_in_background(embedding_func, target)
|
||||
|
||||
# Appeler le callback si fourni
|
||||
if task.callback:
|
||||
await task.callback(target, result)
|
||||
|
||||
# Nettoyer la tâche
|
||||
if task_id in self._background_tasks:
|
||||
del self._background_tasks[task_id]
|
||||
|
||||
processing_time = (time.perf_counter() - start_time) * 1000
|
||||
|
||||
# Mettre à jour les métriques
|
||||
with self._metrics_lock:
|
||||
self.metrics.embedding_processing_time = processing_time
|
||||
self.metrics.active_background_tasks = len(self._background_tasks)
|
||||
|
||||
logger.debug(f"Tâche {task_id} terminée en {processing_time:.1f}ms")
|
||||
|
||||
except asyncio.TimeoutError:
|
||||
continue
|
||||
except Exception as e:
|
||||
logger.error(f"Erreur dans le processeur de tâches: {e}")
|
||||
|
||||
def get_performance_metrics(self) -> Dict[str, Any]:
|
||||
"""Récupère les métriques de performance"""
|
||||
with self._metrics_lock:
|
||||
return {
|
||||
'capture_processing_time_ms': self.metrics.capture_processing_time,
|
||||
'selection_response_time_ms': self.metrics.selection_response_time,
|
||||
'embedding_processing_time_ms': self.metrics.embedding_processing_time,
|
||||
'cache_hit_rate_percent': self.metrics.cache_hit_rate,
|
||||
'memory_usage_mb': self.metrics.memory_usage_mb,
|
||||
'active_background_tasks': self.metrics.active_background_tasks,
|
||||
'cache_entries': len(self._cache),
|
||||
'cache_size_bytes': self._cache_size_bytes,
|
||||
'precomputed_embeddings': len(self._precomputed_embeddings),
|
||||
'performance_thresholds': {
|
||||
'capture_processing_ms': self.capture_processing_threshold_ms,
|
||||
'selection_response_ms': self.selection_response_threshold_ms
|
||||
}
|
||||
}
|
||||
|
||||
def clear_cache(self):
|
||||
"""Vide le cache"""
|
||||
with self._cache_lock:
|
||||
self._cache.clear()
|
||||
self._cache_size_bytes = 0
|
||||
logger.info("Cache vidé")
|
||||
|
||||
def optimize_memory_usage(self):
|
||||
"""Optimise l'usage mémoire"""
|
||||
# Nettoyer les embeddings anciens
|
||||
cutoff_time = datetime.now() - timedelta(hours=1)
|
||||
|
||||
old_embeddings = [
|
||||
sig for sig, _ in self._precomputed_embeddings.items()
|
||||
# Critère de nettoyage basé sur l'usage
|
||||
]
|
||||
|
||||
for sig in old_embeddings[:len(old_embeddings)//2]: # Nettoyer la moitié
|
||||
if sig in self._precomputed_embeddings:
|
||||
del self._precomputed_embeddings[sig]
|
||||
if sig in self._screenshot_thumbnails:
|
||||
del self._screenshot_thumbnails[sig]
|
||||
|
||||
logger.info(f"Nettoyage mémoire - {len(old_embeddings)//2} embeddings supprimés")
|
||||
@@ -1,661 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Gestionnaire de Persistance Visuelle pour RPA Vision V3
|
||||
|
||||
Ce gestionnaire gère la sauvegarde et la récupération complète des données visuelles,
|
||||
incluant les embeddings, captures d'écran, métadonnées et validation post-chargement.
|
||||
|
||||
Exigences: 9.1, 9.2, 9.3, 9.4, 9.5
|
||||
Auteur: Assistant IA
|
||||
Date: 2026-01-07
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import json
|
||||
import base64
|
||||
import gzip
|
||||
import pickle # noqa: S403 - usage legacy restreint au fallback de migration
|
||||
import io
|
||||
from typing import Dict, List, Optional, Any, Tuple
|
||||
from dataclasses import dataclass, asdict
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
import numpy as np
|
||||
|
||||
from core.visual.visual_target_manager import VisualTarget, VisualTargetManager
|
||||
from core.visual.screenshot_validation_manager import ScreenshotValidationManager, ValidationResult
|
||||
from core.security.signed_serializer import (
|
||||
SignatureVerificationError,
|
||||
UnsupportedFormatError,
|
||||
dumps_signed,
|
||||
loads_signed,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@dataclass
class VisualWorkflowData:
    """Complete visual data of a workflow, as persisted in a .vwd file."""
    workflow_id: str
    version: str
    created_at: datetime
    visual_targets: Dict[str, VisualTarget]  # target_signature -> target (deduplicated)
    target_signatures: Dict[str, str]  # node_id -> target_signature
    validation_history: Dict[str, List[ValidationResult]]  # keyed by target_signature
    metadata: Dict[str, Any]
|
||||
|
||||
@dataclass
class PersistenceStats:
    """Persistence statistics, refreshed on each save/load."""
    total_targets: int
    total_size_bytes: int      # bytes actually written to disk (post-compression)
    compression_ratio: float   # raw/compressed; 1.0 when compression is disabled
    save_duration_ms: float
    load_duration_ms: float
|
||||
|
||||
class VisualPersistenceManager:
|
||||
"""
|
||||
Gestionnaire de persistance pour les données visuelles.
|
||||
|
||||
Gère la sauvegarde complète des embeddings, captures d'écran et métadonnées
|
||||
avec compression, validation et récupération intelligente.
|
||||
"""
|
||||
|
||||
    def __init__(
        self,
        target_manager: VisualTargetManager,
        validation_manager: ScreenshotValidationManager,
        storage_path: str = "data/visual_workflows"
    ):
        """
        Initialise the persistence manager.

        Args:
            target_manager: Visual-target manager.
            validation_manager: Screenshot-validation manager.
            storage_path: Root directory for persisted workflow data.
        """
        self.target_manager = target_manager
        self.validation_manager = validation_manager
        self.storage_path = Path(storage_path)

        # Configuration (all toggles default to the safe/verbose side)
        self.compression_enabled = True
        self.validation_on_load = True
        self.backup_enabled = True
        self.max_backup_versions = 5

        # Statistics, zeroed until the first save/load
        self.stats = PersistenceStats(0, 0, 0.0, 0.0, 0.0)

        # Ensure the storage directory exists
        self.storage_path.mkdir(parents=True, exist_ok=True)

        logger.info(f"Gestionnaire de persistance visuelle initialisé - Stockage: {self.storage_path}")
|
||||
|
||||
    async def save_workflow_visual_data(
        self,
        workflow_id: str,
        node_targets: Dict[str, VisualTarget],
        metadata: Optional[Dict[str, Any]] = None
    ) -> bool:
        """
        Save the complete visual data of a workflow.

        Args:
            workflow_id: Workflow ID.
            node_targets: Mapping node_id -> VisualTarget.
            metadata: Optional additional metadata.

        Returns:
            True if the save succeeded; False on any error (never raises).
        """
        start_time = datetime.now()

        try:
            logger.info(f"💾 Sauvegarde des données visuelles pour workflow {workflow_id}")

            # Assemble the persisted structure
            workflow_data = VisualWorkflowData(
                workflow_id=workflow_id,
                version="1.0",
                created_at=datetime.now(),
                visual_targets={},
                target_signatures={},
                validation_history={},
                metadata=metadata or {}
            )

            # Register each target; storing by signature deduplicates
            # targets shared across several nodes
            for node_id, target in node_targets.items():
                if target:
                    workflow_data.visual_targets[target.signature] = target
                    workflow_data.target_signatures[node_id] = target.signature

                    # Attach the target's validation history when available
                    validation_history = await self._get_validation_history(target.signature)
                    if validation_history:
                        workflow_data.validation_history[target.signature] = validation_history

            # Persist to disk
            success = await self._save_workflow_data(workflow_data)

            if success:
                # Optional backup rotation
                if self.backup_enabled:
                    await self._create_backup(workflow_id)

                # Refresh statistics
                duration = (datetime.now() - start_time).total_seconds() * 1000
                self.stats.save_duration_ms = duration
                self.stats.total_targets = len(workflow_data.visual_targets)

                logger.info(f"✅ Sauvegarde terminée en {duration:.0f}ms - {len(workflow_data.visual_targets)} cibles")
                return True

            return False

        except Exception as e:
            logger.error(f"❌ Erreur lors de la sauvegarde: {e}")
            return False
|
||||
|
||||
    async def load_workflow_visual_data(
        self,
        workflow_id: str,
        validate_on_load: Optional[bool] = None
    ) -> Tuple[Dict[str, VisualTarget], Dict[str, ValidationResult]]:
        """
        Load a workflow's visual data, optionally validating each target.

        Args:
            workflow_id: Workflow ID.
            validate_on_load: Override for the instance-level
                ``validation_on_load`` setting (None keeps the default).

        Returns:
            Tuple (node_targets, validation_results); both empty on error
            or when no data exists (never raises).
        """
        start_time = datetime.now()

        try:
            logger.info(f"📂 Chargement des données visuelles pour workflow {workflow_id}")

            workflow_data = await self._load_workflow_data(workflow_id)
            if not workflow_data:
                logger.warning(f"Aucune donnée visuelle trouvée pour {workflow_id}")
                return {}, {}

            # Rebuild the node_id -> VisualTarget mapping from signatures
            node_targets: Dict[str, VisualTarget] = {}
            validation_results: Dict[str, ValidationResult] = {}

            for node_id, target_signature in workflow_data.target_signatures.items():
                if target_signature in workflow_data.visual_targets:
                    target = workflow_data.visual_targets[target_signature]
                    node_targets[node_id] = target

                    # Validate when requested (explicit arg wins over default)
                    should_validate = validate_on_load if validate_on_load is not None else self.validation_on_load
                    if should_validate:
                        validation_result = await self.validation_manager.validate_target_now(target)
                        validation_results[node_id] = validation_result

                        # Attempt recovery for invalid targets that suggest actions
                        if not validation_result.is_valid and validation_result.recovery_actions:
                            updated_target = await self._attempt_target_recovery(target, validation_result)
                            if updated_target:
                                node_targets[node_id] = updated_target

            # Refresh statistics
            duration = (datetime.now() - start_time).total_seconds() * 1000
            self.stats.load_duration_ms = duration

            logger.info(f"✅ Chargement terminé en {duration:.0f}ms - {len(node_targets)} cibles restaurées")

            return node_targets, validation_results

        except Exception as e:
            logger.error(f"❌ Erreur lors du chargement: {e}")
            return {}, {}
|
||||
|
||||
    async def export_workflow_visual_data(
        self,
        workflow_id: str,
        export_path: str,
        include_validation_history: bool = True
    ) -> bool:
        """
        Export a workflow's visual data to a standalone JSON file.

        Args:
            workflow_id: Workflow ID.
            export_path: Destination file path (parents created as needed).
            include_validation_history: Also export the validation history.

        Returns:
            True if the export succeeded; False otherwise (never raises).
        """
        try:
            logger.info(f"📤 Export des données visuelles vers {export_path}")

            # Load the stored data first
            workflow_data = await self._load_workflow_data(workflow_id)
            if not workflow_data:
                logger.error(f"Aucune donnée à exporter pour {workflow_id}")
                return False

            # Build the JSON-serialisable export envelope
            export_data = {
                "workflow_id": workflow_data.workflow_id,
                "version": workflow_data.version,
                "created_at": workflow_data.created_at.isoformat(),
                "exported_at": datetime.now().isoformat(),
                "visual_targets": {},
                "target_signatures": workflow_data.target_signatures,
                "metadata": workflow_data.metadata
            }

            # Serialise each visual target
            for signature, target in workflow_data.visual_targets.items():
                export_data["visual_targets"][signature] = await self._serialize_target_for_export(target)

            # Optionally serialise the validation history
            if include_validation_history:
                export_data["validation_history"] = {}
                for signature, history in workflow_data.validation_history.items():
                    export_data["validation_history"][signature] = [
                        self._serialize_validation_result(result) for result in history
                    ]

            # Write the export file (UTF-8, human-readable)
            export_file = Path(export_path)
            export_file.parent.mkdir(parents=True, exist_ok=True)

            with open(export_file, 'w', encoding='utf-8') as f:
                json.dump(export_data, f, indent=2, ensure_ascii=False)

            logger.info(f"✅ Export terminé: {export_file}")
            return True

        except Exception as e:
            logger.error(f"❌ Erreur lors de l'export: {e}")
            return False
|
||||
|
||||
    async def import_workflow_visual_data(
        self,
        import_path: str,
        target_workflow_id: Optional[str] = None
    ) -> Optional[str]:
        """
        Import visual data from a JSON export file.

        Args:
            import_path: Path of the export file to read.
            target_workflow_id: Optional override for the destination
                workflow ID (falls back to the ID stored in the file).

        Returns:
            The imported workflow ID, or None on failure (never raises).
        """
        try:
            logger.info(f"📥 Import des données visuelles depuis {import_path}")

            import_file = Path(import_path)
            if not import_file.exists():
                logger.error(f"Fichier d'import non trouvé: {import_path}")
                return None

            with open(import_file, 'r', encoding='utf-8') as f:
                import_data = json.load(f)

            # Explicit target ID wins over the one embedded in the file
            workflow_id = target_workflow_id or import_data.get("workflow_id")
            if not workflow_id:
                logger.error("ID de workflow manquant pour l'import")
                return None

            # Rebuild the workflow structure (missing fields get safe defaults)
            workflow_data = VisualWorkflowData(
                workflow_id=workflow_id,
                version=import_data.get("version", "1.0"),
                created_at=datetime.fromisoformat(import_data.get("created_at", datetime.now().isoformat())),
                visual_targets={},
                target_signatures=import_data.get("target_signatures", {}),
                validation_history={},
                metadata=import_data.get("metadata", {})
            )

            # Deserialise the visual targets; failed targets are skipped
            for signature, target_data in import_data.get("visual_targets", {}).items():
                target = await self._deserialize_target_from_import(target_data)
                if target:
                    workflow_data.visual_targets[signature] = target

            # Deserialise the validation history
            for signature, history_data in import_data.get("validation_history", {}).items():
                workflow_data.validation_history[signature] = [
                    self._deserialize_validation_result(result_data)
                    for result_data in history_data
                ]

            # Persist the imported data under the chosen workflow ID
            success = await self._save_workflow_data(workflow_data)

            if success:
                logger.info(f"✅ Import terminé pour workflow {workflow_id}")
                return workflow_id
            else:
                logger.error("Échec de la sauvegarde des données importées")
                return None

        except Exception as e:
            logger.error(f"❌ Erreur lors de l'import: {e}")
            return None
|
||||
|
||||
    async def cleanup_old_data(self, days_to_keep: int = 30) -> int:
        """
        Delete persisted workflow files older than *days_to_keep* days.

        Args:
            days_to_keep: Retention window in days (based on file mtime).

        Returns:
            Number of files deleted (0 on error; never raises).
        """
        try:
            logger.info(f"🧹 Nettoyage des données anciennes (> {days_to_keep} jours)")

            # Files modified before this epoch timestamp are removed
            cutoff_date = datetime.now().timestamp() - (days_to_keep * 24 * 3600)
            deleted_count = 0

            # .vwd = Visual Workflow Data
            for file_path in self.storage_path.glob("*.vwd"):
                if file_path.stat().st_mtime < cutoff_date:
                    file_path.unlink()
                    deleted_count += 1
                    logger.debug(f"Supprimé: {file_path}")

            logger.info(f"✅ Nettoyage terminé - {deleted_count} fichiers supprimés")
            return deleted_count

        except Exception as e:
            logger.error(f"❌ Erreur lors du nettoyage: {e}")
            return 0
|
||||
|
||||
# Méthodes privées
|
||||
|
||||
    async def _save_workflow_data(self, workflow_data: VisualWorkflowData) -> bool:
        """Serialise, optionally gzip-compress and write one workflow file.

        Updates ``stats.compression_ratio`` and ``stats.total_size_bytes``.
        Returns True on success, False on any error (never raises).
        """
        try:
            file_path = self.storage_path / f"{workflow_data.workflow_id}.vwd"

            # Signed-JSON serialisation (see _serialize_workflow_data)
            serialized_data = await self._serialize_workflow_data(workflow_data)

            if self.compression_enabled:
                compressed_data = gzip.compress(serialized_data)
                # ratio > 1.0 means compression actually shrank the payload
                self.stats.compression_ratio = len(serialized_data) / len(compressed_data)
                data_to_write = compressed_data
            else:
                data_to_write = serialized_data
                self.stats.compression_ratio = 1.0

            with open(file_path, 'wb') as f:
                f.write(data_to_write)

            self.stats.total_size_bytes = len(data_to_write)
            return True

        except Exception as e:
            logger.error(f"Erreur lors de la sauvegarde: {e}")
            return False
|
||||
|
||||
async def _load_workflow_data(self, workflow_id: str) -> Optional[VisualWorkflowData]:
    """Load a workflow's visual data from disk.

    Returns:
        The deserialized data, or None when the file does not exist or any
        error occurs (logged).
    """
    try:
        file_path = self.storage_path / f"{workflow_id}.vwd"

        if not file_path.exists():
            return None

        # Read the raw file contents.
        with open(file_path, 'rb') as f:
            data = f.read()

        # Decompress when compression is enabled; BadGzipFile simply means
        # the file was stored uncompressed, so fall through with raw bytes.
        if self.compression_enabled:
            try:
                data = gzip.decompress(data)
            except gzip.BadGzipFile:
                # File was not compressed.
                pass

        # Deserialize back into a VisualWorkflowData.
        workflow_data = await self._deserialize_workflow_data(data)
        return workflow_data

    except Exception as e:
        logger.error(f"Erreur lors du chargement: {e}")
        return None
|
||||
|
||||
async def _serialize_workflow_data(self, workflow_data: VisualWorkflowData) -> bytes:
    """Serialize a workflow's data into HMAC-signed JSON bytes."""
    # Convert the dataclass to a plain dictionary.
    data_dict = asdict(workflow_data)

    # Handle non-JSON-native types.
    data_dict['created_at'] = workflow_data.created_at.isoformat()

    # Serialize each visual target (embeddings become base64 strings).
    serialized_targets = {}
    for signature, target in workflow_data.visual_targets.items():
        serialized_targets[signature] = await self._serialize_visual_target(target)
    data_dict['visual_targets'] = serialized_targets

    # Serialize the per-signature validation history.
    serialized_history = {}
    for signature, history in workflow_data.validation_history.items():
        serialized_history[signature] = [
            self._serialize_validation_result(result) for result in history
        ]
    data_dict['validation_history'] = serialized_history

    # HMAC-signed JSON (see core.security.signed_serializer).
    return dumps_signed(data_dict)
|
||||
|
||||
async def _deserialize_workflow_data(self, data: bytes) -> VisualWorkflowData:
    """Deserialize a workflow's data (HMAC-signed JSON, with a loud legacy
    pickle fallback kept so old files can still be read and migrated).

    Raises:
        SignatureVerificationError: signed payload whose HMAC does not match.
        UnsupportedFormatError: legacy payload when the pickle fallback is
            disabled via RPA_ALLOW_PICKLE_FALLBACK=0.
    """
    try:
        data_dict = loads_signed(data)
    except SignatureVerificationError:
        # Tampered file or different key: refuse outright, no fallback.
        logger.error("Workflow visuel : signature HMAC invalide — refus.")
        raise
    except UnsupportedFormatError:
        # Legacy pickle format: explicit, noisy fallback (opt-out via env).
        import os
        if os.getenv("RPA_ALLOW_PICKLE_FALLBACK", "1") == "0":
            raise
        logger.warning(
            "Workflow visuel au format pickle legacy — lecture de compat, "
            "ré-écrire en JSON signé dès que possible."
        )
        # SECURITY: pickle.loads is a deliberate compat path for local
        # legacy files only — never feed untrusted data through it.
        data_dict = pickle.loads(data)  # noqa: S301 - fallback legacy

    # Rebuild the top-level object; nested maps are filled in below.
    workflow_data = VisualWorkflowData(
        workflow_id=data_dict['workflow_id'],
        version=data_dict['version'],
        created_at=datetime.fromisoformat(data_dict['created_at']),
        visual_targets={},
        target_signatures=data_dict['target_signatures'],
        validation_history={},
        metadata=data_dict['metadata']
    )

    # Deserialize the visual targets.
    for signature, target_data in data_dict['visual_targets'].items():
        target = await self._deserialize_visual_target(target_data)
        workflow_data.visual_targets[signature] = target

    # Deserialize the validation history.
    for signature, history_data in data_dict['validation_history'].items():
        workflow_data.validation_history[signature] = [
            self._deserialize_validation_result(result_data) for result_data in history_data
        ]

    return workflow_data
|
||||
|
||||
async def _serialize_visual_target(self, target: VisualTarget) -> Dict[str, Any]:
    """Serialize a visual target into a JSON-safe dict.

    The numpy embedding is stored as base64 bytes plus its shape and dtype
    so _deserialize_visual_target can rebuild it exactly.
    """
    return {
        'embedding': base64.b64encode(target.embedding.tobytes()).decode('utf-8'),
        'embedding_shape': target.embedding.shape,
        'embedding_dtype': str(target.embedding.dtype),
        # NOTE(review): screenshot is passed through as-is — presumably
        # already a JSON-safe value; confirm against VisualTarget.
        'screenshot': target.screenshot,
        'bounding_box': asdict(target.bounding_box),
        'confidence': target.confidence,
        'contextual_info': asdict(target.contextual_info),
        'signature': target.signature,
        'metadata': asdict(target.metadata),
        'created_at': target.created_at.isoformat(),
        'last_validated': target.last_validated.isoformat() if target.last_validated else None,
        'validation_count': target.validation_count
    }
|
||||
|
||||
async def _deserialize_visual_target(self, data: Dict[str, Any]) -> VisualTarget:
    """Rebuild a VisualTarget from its serialized dict form."""
    # Rebuild the numpy embedding from base64 + stored dtype/shape.
    embedding_bytes = base64.b64decode(data['embedding'])
    embedding = np.frombuffer(embedding_bytes, dtype=data['embedding_dtype'])
    embedding = embedding.reshape(data['embedding_shape'])

    # Local import — presumably to avoid an import cycle with core.models;
    # confirm before hoisting to module level.
    from core.models import BBox, ContextualInfo, VisualMetadata

    return VisualTarget(
        embedding=embedding,
        screenshot=data['screenshot'],
        bounding_box=BBox(**data['bounding_box']),
        confidence=data['confidence'],
        contextual_info=ContextualInfo(**data['contextual_info']),
        signature=data['signature'],
        metadata=VisualMetadata(**data['metadata']),
        created_at=datetime.fromisoformat(data['created_at']),
        last_validated=datetime.fromisoformat(data['last_validated']) if data['last_validated'] else None,
        validation_count=data['validation_count']
    )
|
||||
|
||||
def _serialize_validation_result(self, result: ValidationResult) -> Dict[str, Any]:
    """Serialize a validation result (plain dataclass → dict)."""
    return asdict(result)
|
||||
|
||||
def _deserialize_validation_result(self, data: Dict[str, Any]) -> ValidationResult:
    """Rebuild a ValidationResult from its dict form."""
    return ValidationResult(**data)
|
||||
|
||||
async def _serialize_target_for_export(self, target: VisualTarget) -> Dict[str, Any]:
    """Serialize a target for JSON export.

    Currently identical to _serialize_visual_target, whose output is already
    JSON-safe (the embedding bytes are base64-encoded there).
    """
    serialized = await self._serialize_visual_target(target)
    return serialized
|
||||
|
||||
async def _deserialize_target_from_import(self, data: Dict[str, Any]) -> Optional[VisualTarget]:
    """Deserialize a target coming from a JSON import; None on failure."""
    try:
        target = await self._deserialize_visual_target(data)
    except Exception as e:
        logger.error(f"Erreur lors de la désérialisation de cible: {e}")
        return None
    return target
|
||||
|
||||
async def _get_validation_history(self, target_signature: str) -> List[ValidationResult]:
    """Return the validation history for a target signature.

    Placeholder: always returns an empty list until the validation system
    provides a real lookup.
    """
    # To be implemented against the validation system.
    return []
|
||||
|
||||
async def _attempt_target_recovery(
    self,
    target: VisualTarget,
    validation_result: ValidationResult
) -> Optional[VisualTarget]:
    """Try to recover an invalid target via its suggested recovery actions.

    Executes, in order, each auto-executable action whose confidence exceeds
    0.7 and returns the refreshed target after the first one that succeeds.

    Returns:
        The updated target, or None when nothing worked (or on error).
    """
    try:
        # Use the recovery actions proposed by the validation result.
        for action in validation_result.recovery_actions:
            if action.auto_executable and action.confidence > 0.7:
                # Run the recovery action through the validation manager.
                success = await self.validation_manager.execute_recovery_action(
                    target.signature, action
                )
                if success:
                    # Fetch the target as updated by the recovery action.
                    updated_target = await self.target_manager.get_target_by_signature(target.signature)
                    if updated_target:
                        logger.info(f"Cible récupérée avec succès: {target.signature}")
                        return updated_target

        return None

    except Exception as e:
        logger.error(f"Erreur lors de la récupération de cible: {e}")
        return None
|
||||
|
||||
async def _create_backup(self, workflow_id: str) -> bool:
    """Copy a workflow's .vwd file into a timestamped per-workflow backup
    directory, then prune backups beyond the retention limit.

    Returns:
        True on success, False when the source is missing or on error.
    """
    try:
        source_file = self.storage_path / f"{workflow_id}.vwd"
        if not source_file.exists():
            return False

        # One backup directory per workflow.
        backup_dir = self.storage_path / "backups" / workflow_id
        backup_dir.mkdir(parents=True, exist_ok=True)

        # Timestamped name keeps successive backups distinct.
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        destination = backup_dir / f"{workflow_id}_{stamp}.vwd"

        import shutil
        shutil.copy2(source_file, destination)

        # Enforce the retention limit on older backups.
        await self._cleanup_old_backups(backup_dir)

        logger.debug(f"Sauvegarde créée: {destination}")
        return True

    except Exception as e:
        logger.error(f"Erreur lors de la création de sauvegarde: {e}")
        return False
|
||||
|
||||
async def _cleanup_old_backups(self, backup_dir: Path):
    """Delete backups beyond ``self.max_backup_versions``, oldest first."""
    try:
        # Newest first, so everything past the retention count is stale.
        ordered = sorted(
            backup_dir.glob("*.vwd"),
            key=lambda p: p.stat().st_mtime,
            reverse=True,
        )
        for stale in ordered[self.max_backup_versions:]:
            stale.unlink()
            logger.debug(f"Ancienne sauvegarde supprimée: {stale}")

    except Exception as e:
        logger.error(f"Erreur lors du nettoyage des sauvegardes: {e}")
|
||||
|
||||
def get_persistence_stats(self) -> Dict[str, Any]:
    """Return a snapshot of persistence statistics and configuration."""
    return {
        'total_targets': self.stats.total_targets,
        'total_size_bytes': self.stats.total_size_bytes,
        'compression_ratio': self.stats.compression_ratio,
        'save_duration_ms': self.stats.save_duration_ms,
        'load_duration_ms': self.stats.load_duration_ms,
        'compression_enabled': self.compression_enabled,
        'validation_on_load': self.validation_on_load,
        'backup_enabled': self.backup_enabled,
        'storage_path': str(self.storage_path)
    }
|
||||
@@ -1,657 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Outil de Migration de Workflows pour RPA Vision V3
|
||||
|
||||
Cet outil migre les workflows existants utilisant des sélecteurs CSS/XPath
|
||||
vers le système 100% visuel avec signatures visuelles et embeddings.
|
||||
|
||||
Fonctionnalités:
|
||||
- Conversion automatique avec validation
|
||||
- Interface de migration guidée
|
||||
- Préservation de la fonctionnalité des workflows
|
||||
- Sauvegarde et rollback
|
||||
|
||||
Exigences: 9.3, 9.4
|
||||
Auteur: Assistant IA
|
||||
Date: 2026-01-07
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import json
|
||||
from typing import Dict, List, Optional, Any, Tuple
|
||||
from dataclasses import dataclass, asdict
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
import shutil
|
||||
|
||||
from core.visual.visual_target_manager import VisualTarget, VisualTargetManager
|
||||
from core.visual.visual_embedding_manager import VisualEmbeddingManager
|
||||
from core.visual.screenshot_validation_manager import ScreenshotValidationManager
|
||||
from core.capture.screen_capturer import ScreenCapturer
|
||||
from core.detection.ui_detector import UIDetector
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@dataclass
class MigrationTask:
    """Migration state for a single workflow node."""
    node_id: str
    node_type: str
    # Legacy selectors found on the node, keyed by selector kind
    # (css_selector, xpath_selector, ...).
    legacy_selectors: Dict[str, str]
    migration_status: str = "pending"  # pending, in_progress, completed, failed
    # Visual target created for the node once migration succeeded.
    visual_target: Optional[VisualTarget] = None
    error_message: Optional[str] = None
    # Best confidence achieved while locating the element on screen.
    confidence_score: float = 0.0
    manual_review_required: bool = False
|
||||
|
||||
@dataclass
class MigrationReport:
    """Migration report for one workflow.

    Counters (total/migrated/failed/manual_review) are filled in by
    migrate_workflow; success_rate is a percentage in [0, 100].
    """
    workflow_id: str
    workflow_name: str
    migration_started: datetime
    migration_completed: Optional[datetime] = None
    total_nodes: int = 0
    migrated_nodes: int = 0
    failed_nodes: int = 0
    manual_review_nodes: int = 0
    # FIX: the original declared `List[MigrationTask] = None`, a None default
    # on a list-typed field — iterating a fresh report would crash. Use a
    # per-instance empty list instead (backward-compatible: callers that pass
    # an explicit list are unaffected). The forward reference is quoted so the
    # annotation does not require MigrationTask at class-creation time.
    migration_tasks: "List[MigrationTask]" = field(default_factory=list)
    # Path of the pre-migration JSON backup, used by rollback_migration.
    backup_path: Optional[str] = None
    success_rate: float = 0.0
|
||||
|
||||
class WorkflowMigrationTool:
    """
    Migration tool moving workflows onto the 100%-visual system.

    Automatically converts CSS/XPath selectors into visual targets, with
    validation and a guided (interactive) migration flow.
    """

    def __init__(
        self,
        visual_target_manager: VisualTargetManager,
        visual_embedding_manager: VisualEmbeddingManager,
        validation_manager: ScreenshotValidationManager,
        screen_capturer: ScreenCapturer,
        ui_detector: UIDetector,
        migration_storage_path: str = "data/migrations"
    ):
        """
        Initialize the migration tool.

        Args:
            visual_target_manager: Manager of visual targets
            visual_embedding_manager: Manager of visual embeddings
            validation_manager: Screenshot validation manager
            screen_capturer: Screen capturer
            ui_detector: UI element detector
            migration_storage_path: Root directory for backups and reports
        """
        self.visual_target_manager = visual_target_manager
        self.visual_embedding_manager = visual_embedding_manager
        self.validation_manager = validation_manager
        self.screen_capturer = screen_capturer
        self.ui_detector = ui_detector

        # Storage layout: <path>/backups and <path>/reports created lazily.
        self.migration_storage_path = Path(migration_storage_path)
        self.migration_storage_path.mkdir(parents=True, exist_ok=True)

        # Migration thresholds: auto-accept at/above confidence_threshold,
        # offer manual review at/above manual_review_threshold.
        self.confidence_threshold = 0.8
        self.manual_review_threshold = 0.6

        # Selector kinds we can migrate, mapped to their handler coroutine.
        self.supported_selector_types = {
            'css_selector': self._migrate_css_selector,
            'xpath_selector': self._migrate_xpath_selector,
            'id_selector': self._migrate_id_selector,
            'class_selector': self._migrate_class_selector,
            'text_selector': self._migrate_text_selector
        }

        logger.info("Outil de migration de workflows initialisé")
|
||||
|
||||
async def migrate_workflow(
    self,
    workflow_data: Dict[str, Any],
    interactive_mode: bool = True,
    create_backup: bool = True
) -> MigrationReport:
    """
    Migrate a whole workflow to the visual system.

    Args:
        workflow_data: Workflow definition to migrate
        interactive_mode: Allow manual-review handling for uncertain nodes
        create_backup: Write a JSON backup before touching anything

    Returns:
        The migration report (success_rate is 0.0 on critical failure).
    """
    workflow_id = workflow_data.get('id', 'unknown')
    workflow_name = workflow_data.get('name', 'Workflow sans nom')

    logger.info(f"🔄 Début de migration du workflow: {workflow_name} ({workflow_id})")

    # Build the report up-front so even failures are reported.
    report = MigrationReport(
        workflow_id=workflow_id,
        workflow_name=workflow_name,
        migration_started=datetime.now(),
        migration_tasks=[]
    )

    try:
        # Optional pre-migration backup (used later by rollback_migration).
        if create_backup:
            backup_path = await self._create_workflow_backup(workflow_data)
            report.backup_path = backup_path
            logger.info(f"💾 Sauvegarde créée: {backup_path}")

        # Inspect the workflow's nodes.
        nodes = workflow_data.get('nodes', [])
        report.total_nodes = len(nodes)

        # Keep only the nodes that actually carry legacy selectors.
        migration_tasks = []
        for node in nodes:
            task = await self._analyze_node_for_migration(node)
            if task:
                migration_tasks.append(task)
                report.migration_tasks.append(task)

        logger.info(f"📋 {len(migration_tasks)} nœuds nécessitent une migration")

        # Migrate node by node.
        for task in migration_tasks:
            logger.info(f"🔧 Migration du nœud {task.node_id} ({task.node_type})")

            task.migration_status = "in_progress"

            try:
                # Attempt the automatic migration first.
                success = await self._migrate_node_task(task, workflow_data)

                if success:
                    task.migration_status = "completed"
                    report.migrated_nodes += 1
                    logger.info(f"✅ Nœud {task.node_id} migré avec succès")
                else:
                    # Middling confidence + interactive mode → manual review.
                    if (task.confidence_score >= self.manual_review_threshold and
                        interactive_mode):

                        task.manual_review_required = True
                        task.migration_status = "manual_review"
                        report.manual_review_nodes += 1

                        logger.warning(f"⚠️ Nœud {task.node_id} nécessite une révision manuelle")
                    else:
                        task.migration_status = "failed"
                        report.failed_nodes += 1
                        logger.error(f"❌ Échec de migration du nœud {task.node_id}")

            except Exception as e:
                task.migration_status = "failed"
                task.error_message = str(e)
                report.failed_nodes += 1
                logger.error(f"❌ Erreur lors de la migration du nœud {task.node_id}: {e}")

        # Resolve pending manual reviews while still interactive.
        if interactive_mode and report.manual_review_nodes > 0:
            await self._handle_manual_reviews(report, workflow_data)

        # Finalize the report.
        report.migration_completed = datetime.now()
        report.success_rate = (report.migrated_nodes / max(1, report.total_nodes)) * 100

        # Persist the report alongside the backups.
        await self._save_migration_report(report)

        logger.info(f"✅ Migration terminée - Succès: {report.success_rate:.1f}% "
                   f"({report.migrated_nodes}/{report.total_nodes})")

        return report

    except Exception as e:
        logger.error(f"❌ Erreur critique lors de la migration: {e}")
        report.migration_completed = datetime.now()
        report.success_rate = 0.0
        return report
|
||||
|
||||
async def validate_migrated_workflow(
    self,
    workflow_data: Dict[str, Any],
    migration_report: MigrationReport
) -> Dict[str, Any]:
    """
    Validate a migrated workflow by re-testing its visual targets.

    Args:
        workflow_data: The migrated workflow definition
        migration_report: Report produced by migrate_workflow

    Returns:
        A validation report dict (contains an 'error' key on failure).
    """
    logger.info("🔍 Validation du workflow migré")

    validation_report = {
        'workflow_id': workflow_data.get('id'),
        'validation_started': datetime.now(),
        'total_targets': 0,
        'valid_targets': 0,
        'invalid_targets': 0,
        'target_validations': []
    }

    try:
        # Capture the current screen as the validation reference.
        current_screen = await self.screen_capturer.capture_screen()
        screen_state = await self.ui_detector.detect_elements(current_screen)
        # NOTE(review): screen_state is never used below —
        # validate_target_now apparently works from its own capture;
        # confirm whether this detection pass is needed at all.

        # Validate every target that was successfully migrated.
        for task in migration_report.migration_tasks:
            if task.migration_status == "completed" and task.visual_target:
                validation_report['total_targets'] += 1

                # Validate the target against the live screen.
                validation_result = await self.validation_manager.validate_target_now(
                    task.visual_target
                )

                target_validation = {
                    'node_id': task.node_id,
                    'target_signature': task.visual_target.signature,
                    'is_valid': validation_result.is_valid,
                    'confidence': validation_result.confidence,
                    'issues': validation_result.issues
                }

                validation_report['target_validations'].append(target_validation)

                if validation_result.is_valid:
                    validation_report['valid_targets'] += 1
                else:
                    validation_report['invalid_targets'] += 1

        validation_report['validation_completed'] = datetime.now()
        validation_report['success_rate'] = (
            validation_report['valid_targets'] /
            max(1, validation_report['total_targets']) * 100
        )

        logger.info(f"✅ Validation terminée - {validation_report['success_rate']:.1f}% "
                   f"de cibles valides")

        return validation_report

    except Exception as e:
        logger.error(f"❌ Erreur lors de la validation: {e}")
        validation_report['error'] = str(e)
        return validation_report
|
||||
|
||||
async def rollback_migration(
    self,
    migration_report: MigrationReport
) -> bool:
    """
    Undo a migration using the backup recorded in the report.

    Args:
        migration_report: Report of the migration to undo

    Returns:
        True when the rollback succeeded
    """
    try:
        if not migration_report.backup_path:
            logger.error("Aucune sauvegarde disponible pour le rollback")
            return False

        backup_file = Path(migration_report.backup_path)
        if not backup_file.exists():
            logger.error(f"Fichier de sauvegarde non trouvé: {backup_file}")
            return False

        logger.info(f"🔄 Rollback de la migration {migration_report.workflow_id}")

        # Load the backup.
        with open(backup_file, 'r', encoding='utf-8') as f:
            original_workflow = json.load(f)
        # NOTE(review): original_workflow is loaded but never written back
        # anywhere — the rollback only removes the created visual targets.
        # Confirm whether restoring the workflow definition is the caller's
        # responsibility or a missing step here.

        # Remove the visual targets created during migration.
        for task in migration_report.migration_tasks:
            if task.visual_target:
                await self.visual_target_manager.remove_target(task.visual_target.signature)

        logger.info("✅ Rollback terminé avec succès")
        return True

    except Exception as e:
        logger.error(f"❌ Erreur lors du rollback: {e}")
        return False
|
||||
|
||||
# Méthodes privées
|
||||
|
||||
async def _analyze_node_for_migration(self, node: Dict[str, Any]) -> Optional[MigrationTask]:
    """Inspect a node and return a MigrationTask if it carries legacy selectors."""
    node_id = node.get('id', 'unknown')
    node_type = node.get('type', 'unknown')
    parameters = node.get('parameters', {})

    # Collect explicitly-typed legacy selectors first.
    legacy_selectors = {}

    for selector_type in self.supported_selector_types.keys():
        if selector_type in parameters and parameters[selector_type]:
            legacy_selectors[selector_type] = parameters[selector_type]

    # Then look for loosely-named selector parameters and classify them by
    # syntax: XPath starts with // or .//, CSS with '#' or '.'.
    legacy_patterns = ['selector', 'target', 'element_selector', 'locator']
    for pattern in legacy_patterns:
        if pattern in parameters and parameters[pattern]:
            # Determine the selector kind from its shape.
            selector_value = parameters[pattern]
            if isinstance(selector_value, str):
                if selector_value.startswith('//') or selector_value.startswith('.//'):
                    legacy_selectors['xpath_selector'] = selector_value
                elif selector_value.startswith('#') or selector_value.startswith('.'):
                    legacy_selectors['css_selector'] = selector_value
                else:
                    # Anything else is treated as a text selector.
                    legacy_selectors['text_selector'] = selector_value

    # Only nodes with at least one legacy selector need migrating.
    if legacy_selectors:
        return MigrationTask(
            node_id=node_id,
            node_type=node_type,
            legacy_selectors=legacy_selectors
        )

    return None
|
||||
|
||||
async def _migrate_node_task(
    self,
    task: MigrationTask,
    workflow_data: Dict[str, Any]
) -> bool:
    """Migrate one node: locate its element on screen, build a visual target.

    Tries every supported legacy selector, keeps the best-scoring element,
    and records the best confidence on the task. Returns True only when an
    element cleared self.confidence_threshold and a target was created.
    """
    try:
        # Capture the screen and detect UI elements once for this node.
        screenshot = await self.screen_capturer.capture_screen()
        screen_state = await self.ui_detector.detect_elements(screenshot)

        # Try each legacy selector and keep the most confident hit.
        target_element = None
        best_confidence = 0.0

        for selector_type, selector_value in task.legacy_selectors.items():
            if selector_type in self.supported_selector_types:
                migration_func = self.supported_selector_types[selector_type]

                element, confidence = await migration_func(
                    selector_value, screen_state, workflow_data
                )

                if element and confidence > best_confidence:
                    target_element = element
                    best_confidence = confidence

        task.confidence_score = best_confidence

        # Create a visual target only above the auto-accept threshold.
        if target_element and best_confidence >= self.confidence_threshold:
            visual_target = await self.visual_target_manager.create_target_from_element(
                target_element, screenshot
            )

            if visual_target:
                task.visual_target = visual_target
                return True

        return False

    except Exception as e:
        task.error_message = str(e)
        return False
|
||||
|
||||
async def _migrate_css_selector(
    self,
    css_selector: str,
    screen_state: Any,
    workflow_data: Dict[str, Any]
) -> Tuple[Optional[Any], float]:
    """Heuristically map a CSS selector onto a detected UI element.

    Args:
        css_selector: Legacy CSS selector ('#id', '.class', or a bare tag).
        screen_state: Detection result exposing ``ui_elements``.
        workflow_data: Full workflow definition (unused by this heuristic).

    Returns:
        (element, confidence); element is None when nothing matched, while
        the confidence still reflects how specific the selector looked.
    """
    try:
        # FIX: base confidence plus a one-time specificity bonus. The
        # original added the '#'/'.' bonus INSIDE the element loop, so the
        # score inflated once per element and id/class selectors could never
        # return a match. The condition is loop-invariant — hoist it.
        confidence = 0.5
        if '#' in css_selector:       # ID selector: most specific
            confidence += 0.2
        elif '.' in css_selector:     # class selector
            confidence += 0.1

        # Bare tag selectors are matched directly against element types.
        if css_selector in ('button', 'input', 'a'):
            for element in screen_state.ui_elements:
                if element.element_type.lower() == css_selector:
                    return element, confidence + 0.3

        return None, confidence

    except Exception as e:
        logger.error(f"Erreur lors de la migration CSS: {e}")
        return None, 0.0
|
||||
|
||||
async def _migrate_xpath_selector(
    self,
    xpath_selector: str,
    screen_state: Any,
    workflow_data: Dict[str, Any]
) -> Tuple[Optional[Any], float]:
    """Heuristically map an XPath selector onto a detected UI element.

    text()-based XPaths delegate to _find_element_by_text; otherwise a
    confidence score is assembled from the XPath's features and the first
    element of the matching type is returned.
    """
    try:
        confidence = 0.4  # base confidence for XPath

        # text()-based selector: match by visible text instead.
        if 'text()' in xpath_selector:
            text_content = self._extract_text_from_xpath(xpath_selector)
            if text_content:
                return await self._find_element_by_text(text_content, screen_state)

        # An @id predicate makes the selector more trustworthy.
        if '@id' in xpath_selector:
            confidence += 0.2

        # Element-type selector: take the first element of the same type.
        if 'button' in xpath_selector or 'input' in xpath_selector:
            element_type = self._extract_element_type_from_xpath(xpath_selector)
            if element_type:
                for element in screen_state.ui_elements:
                    if element.element_type.lower() == element_type.lower():
                        confidence += 0.3
                        return element, confidence

        return None, confidence

    except Exception as e:
        logger.error(f"Erreur lors de la migration XPath: {e}")
        return None, 0.0
|
||||
|
||||
async def _migrate_id_selector(
    self,
    id_selector: str,
    screen_state: Any,
    workflow_data: Dict[str, Any]
) -> Tuple[Optional[Any], float]:
    """Migrate an ID selector (stub: no element lookup implemented yet)."""
    # ID selectors are usually reliable, hence the high base confidence.
    base_confidence = 0.8
    # Locating an on-screen element with a matching ID is left unimplemented.
    return None, base_confidence
|
||||
|
||||
async def _migrate_class_selector(
    self,
    class_selector: str,
    screen_state: Any,
    workflow_data: Dict[str, Any]
) -> Tuple[Optional[Any], float]:
    """Migrate a class selector (stub: no element lookup implemented yet)."""
    # Class selectors are moderately specific.
    base_confidence = 0.6
    # Class-based element matching is left unimplemented.
    return None, base_confidence
|
||||
|
||||
async def _migrate_text_selector(
    self,
    text_selector: str,
    screen_state: Any,
    workflow_data: Dict[str, Any]
) -> Tuple[Optional[Any], float]:
    """Migrate a text-based selector by matching on visible element text."""
    located = await self._find_element_by_text(text_selector, screen_state)
    return located
|
||||
|
||||
async def _find_element_by_text(
    self,
    text: str,
    screen_state: Any
) -> Tuple[Optional[Any], float]:
    """Locate the first on-screen element whose text contains *text*.

    Returns:
        (element, confidence) — 0.9 for an exact case-insensitive match,
        0.7 for a substring match; (None, 0.0) otherwise or on error.
    """
    try:
        needle = text.lower()
        for candidate in screen_state.ui_elements:
            content = candidate.text_content
            if not content or needle not in content.lower():
                continue
            # Exact matches score higher than partial ones.
            score = 0.9 if content.lower() == needle else 0.7
            return candidate, score

        return None, 0.0

    except Exception as e:
        logger.error(f"Erreur lors de la recherche par texte: {e}")
        return None, 0.0
|
||||
|
||||
def _extract_text_from_xpath(self, xpath: str) -> Optional[str]:
    """Extract the literal text targeted by an XPath selector, if any.

    Recognizes both text()='value' and contains(text(),'value') forms;
    returns None when neither pattern is present.
    """
    try:
        import re

        patterns = (
            # text()='value'
            r"text\(\)\s*=\s*['\"]([^'\"]+)['\"]",
            # contains(text(),'value')
            r"contains\s*\(\s*text\(\)\s*,\s*['\"]([^'\"]+)['\"]",
        )
        for pattern in patterns:
            found = re.search(pattern, xpath)
            if found:
                return found.group(1)

        return None

    except Exception:
        return None
|
||||
|
||||
def _extract_element_type_from_xpath(self, xpath: str) -> Optional[str]:
    """Extract the element/tag name from an XPath like //button[...]."""
    try:
        import re

        # Patterns such as //button or //input.
        found = re.search(r"//(\w+)", xpath)
        return found.group(1) if found else None

    except Exception:
        return None
|
||||
|
||||
async def _create_workflow_backup(self, workflow_data: Dict[str, Any]) -> str:
    """Write a timestamped JSON backup of the workflow; return its path."""
    workflow_id = workflow_data.get('id', 'unknown')
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")

    destination = (
        self.migration_storage_path
        / "backups"
        / f"workflow_{workflow_id}_{stamp}_backup.json"
    )
    destination.parent.mkdir(parents=True, exist_ok=True)

    # default=str makes non-JSON values (e.g. datetimes) serializable.
    with open(destination, 'w', encoding='utf-8') as handle:
        json.dump(workflow_data, handle, indent=2, ensure_ascii=False, default=str)

    return str(destination)
|
||||
|
||||
async def _save_migration_report(self, report: MigrationReport):
    """Write the migration report as JSON under <storage>/reports/."""
    report_filename = f"migration_report_{report.workflow_id}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
    report_path = self.migration_storage_path / "reports" / report_filename

    report_path.parent.mkdir(parents=True, exist_ok=True)

    # Dataclass → plain dict for JSON.
    report_dict = asdict(report)

    # Datetimes are not JSON-native; store ISO-8601 strings.
    report_dict['migration_started'] = report.migration_started.isoformat()
    if report.migration_completed:
        report_dict['migration_completed'] = report.migration_completed.isoformat()

    # default=str catches any remaining non-JSON values (e.g. nested dates).
    with open(report_path, 'w', encoding='utf-8') as f:
        json.dump(report_dict, f, indent=2, ensure_ascii=False, default=str)

    logger.info(f"Rapport de migration sauvegardé: {report_path}")
|
||||
|
||||
async def _handle_manual_reviews(
    self,
    report: MigrationReport,
    workflow_data: Dict[str, Any]
):
    """Resolve tasks flagged for manual review (interactive mode).

    Placeholder behavior: instead of opening a real review UI, tasks with a
    confidence of at least 0.7 are auto-accepted and the report's counters
    are adjusted accordingly.
    """
    logger.info(f"🔍 {report.manual_review_nodes} nœuds nécessitent une révision manuelle")

    for task in report.migration_tasks:
        if task.manual_review_required:
            logger.info(f"📝 Révision manuelle requise pour le nœud {task.node_id}")

            # A real implementation would open an interface here letting the
            # user confirm or correct the migration.

            # For now, simulate an automatic validation.
            if task.confidence_score >= 0.7:
                task.migration_status = "completed"
                task.manual_review_required = False
                report.migrated_nodes += 1
                report.manual_review_nodes -= 1
                logger.info(f"✅ Révision automatique acceptée pour {task.node_id}")
|
||||
def get_migration_statistics(self) -> Dict[str, Any]:
    """Aggregate counts and configuration for past migrations."""
    reports_dir = self.migration_storage_path / "reports"
    backups_dir = self.migration_storage_path / "backups"

    def _count_json(directory: Path) -> int:
        # Directories are created lazily, so they may not exist yet.
        return sum(1 for _ in directory.glob("*.json")) if directory.exists() else 0

    return {
        'total_migrations': _count_json(reports_dir),
        'total_backups': _count_json(backups_dir),
        'migration_storage_path': str(self.migration_storage_path),
        'supported_selector_types': list(self.supported_selector_types.keys()),
        'confidence_threshold': self.confidence_threshold,
        'manual_review_threshold': self.manual_review_threshold
    }
|
||||
Reference in New Issue
Block a user