""" GraphBuilder - Construction Automatique de Workflow Graphs Ce module implémente la construction automatique de graphes de workflows en analysant les sessions enregistrées et en détectant les patterns répétés. Architecture: 1. Création de ScreenStates depuis RawSession 2. Calcul de State Embeddings pour tous les états 3. Détection de patterns via clustering DBSCAN 4. Construction de WorkflowNodes depuis clusters 5. Construction de WorkflowEdges depuis transitions Algorithme de Détection de Patterns: - Utilise DBSCAN (Density-Based Spatial Clustering of Applications with Noise) - Métrique: similarité cosinus entre embeddings - Filtre les clusters avec moins de N répétitions - Calcule un prototype (moyenne) pour chaque cluster Example: >>> builder = GraphBuilder(min_pattern_repetitions=3) >>> workflow = builder.build_from_session(raw_session) >>> print(f"Workflow with {len(workflow.nodes)} nodes") """ import logging import os from typing import List, Dict, Optional, Tuple, Any from collections import defaultdict, Counter from datetime import datetime from pathlib import Path import numpy as np from sklearn.cluster import DBSCAN from core.models.raw_session import RawSession, Event from core.models.screen_state import ( ScreenState, WindowContext, RawLevel, PerceptionLevel, ContextLevel, EmbeddingRef ) from core.models.workflow_graph import ( Workflow, WorkflowNode, WorkflowEdge, ScreenTemplate, Action, TargetSpec, EdgeConstraints, PostConditions, WindowConstraint, TextConstraint, UIConstraint, EmbeddingPrototype, ) from core.embedding.state_embedding_builder import StateEmbeddingBuilder from core.embedding.faiss_manager import FAISSManager from core.training.quality_validator import TrainingQualityValidator, QualityReport logger = logging.getLogger(__name__) class GraphBuilder: """ Constructeur de graphes de workflows depuis sessions brutes. Cette classe analyse une RawSession pour construire automatiquement un Workflow avec ses nodes et edges en détectant les patterns répétés. Attributes: embedding_builder: Builder pour calculer les State Embeddings faiss_manager: Manager FAISS pour indexation (optionnel) min_pattern_repetitions: Nombre minimum de répétitions pour un pattern clustering_eps: Distance maximum entre points pour DBSCAN clustering_min_samples: Nombre minimum d'échantillons par cluster Example: >>> builder = GraphBuilder(min_pattern_repetitions=3) >>> workflow = builder.build_from_session(session, "Login Workflow") """ def __init__( self, embedding_builder: Optional[StateEmbeddingBuilder] = None, faiss_manager: Optional[FAISSManager] = None, quality_validator: Optional[TrainingQualityValidator] = None, min_pattern_repetitions: int = 3, clustering_eps: float = 0.15, clustering_min_samples: int = 2, enable_quality_validation: bool = True, ): """ Initialiser le GraphBuilder. Args: embedding_builder: Builder pour State Embeddings (créé si None) faiss_manager: Manager FAISS pour indexation (optionnel) quality_validator: Validateur de qualité (créé si None) min_pattern_repetitions: Nombre minimum de répétitions pour un pattern clustering_eps: Epsilon pour DBSCAN (distance max entre points) clustering_min_samples: Nombre minimum d'échantillons pour un cluster enable_quality_validation: Activer la validation de qualité """ self.embedding_builder = embedding_builder or StateEmbeddingBuilder() self.faiss_manager = faiss_manager self.quality_validator = quality_validator or TrainingQualityValidator() self.min_pattern_repetitions = min_pattern_repetitions self.clustering_eps = clustering_eps self.clustering_min_samples = clustering_min_samples self.enable_quality_validation = enable_quality_validation self._screen_analyzer = None # ScreenAnalyzer (lazy import) logger.info( f"GraphBuilder initialized: " f"min_repetitions={min_pattern_repetitions}, " f"eps={clustering_eps}, " f"min_samples={clustering_min_samples}, " f"quality_validation={enable_quality_validation}" ) def build_from_session( self, session: RawSession, workflow_name: Optional[str] = None, precomputed_states: Optional[List["ScreenState"]] = None, ) -> Workflow: """ Construire un Workflow complet depuis une RawSession. Processus: 1. Créer ScreenStates depuis screenshots (ou utiliser precomputed_states) 2. Calculer embeddings pour chaque état 3. Détecter patterns via clustering 4. Construire nodes depuis clusters 5. Construire edges depuis transitions Args: session: Session brute à analyser workflow_name: Nom du workflow (généré si None) precomputed_states: ScreenStates déjà analysés (streaming). Si fourni, saute l'étape 1 (pas de re-analyse via ScreenAnalyzer). Returns: Workflow construit avec nodes et edges Raises: ValueError: Si la session est vide ou invalide """ if not precomputed_states and not session.screenshots: raise ValueError("Session has no screenshots and no precomputed states") logger.info( f"Building workflow from session {session.session_id} " f"with {len(precomputed_states or session.screenshots)} " f"{'precomputed states' if precomputed_states else 'screenshots'}" ) # Étape 1: Créer ScreenStates (ou réutiliser ceux pré-calculés) if precomputed_states: screen_states = precomputed_states logger.debug(f"Using {len(screen_states)} precomputed screen states") else: screen_states = self._create_screen_states(session) logger.debug(f"Created {len(screen_states)} screen states") # Étape 2: Calculer embeddings embeddings = self._compute_embeddings(screen_states) logger.debug(f"Computed {len(embeddings)} embeddings") # Étape 3: Détecter patterns clusters = self._detect_patterns(embeddings, screen_states) logger.info(f"Detected {len(clusters)} patterns") # Étape 4: Construire nodes nodes = self._build_nodes(clusters, screen_states, embeddings) logger.info(f"Built {len(nodes)} workflow nodes") # Étape 5: Construire edges edges = self._build_edges(nodes, screen_states, session) logger.info(f"Built {len(edges)} workflow edges") # Créer Workflow from core.models.workflow_graph import WorkflowStats, SafetyRules, LearningConfig workflow = Workflow( workflow_id=workflow_name or f"workflow_{session.session_id}", name=workflow_name or "Unnamed Workflow", description="Auto-generated workflow", version=1, learning_state="OBSERVATION", created_at=datetime.now(), updated_at=datetime.now(), entry_nodes=[nodes[0].node_id] if nodes else [], end_nodes=[], nodes=nodes, edges=edges, safety_rules=SafetyRules(), stats=WorkflowStats(), learning=LearningConfig() ) # Étape 6: Validation de qualité quality_report = None if self.enable_quality_validation and screen_states: quality_report = self._validate_workflow_quality( workflow, screen_states, embeddings, clusters ) # Stocker le rapport dans les métadonnées du workflow workflow.metadata = workflow.metadata or {} workflow.metadata['quality_report'] = quality_report.to_dict() # Ajuster learning_state basé sur la qualité if quality_report.is_production_ready: workflow.learning_state = "AUTO_CANDIDATE" logger.info("Workflow qualité suffisante -> AUTO_CANDIDATE") else: workflow.learning_state = "OBSERVATION" logger.warning( f"Qualité insuffisante ({quality_report.overall_score:.3f}), " f"workflow reste en OBSERVATION" ) logger.info( f"Workflow '{workflow.name}' built successfully: " f"{len(nodes)} nodes, {len(edges)} edges" ) return workflow def _validate_workflow_quality( self, workflow: Workflow, screen_states: List[ScreenState], embeddings: List[np.ndarray], clusters: Dict[int, List[int]] ) -> QualityReport: """ Valider la qualité du workflow construit. Args: workflow: Workflow à valider screen_states: États d'écran utilisés embeddings: Embeddings calculés clusters: Clusters détectés Returns: QualityReport avec métriques et recommandations """ logger.info(f"Validation qualité du workflow {workflow.workflow_id}") # Préparer les données pour le validateur embeddings_array = np.array(embeddings) # Créer labels depuis les clusters labels = np.full(len(embeddings), -1) # -1 = bruit for cluster_id, indices in clusters.items(): for idx in indices: labels[idx] = cluster_id # Valider avec le TrainingQualityValidator report = self.quality_validator.validate_workflow( workflow=workflow, observations=screen_states, embeddings=embeddings_array, labels=labels ) logger.info( f"Validation terminée: score={report.overall_score:.3f}, " f"production_ready={report.is_production_ready}" ) return report def _create_screen_states(self, session: RawSession) -> List[ScreenState]: """ Créer ScreenStates enrichis depuis les screenshots de la session. Pour chaque screenshot: 1. Trouver l'événement associé pour le contexte de fenêtre 2. Créer les 4 niveaux du ScreenState 3. Optionnellement détecter les éléments UI Args: session: Session brute Returns: Liste de ScreenStates enrichis """ screen_states = [] # Créer un mapping screenshot_id -> événement screenshot_to_event = {} for event in session.events: if event.screenshot_id: screenshot_to_event[event.screenshot_id] = event for i, screenshot in enumerate(session.screenshots): # Trouver l'événement associé event = screenshot_to_event.get(screenshot.screenshot_id) # Créer WindowContext depuis l'événement if event and event.window: window = WindowContext( app_name=event.window.app_name, window_title=event.window.title, screen_resolution=session.environment.get("screen", {}).get("primary_resolution", [1920, 1080]), workspace="main" ) else: window = WindowContext( app_name="unknown", window_title="Unknown", screen_resolution=[1920, 1080], workspace="main" ) # Créer RawLevel # Construire chemin absolu : data/training/sessions/{session_id}/{session_id}/{relative_path} screenshot_absolute_path = f"data/training/sessions/{session.session_id}/{session.session_id}/{screenshot.relative_path}" screenshot_path = Path(screenshot_absolute_path) raw = RawLevel( screenshot_path=str(screenshot_path), capture_method="mss", file_size_bytes=screenshot_path.stat().st_size if screenshot_path.exists() else 0 ) # Créer PerceptionLevel — enrichir avec OCR si le screenshot existe detected_text = [] text_method = "none" if screenshot_path.exists(): try: if self._screen_analyzer is None: from core.pipeline.screen_analyzer import ScreenAnalyzer self._screen_analyzer = ScreenAnalyzer(session_id=session.session_id) extracted = self._screen_analyzer._extract_text(str(screenshot_path)) if extracted: detected_text = extracted text_method = self._screen_analyzer._get_ocr_method_name() except Exception as e: logger.debug(f"OCR échoué pour {screenshot_path}: {e}") perception = PerceptionLevel( embedding=EmbeddingRef( provider="openclip_ViT-B-32", vector_id=f"data/embeddings/screens/{session.session_id}_state_{i:04d}.npy", dimensions=512 ), detected_text=detected_text, text_detection_method=text_method, confidence_avg=0.85 if detected_text else 0.0 ) # Créer ContextLevel context = ContextLevel( current_workflow_candidate=None, workflow_step=i, user_id=session.user.get("id", "unknown"), tags=list(session.context.get("tags", [])) if isinstance(session.context.get("tags"), list) else [], business_variables={} ) # Parser timestamp if isinstance(screenshot.captured_at, str): timestamp = datetime.fromisoformat(screenshot.captured_at.replace('Z', '+00:00')) else: timestamp = screenshot.captured_at # Créer ScreenState complet state = ScreenState( screen_state_id=f"{session.session_id}_state_{i:04d}", timestamp=timestamp, session_id=session.session_id, window=window, raw=raw, perception=perception, context=context, metadata={ "screenshot_id": screenshot.screenshot_id, "event_type": event.type if event else None, "event_time": event.t if event else None }, ui_elements=[] # Sera rempli par UIDetector si disponible ) screen_states.append(state) logger.info(f"Created {len(screen_states)} enriched screen states") return screen_states def _compute_embeddings( self, screen_states: List[ScreenState] ) -> List[np.ndarray]: """ Calculer State Embeddings pour tous les états. Utilise StateEmbeddingBuilder pour générer des embeddings multi-modaux (image + texte + UI). Ajoute optionnellement les embeddings à l'index FAISS. Args: screen_states: Liste de ScreenStates Returns: Liste de vecteurs d'embeddings (numpy arrays) """ embeddings = [] for state in screen_states: # Construire embedding state_embedding = self.embedding_builder.build(state) vector = state_embedding.get_vector() embeddings.append(vector) # Ajouter à FAISS si disponible if self.faiss_manager: self.faiss_manager.add_embedding( state.screen_state_id, vector, {"state_id": state.screen_state_id}, ) return embeddings def _detect_patterns( self, embeddings: List[np.ndarray], screen_states: List[ScreenState], ) -> Dict[int, List[int]]: """ Détecter patterns répétés via clustering DBSCAN. Algorithme: 1. Convertir embeddings en matrice numpy 2. Appliquer DBSCAN avec métrique cosinus 3. Grouper états par cluster 4. Filtrer clusters avec assez de répétitions Args: embeddings: Vecteurs d'embeddings screen_states: ScreenStates correspondants Returns: Dictionnaire {cluster_id: [indices des états]} Note: Les états non assignés (bruit) ont label=-1 et sont ignorés """ if len(embeddings) < self.min_pattern_repetitions: logger.warning( f"Not enough states ({len(embeddings)}) for pattern detection " f"(minimum: {self.min_pattern_repetitions})" ) return {} # Convertir en matrice numpy X = np.array(embeddings) # Clustering DBSCAN clustering = DBSCAN( eps=self.clustering_eps, min_samples=self.clustering_min_samples, metric="cosine", ) labels = clustering.fit_predict(X) # Grouper par cluster clusters = defaultdict(list) noise_count = 0 for idx, label in enumerate(labels): if label == -1: noise_count += 1 else: clusters[label].append(idx) # Filtrer clusters avec assez de répétitions filtered_clusters = { cluster_id: indices for cluster_id, indices in clusters.items() if len(indices) >= self.min_pattern_repetitions } logger.info( f"Clustering results: {len(filtered_clusters)} patterns, " f"{noise_count} noise points, " f"{len(clusters) - len(filtered_clusters)} small clusters filtered" ) return filtered_clusters def _build_nodes( self, clusters: Dict[int, List[int]], screen_states: List[ScreenState], embeddings: List[np.ndarray], ) -> List[WorkflowNode]: """ Construire WorkflowNodes depuis les clusters détectés. Pour chaque cluster: 1. Calculer embedding prototype (moyenne normalisée) 2. Extraire contraintes depuis états du cluster 3. Créer ScreenTemplate 4. Créer WorkflowNode Args: clusters: Clusters détectés {cluster_id: [indices]} screen_states: ScreenStates embeddings: Embeddings Returns: Liste de WorkflowNodes """ nodes = [] for cluster_id, indices in clusters.items(): # Calculer embedding prototype (moyenne) cluster_embeddings = [embeddings[i] for i in indices] prototype = np.mean(cluster_embeddings, axis=0) prototype = prototype / np.linalg.norm(prototype) # Normaliser # Extraire contraintes depuis les états du cluster cluster_states = [screen_states[i] for i in indices] template = self._create_screen_template(cluster_states, prototype) # Créer node node = WorkflowNode( node_id=f"node_{cluster_id:03d}", name=f"State Pattern {cluster_id}", description=f"Pattern auto-détecté ({len(indices)} observations)", template=template, metadata={ "observation_count": len(indices), "_prototype_vector": prototype.tolist(), }, ) nodes.append(node) logger.debug( f"Created node {node.node_id} with {len(indices)} observations" ) return nodes def _create_screen_template( self, states: List[ScreenState], prototype_embedding: np.ndarray, ) -> ScreenTemplate: """ Créer un ScreenTemplate depuis un cluster d'états. Extrait les contraintes communes à tous les états du cluster : - window_title_pattern : titre de fenêtre commun - required_text_patterns : textes présents dans la majorité des états - required_ui_elements : rôles/types UI récurrents Args: states: États du cluster prototype_embedding: Embedding prototype Returns: ScreenTemplate avec contraintes extraites """ # --- Extraction du titre de fenêtre commun --- window_title_pattern = self._extract_window_pattern(states) # --- Extraction des textes récurrents --- required_text_patterns = self._extract_common_texts(states) # --- Extraction des éléments UI récurrents --- required_ui_elements = self._extract_common_ui_elements(states) # Construire les sous-objets de contraintes window_constraint = WindowConstraint( title_pattern=window_title_pattern, title_contains=window_title_pattern, ) text_constraint = TextConstraint( required_texts=required_text_patterns, ) ui_roles = [ e.get("role", "") for e in required_ui_elements if e.get("role") ] ui_constraint = UIConstraint( required_roles=ui_roles, ) embedding_proto = EmbeddingPrototype( provider="openclip_ViT-B-32", vector_id="", # Le vecteur est stocké dans node.metadata._prototype_vector min_cosine_similarity=0.85, sample_count=len(states), ) return ScreenTemplate( window=window_constraint, text=text_constraint, ui=ui_constraint, embedding=embedding_proto, ) def _extract_window_pattern(self, states: List[ScreenState]) -> Optional[str]: """Extraire un pattern de titre de fenêtre commun aux états du cluster.""" titles = [s.window.window_title for s in states if s.window.window_title] if not titles: return None # Si tous les titres sont identiques, retourner directement if len(set(titles)) == 1: return titles[0] # Trouver le préfixe commun le plus long prefix = os.path.commonprefix(titles) if len(prefix) >= 5: return prefix.rstrip(" -–—|") # Fallback: le titre le plus fréquent from collections import Counter most_common = Counter(titles).most_common(1)[0][0] return most_common def _extract_common_texts( self, states: List[ScreenState], min_presence_ratio: float = 0.6 ) -> List[str]: """ Extraire les textes présents dans la majorité des états du cluster. Args: states: États du cluster min_presence_ratio: Proportion minimale de présence (0.6 = 60% des états) """ if not states: return [] # Collecter les textes de chaque état text_counts: Dict[str, int] = defaultdict(int) states_with_text = 0 for state in states: if hasattr(state.perception, 'detected_text') and state.perception.detected_text: states_with_text += 1 seen_in_state = set() for text in state.perception.detected_text: normalized = text.strip().lower() if len(normalized) >= 3 and normalized not in seen_in_state: text_counts[normalized] += 1 seen_in_state.add(normalized) if states_with_text == 0: return [] # Garder les textes présents dans au moins min_presence_ratio des états threshold = max(2, int(states_with_text * min_presence_ratio)) common_texts = [ text for text, count in text_counts.items() if count >= threshold ] # Limiter à 10 textes les plus fréquents common_texts.sort(key=lambda t: text_counts[t], reverse=True) return common_texts[:10] def _extract_common_ui_elements( self, states: List[ScreenState], min_presence_ratio: float = 0.5 ) -> List[Dict[str, Any]]: """ Extraire les types/rôles d'éléments UI récurrents dans le cluster. Retourne une liste de contraintes UI au format: [{"type": "button", "role": "validate", "min_count": 1}, ...] """ if not states: return [] # Compter les paires (type, role) dans chaque état role_counts: Dict[str, int] = defaultdict(int) type_counts: Dict[str, int] = defaultdict(int) states_with_ui = 0 for state in states: if state.ui_elements: states_with_ui += 1 seen_roles = set() seen_types = set() for el in state.ui_elements: el_type = getattr(el, 'type', 'unknown') el_role = getattr(el, 'role', 'unknown') if el_role != 'unknown' and el_role not in seen_roles: role_counts[el_role] += 1 seen_roles.add(el_role) if el_type != 'unknown' and el_type not in seen_types: type_counts[el_type] += 1 seen_types.add(el_type) if states_with_ui == 0: return [] threshold = max(2, int(states_with_ui * min_presence_ratio)) constraints = [] # Ajouter les rôles récurrents for role, count in role_counts.items(): if count >= threshold: constraints.append({ "role": role, "min_count": 1, }) # Limiter à 8 contraintes constraints.sort(key=lambda c: role_counts.get(c.get("role", ""), 0), reverse=True) return constraints[:8] def _build_edges( self, nodes: List[WorkflowNode], screen_states: List[ScreenState], session: RawSession, ) -> List[WorkflowEdge]: """ Construire WorkflowEdges depuis les transitions observées. Algorithme: 1. Mapper chaque ScreenState vers son node (via embedding similarity) 2. Identifier les transitions (state_i -> state_j où node change) 3. Extraire l'action depuis l'événement entre les deux états 4. Créer WorkflowEdge avec action et conditions Args: nodes: WorkflowNodes construits screen_states: ScreenStates session: Session brute (pour événements) Returns: Liste de WorkflowEdges """ if not nodes or len(screen_states) < 2: logger.warning("Not enough data to build edges") return [] edges = [] edge_counts = defaultdict(int) # Pour compter les occurrences de chaque transition # Étape 1: Mapper chaque état vers son node state_to_node = self._map_states_to_nodes(screen_states, nodes) # Étape 2: Créer un mapping screenshot_id -> événement screenshot_to_event = {} for event in session.events: if event.screenshot_id: screenshot_to_event[event.screenshot_id] = event # Étape 3: Parcourir les transitions for i in range(len(screen_states) - 1): current_state = screen_states[i] next_state = screen_states[i + 1] current_node_id = state_to_node.get(current_state.screen_state_id) next_node_id = state_to_node.get(next_state.screen_state_id) # Si les deux états sont dans des nodes différents, c'est une transition if current_node_id and next_node_id and current_node_id != next_node_id: # Trouver l'événement qui a causé la transition event = self._find_transition_event( current_state, next_state, session.events ) # Créer l'edge edge_key = f"{current_node_id}_to_{next_node_id}" edge_counts[edge_key] += 1 # Ne créer l'edge qu'une fois, mais compter les occurrences if edge_counts[edge_key] == 1: edge = self._create_edge( current_node_id, next_node_id, event, edge_key ) edges.append(edge) # Mettre à jour les stats des edges avec les comptages for edge in edges: edge_key = f"{edge.from_node}_to_{edge.to_node}" edge.stats.execution_count = edge_counts[edge_key] edge.stats.success_count = edge_counts[edge_key] logger.info(f"Built {len(edges)} edges from {sum(edge_counts.values())} transitions") return edges def _map_states_to_nodes( self, screen_states: List[ScreenState], nodes: List[WorkflowNode] ) -> Dict[str, str]: """ Mapper chaque ScreenState vers le node le plus proche. Utilise la similarité d'embedding pour trouver le meilleur match. """ state_to_node = {} # Récupérer les embeddings des prototypes de nodes node_prototypes = {} for node in nodes: # Priorité : vecteur en mémoire (metadata), sinon chargement depuis disque proto_list = node.metadata.get("_prototype_vector") if proto_list is not None: node_prototypes[node.node_id] = np.array(proto_list, dtype=np.float32) elif node.template and node.template.embedding and node.template.embedding.vector_id: proto_path = Path(node.template.embedding.vector_id) if proto_path.exists(): node_prototypes[node.node_id] = np.load(proto_path) if not node_prototypes: logger.warning("No node prototypes available for mapping") return state_to_node # Pour chaque état, trouver le node le plus proche for state in screen_states: # Calculer embedding de l'état try: state_embedding = self.embedding_builder.build(state) state_vector = state_embedding.get_vector() # Trouver le node avec la meilleure similarité best_node_id = None best_similarity = -1 for node_id, prototype in node_prototypes.items(): similarity = np.dot(state_vector, prototype) if similarity > best_similarity: best_similarity = similarity best_node_id = node_id if best_node_id and best_similarity > 0.7: # Seuil minimum state_to_node[state.screen_state_id] = best_node_id except Exception as e: logger.warning(f"Failed to map state {state.screen_state_id}: {e}") return state_to_node def _find_transition_event( self, current_state: ScreenState, next_state: ScreenState, events: List[Event] ) -> Optional[Event]: """ Trouver l'événement qui a causé la transition entre deux états. Cherche l'événement (clic, frappe) qui s'est produit entre les deux screenshots. """ current_time = current_state.metadata.get("event_time", 0) next_time = next_state.metadata.get("event_time", float('inf')) # Chercher les événements d'action entre les deux timestamps action_events = [] for event in events: if current_time <= event.t < next_time: if event.type in ["mouse_click", "key_press", "text_input"]: action_events.append(event) # Retourner le dernier événement d'action (celui qui a probablement causé la transition) if action_events: return action_events[-1] return None def _create_edge( self, from_node: str, to_node: str, event: Optional[Event], edge_id: str ) -> WorkflowEdge: """ Créer un WorkflowEdge depuis une transition observée. """ # Déterminer le type d'action if event: action_type = event.type action_params = {} if action_type == "mouse_click": action_params = { "button": event.data.get("button", "left"), "position": event.data.get("pos", [0, 0]), "wait_after_ms": 500 } target_role = "unknown_element" # Sera affiné avec détection UI elif action_type == "key_press": action_params = { "keys": event.data.get("keys", []), "wait_after_ms": 200 } target_role = "keyboard_input" elif action_type == "text_input": action_params = { "text": event.data.get("text", ""), "wait_after_ms": 300 } target_role = "text_field" else: action_params = {} target_role = "unknown" else: action_type = "unknown" action_params = {} target_role = "unknown" # Créer l'action action = Action( type=action_type, target=TargetSpec( by_role=target_role, selection_policy="first", fallback_strategy="visual_similarity" ), parameters=action_params ) # Créer les contraintes constraints = EdgeConstraints( pre_conditions={}, required_confidence=0.8, max_wait_time_ms=5000 ) # Créer les post-conditions post_conditions = PostConditions( expected_node=to_node, window_change_expected=False, new_ui_elements_expected=[], timeout_ms=3000 ) # Créer l'edge from core.models.workflow_graph import EdgeStats return WorkflowEdge( edge_id=edge_id, from_node=from_node, to_node=to_node, action=action, constraints=constraints, post_conditions=post_conditions, stats=EdgeStats(), metadata={ "created_from_event": event.type if event else None, "auto_generated": True } ) def main(): """Point d'entrée pour tests manuels.""" logging.basicConfig( level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) builder = GraphBuilder(min_pattern_repetitions=3) logger.info(f"GraphBuilder initialized: {builder}") logger.info("Ready to build workflows from sessions") if __name__ == "__main__": main()