v1.0 - Version stable: multi-PC, détection UI-DETR-1, 3 modes exécution
- Frontend v4 accessible sur réseau local (192.168.1.40) - Ports ouverts: 3002 (frontend), 5001 (backend), 5004 (dashboard) - Ollama GPU fonctionnel - Self-healing interactif - Dashboard confiance Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
4
visual_workflow_builder/backend/services/__init__.py
Normal file
4
visual_workflow_builder/backend/services/__init__.py
Normal file
@@ -0,0 +1,4 @@
|
||||
"""Package des services backend.
|
||||
|
||||
Auteur : Dom, Alice, Kiro - 08 janvier 2026
|
||||
"""
|
||||
328
visual_workflow_builder/backend/services/anchor_image_service.py
Normal file
328
visual_workflow_builder/backend/services/anchor_image_service.py
Normal file
@@ -0,0 +1,328 @@
|
||||
"""
|
||||
Service de stockage des images d'ancres visuelles côté serveur.
|
||||
|
||||
Auteur : Dom, Alice, Kiro - 21 janvier 2026
|
||||
|
||||
Ce service gère le stockage des images d'ancres sur disque pour éviter
|
||||
les problèmes de mémoire causés par le stockage base64 dans les workflows.
|
||||
|
||||
Structure sur disque :
|
||||
/backend/data/anchor_images/
|
||||
/{anchor_id}/
|
||||
original.png # Image originale (crop de la zone sélectionnée)
|
||||
thumbnail.jpg # 200x150, JPEG q80 (~10-30 Ko)
|
||||
metadata.json # dimensions, timestamp, bounding_box
|
||||
"""
|
||||
|
||||
import os
|
||||
import json
|
||||
import base64
|
||||
import uuid
|
||||
import shutil
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Optional, Dict, Any, Tuple
|
||||
from PIL import Image
|
||||
from io import BytesIO
|
||||
|
||||
|
||||
# Configuration
|
||||
DATA_DIR = Path(__file__).parent.parent / 'data' / 'anchor_images'
|
||||
THUMBNAIL_SIZE = (200, 150)
|
||||
THUMBNAIL_QUALITY = 80
|
||||
|
||||
|
||||
def ensure_data_dir():
    """Create the anchor-image data directory (and parents) if missing."""
    os.makedirs(DATA_DIR, exist_ok=True)
|
||||
|
||||
|
||||
def generate_anchor_id() -> str:
    """Generate a unique anchor identifier (random hex + creation timestamp)."""
    random_part = uuid.uuid4().hex[:12]
    stamp = int(datetime.now().timestamp())
    return "anchor_{}_{}".format(random_part, stamp)
|
||||
|
||||
|
||||
def decode_base64_image(image_base64: str) -> Image.Image:
    """Decode a base64-encoded image into a PIL Image object."""
    # Strip an optional "data:image/...;base64," data-URL prefix.
    _, separator, payload = image_base64.partition(',')
    raw_bytes = base64.b64decode(payload if separator else image_base64)
    return Image.open(BytesIO(raw_bytes))
|
||||
|
||||
|
||||
def crop_image(image: Image.Image, bounding_box: Dict[str, int], margin: int = 10) -> Image.Image:
    """Crop *image* to the bounding box, padded by *margin* pixels.

    Args:
        image: Source PIL image.
        bounding_box: Mapping with keys x, y, width, height.
        margin: Extra padding in pixels kept around the selected zone.

    Returns:
        The cropped image.
    """
    left = max(0, int(bounding_box['x']) - margin)
    top = max(0, int(bounding_box['y']) - margin)
    padded_w = int(bounding_box['width']) + 2 * margin
    padded_h = int(bounding_box['height']) + 2 * margin

    # Clamp the far edges so the crop never exceeds the image bounds.
    right = min(image.width, left + padded_w)
    bottom = min(image.height, top + padded_h)

    return image.crop((left, top, right, bottom))
|
||||
|
||||
|
||||
def create_thumbnail(image: Image.Image, size: Tuple[int, int] = THUMBNAIL_SIZE) -> Image.Image:
    """Create a thumbnail of *image*, preserving aspect ratio.

    Args:
        image: Source PIL image (left untouched; a copy is resized).
        size: Target (width, height) bounding size.

    Returns:
        The thumbnail image.
    """
    thumb = image.copy()
    # LANCZOS resampling gives the best quality when downscaling.
    thumb.thumbnail(size, Image.Resampling.LANCZOS)
    return thumb
|
||||
|
||||
|
||||
def save_anchor_image(
    anchor_id: Optional[str],
    image_base64: str,
    bounding_box: Dict[str, int],
    metadata: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """
    Save an anchor image to disk (original crop + thumbnail + metadata).

    Args:
        anchor_id: Anchor identifier (generated when None/empty).
        image_base64: Full screenshot encoded in base64.
        bounding_box: Selection zone {x, y, width, height}.
        metadata: Extra metadata stored under the 'extra' key.

    Returns:
        Dictionary with anchor_id, relative URLs and the stored metadata.

    Raises:
        ValueError: On any failure; the anchor directory is removed first
            so no half-written anchor is left on disk.
    """
    ensure_data_dir()

    # Generate the ID when none was supplied.
    if not anchor_id:
        anchor_id = generate_anchor_id()

    # Create the per-anchor directory.
    anchor_dir = DATA_DIR / anchor_id
    anchor_dir.mkdir(parents=True, exist_ok=True)

    try:
        # Decode the full screenshot.
        full_image = decode_base64_image(image_base64)

        # Crop it to the selected bounding box (with margin).
        cropped_image = crop_image(full_image, bounding_box)

        # Convert to RGB when needed (JPEG thumbnails cannot carry alpha).
        if cropped_image.mode in ('RGBA', 'P'):
            # Composite onto a white background.
            cropped_rgb = Image.new('RGB', cropped_image.size, (255, 255, 255))
            if cropped_image.mode == 'RGBA':
                # Use the alpha channel as the paste mask.
                cropped_rgb.paste(cropped_image, mask=cropped_image.split()[3])
            else:
                cropped_rgb.paste(cropped_image)
            cropped_image = cropped_rgb

        # Save the original (cropped) image as PNG.
        original_path = anchor_dir / 'original.png'
        cropped_image.save(str(original_path), 'PNG', optimize=True)

        # Create and save the JPEG thumbnail.
        thumbnail = create_thumbnail(cropped_image)
        thumbnail_path = anchor_dir / 'thumbnail.jpg'
        thumbnail.save(str(thumbnail_path), 'JPEG', quality=THUMBNAIL_QUALITY, optimize=True)

        # Assemble the metadata record.
        meta = {
            'anchor_id': anchor_id,
            'bounding_box': bounding_box,
            'original_size': {
                'width': cropped_image.width,
                'height': cropped_image.height
            },
            'thumbnail_size': {
                'width': thumbnail.width,
                'height': thumbnail.height
            },
            'created_at': datetime.now().isoformat(),
            'original_file_size': original_path.stat().st_size,
            'thumbnail_file_size': thumbnail_path.stat().st_size,
        }

        # Attach caller-supplied extra metadata, if any.
        if metadata:
            meta['extra'] = metadata

        # Persist the metadata next to the images.
        metadata_path = anchor_dir / 'metadata.json'
        with open(metadata_path, 'w', encoding='utf-8') as f:
            json.dump(meta, f, indent=2, ensure_ascii=False)

        # Return the stored information and serving URLs.
        return {
            'success': True,
            'anchor_id': anchor_id,
            'thumbnail_url': f'/api/anchor-images/{anchor_id}/thumbnail',
            'original_url': f'/api/anchor-images/{anchor_id}/original',
            'metadata': meta
        }

    except Exception as e:
        # Roll back: remove the partially written anchor directory.
        if anchor_dir.exists():
            shutil.rmtree(anchor_dir)
        raise ValueError(f"Erreur lors de la sauvegarde de l'image: {str(e)}")
|
||||
|
||||
|
||||
def get_thumbnail_path(anchor_id: str) -> Optional[Path]:
    """Return the thumbnail file path for *anchor_id*, or None if absent."""
    candidate = DATA_DIR / anchor_id / 'thumbnail.jpg'
    if candidate.exists():
        return candidate
    return None
|
||||
|
||||
|
||||
def get_original_path(anchor_id: str) -> Optional[Path]:
    """Return the original image file path for *anchor_id*, or None if absent."""
    candidate = DATA_DIR / anchor_id / 'original.png'
    if candidate.exists():
        return candidate
    return None
|
||||
|
||||
|
||||
def get_anchor_metadata(anchor_id: str) -> Optional[Dict[str, Any]]:
    """Load and return the metadata of an anchor, or None when it does not exist."""
    meta_file = DATA_DIR / anchor_id / 'metadata.json'
    if not meta_file.exists():
        return None
    return json.loads(meta_file.read_text(encoding='utf-8'))
|
||||
|
||||
|
||||
def delete_anchor_image(anchor_id: str) -> bool:
    """Delete an anchor's directory and all its files.

    Returns:
        True when the anchor existed and was removed, False otherwise.
    """
    target = DATA_DIR / anchor_id
    if target.exists():
        shutil.rmtree(target)
        return True
    return False
|
||||
|
||||
|
||||
def list_anchor_images() -> list:
    """List every stored anchor image.

    Returns:
        Metadata dicts for all anchors, newest first.
    """
    ensure_data_dir()

    collected = []
    for entry in DATA_DIR.iterdir():
        if not entry.is_dir():
            continue
        meta = get_anchor_metadata(entry.name)
        # Skip directories with missing/empty metadata.
        if meta:
            collected.append(meta)

    # Newest anchors first.
    collected.sort(key=lambda m: m.get('created_at', ''), reverse=True)
    return collected
|
||||
|
||||
|
||||
def cleanup_old_anchors(max_age_days: int = 30) -> int:
    """
    Delete anchor images older than *max_age_days*.

    Args:
        max_age_days: Maximum age in days before an anchor is purged.

    Returns:
        Number of anchors deleted.
    """
    ensure_data_dir()

    from datetime import timedelta
    cutoff = datetime.now() - timedelta(days=max_age_days)
    deleted = 0

    for anchor_dir in DATA_DIR.iterdir():
        if not anchor_dir.is_dir():
            continue
        metadata = get_anchor_metadata(anchor_dir.name)
        if not metadata:
            continue
        try:
            created_at = datetime.fromisoformat(
                metadata.get('created_at', datetime.now().isoformat())
            )
        except (TypeError, ValueError):
            # One corrupt 'created_at' must not abort the whole cleanup
            # run; skip the malformed entry instead of raising.
            continue
        if created_at < cutoff:
            shutil.rmtree(anchor_dir)
            deleted += 1

    return deleted
|
||||
|
||||
|
||||
def get_storage_stats() -> Dict[str, Any]:
    """Return storage statistics for the anchor image store.

    Returns:
        Dict with anchor count, total size (bytes and MB) and data directory.
    """
    ensure_data_dir()

    anchor_count = 0
    total_bytes = 0

    for entry in DATA_DIR.iterdir():
        if not entry.is_dir():
            continue
        anchor_count += 1
        total_bytes += sum(
            child.stat().st_size for child in entry.iterdir() if child.is_file()
        )

    return {
        'anchor_count': anchor_count,
        'total_size_bytes': total_bytes,
        'total_size_mb': round(total_bytes / (1024 * 1024), 2),
        'data_directory': str(DATA_DIR),
    }
|
||||
730
visual_workflow_builder/backend/services/converter.py
Normal file
730
visual_workflow_builder/backend/services/converter.py
Normal file
@@ -0,0 +1,730 @@
|
||||
"""
|
||||
Visual to WorkflowGraph Converter - Visual Workflow Builder
|
||||
|
||||
Convertit les workflows visuels en WorkflowGraph exécutables.
|
||||
|
||||
Exigences: 6.1, 18.1
|
||||
"""
|
||||
|
||||
import re
import sys
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
# Ajouter le chemin racine pour importer les modules core
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent))
|
||||
|
||||
from core.models.workflow_graph import (
|
||||
Workflow,
|
||||
WorkflowNode,
|
||||
WorkflowEdge,
|
||||
Action,
|
||||
TargetSpec,
|
||||
ScreenTemplate,
|
||||
WindowConstraint,
|
||||
TextConstraint,
|
||||
UIConstraint,
|
||||
EmbeddingPrototype,
|
||||
EdgeConstraints,
|
||||
PostConditions,
|
||||
EdgeStats,
|
||||
SafetyRules,
|
||||
WorkflowStats,
|
||||
LearningConfig
|
||||
)
|
||||
|
||||
from models.visual_workflow import (
|
||||
VisualWorkflow,
|
||||
VisualNode,
|
||||
VisualEdge
|
||||
)
|
||||
|
||||
# Self-healing support is optional: when the sibling module is missing,
# fall back to a no-op factory so conversion keeps working without it.
try:
    from .self_healing_converter import get_self_healing_converter
    SELF_HEALING_AVAILABLE = True
except ImportError:
    SELF_HEALING_AVAILABLE = False
    def get_self_healing_converter():
        # Stub used when the self-healing converter is unavailable.
        return None
|
||||
|
||||
|
||||
class ConversionError(Exception):
    """Raised when a visual workflow cannot be converted."""
|
||||
|
||||
|
||||
class VisualToGraphConverter:
    """
    Converts visual workflows into executable WorkflowGraph objects.

    Requirements: 6.1, 18.1
    """

    # Maps each visual node type to the runtime action type it produces.
    NODE_TYPE_TO_ACTION = {
        'click': 'mouse_click',
        'type': 'text_input',
        'wait': 'wait',
        'navigate': 'navigate',
        'extract': 'extract_data',
        'variable': 'set_variable',
        'condition': 'evaluate_condition',
        'loop': 'execute_loop',
        'validate': 'key_press',
        'scroll': 'scroll',
        'screenshot': 'screenshot',
        'transform': 'transform_data',
        'api': 'api_call',
        'database': 'database_query',
        'start': 'workflow_start',
        'end': 'workflow_end'
    }

    # Node types that carry control-flow logic rather than UI actions.
    LOGIC_NODE_TYPES = {'condition', 'loop'}
|
||||
|
||||
def __init__(self):
    """Create a converter with empty error and warning buffers."""
    self.warnings: List[str] = []
    self.errors: List[str] = []
|
||||
|
||||
def convert(self, visual_workflow: VisualWorkflow) -> Workflow:
    """
    Convert a VisualWorkflow into an executable Workflow.

    Args:
        visual_workflow: The visual workflow to convert.

    Returns:
        An executable Workflow.

    Raises:
        ConversionError: If validation fails, the workflow has no nodes,
            or any node/edge fails to convert.
    """
    # Reset per-run diagnostics so repeated calls do not accumulate.
    self.errors = []
    self.warnings = []

    # Validate the structure before converting.
    validation_errors = visual_workflow.validate()
    if validation_errors:
        raise ConversionError(f"Workflow invalide: {', '.join(validation_errors)}")

    # An empty workflow cannot be executed.
    if not visual_workflow.nodes:
        raise ConversionError("Le workflow ne contient aucun node")

    # Convert the nodes first: edges reference them by id.
    workflow_nodes = self._convert_nodes(visual_workflow)

    # Convert the edges.
    workflow_edges = self._convert_edges(visual_workflow, workflow_nodes)

    # Derive entry and exit nodes from the edge topology.
    entry_nodes, end_nodes = self._determine_entry_exit_nodes(
        visual_workflow, workflow_nodes, workflow_edges
    )

    # Detect and configure loops and conditionals.
    loops, conditionals = self._detect_logic_structures(
        visual_workflow, workflow_nodes, workflow_edges
    )

    # Assemble the executable workflow.
    workflow = Workflow(
        workflow_id=visual_workflow.id,
        name=visual_workflow.name,
        description=visual_workflow.description or "",
        version=int(visual_workflow.version.split('.')[0]),  # "1.0.0" -> 1
        learning_state="OBSERVATION",
        created_at=visual_workflow.created_at,
        updated_at=visual_workflow.updated_at,
        entry_nodes=entry_nodes,
        end_nodes=end_nodes,
        nodes=workflow_nodes,
        edges=workflow_edges,
        safety_rules=self._create_safety_rules(visual_workflow),
        stats=WorkflowStats(),
        learning=LearningConfig(),
        loops=loops,
        conditionals=conditionals,
        metadata={
            'created_by': visual_workflow.created_by,
            'tags': visual_workflow.tags,
            'category': visual_workflow.category,
            'is_template': visual_workflow.is_template,
            'source': 'visual_workflow_builder'
        }
    )

    # Apply workflow-level self-healing settings when available.
    if SELF_HEALING_AVAILABLE:
        self_healing_converter = get_self_healing_converter()
        if self_healing_converter:
            workflow = self_healing_converter.convert_workflow_settings(visual_workflow, workflow)

    return workflow
|
||||
|
||||
def _detect_logic_structures(
    self,
    visual_workflow: VisualWorkflow,
    workflow_nodes: List[WorkflowNode],
    workflow_edges: List[WorkflowEdge]
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """
    Detect and configure the logic structures (loops and conditionals).

    Requirements: 8.1, 8.2, 9.1, 9.2

    Returns:
        A (loops, conditionals) pair of dicts keyed by node id.
    """
    loops = {}
    conditionals = {}

    for node in workflow_nodes:
        visual_type = node.metadata.get('visual_type')
        parameters = node.metadata.get('parameters', {})

        if visual_type == 'condition':
            # Conditional node configuration (requirements 8.1, 8.2).
            conditionals[node.node_id] = {
                'expression': parameters.get('expression', ''),
                'true_branch': self._find_branch_target(node.node_id, 'true', workflow_edges),
                'false_branch': self._find_branch_target(node.node_id, 'false', workflow_edges),
                'metadata': {
                    'visual_position': node.metadata.get('visual_position'),
                    'condition_type': parameters.get('type', 'expression')
                }
            }

        elif visual_type == 'loop':
            # Loop node configuration (requirements 9.1, 9.2).
            loop_type = parameters.get('type', 'repeat')
            loop_config = {
                'loop_type': loop_type,
                'body_nodes': self._find_loop_body(node.node_id, workflow_edges),
                'exit_node': self._find_loop_exit(node.node_id, workflow_edges),
                'metadata': {
                    'visual_position': node.metadata.get('visual_position')
                }
            }

            # Attach the parameters specific to each loop flavor.
            if loop_type == 'repeat':
                loop_config['count'] = parameters.get('count', 1)
            elif loop_type == 'while':
                loop_config['condition'] = parameters.get('condition', '')
                loop_config['max_iterations'] = parameters.get('max_iterations', 100)
            elif loop_type == 'for-each':
                loop_config['collection'] = parameters.get('collection', '')
                loop_config['item_variable'] = parameters.get('item_variable', 'item')

            loops[node.node_id] = loop_config

    return loops, conditionals
|
||||
|
||||
def _find_branch_target(
|
||||
self,
|
||||
node_id: str,
|
||||
branch_type: str,
|
||||
edges: List[WorkflowEdge]
|
||||
) -> Optional[str]:
|
||||
"""Trouve le node cible d'une branche de condition"""
|
||||
for edge in edges:
|
||||
if edge.from_node == node_id:
|
||||
# Vérifier le port source ou les métadonnées
|
||||
source_port = edge.metadata.get('source_port', '')
|
||||
if branch_type in source_port.lower():
|
||||
return edge.to_node
|
||||
|
||||
# Vérifier les pre-conditions
|
||||
if 'condition_result' in edge.constraints.pre_conditions:
|
||||
expected_result = branch_type == 'true'
|
||||
if edge.constraints.pre_conditions['condition_result'] == expected_result:
|
||||
return edge.to_node
|
||||
|
||||
return None
|
||||
|
||||
def _find_loop_body(
|
||||
self,
|
||||
loop_node_id: str,
|
||||
edges: List[WorkflowEdge]
|
||||
) -> List[str]:
|
||||
"""Trouve les nodes du corps de la boucle"""
|
||||
body_nodes = []
|
||||
|
||||
# Trouver le premier node du corps (edge avec port 'body' ou 'loop')
|
||||
for edge in edges:
|
||||
if edge.from_node == loop_node_id:
|
||||
source_port = edge.metadata.get('source_port', '')
|
||||
if 'body' in source_port.lower() or 'loop' in source_port.lower():
|
||||
body_nodes.append(edge.to_node)
|
||||
# TODO: Suivre le graphe pour trouver tous les nodes du corps
|
||||
break
|
||||
|
||||
return body_nodes
|
||||
|
||||
def _find_loop_exit(
|
||||
self,
|
||||
loop_node_id: str,
|
||||
edges: List[WorkflowEdge]
|
||||
) -> Optional[str]:
|
||||
"""Trouve le node de sortie de la boucle"""
|
||||
for edge in edges:
|
||||
if edge.from_node == loop_node_id:
|
||||
source_port = edge.metadata.get('source_port', '')
|
||||
# Chercher le port de sortie (exit, out_exit, etc.)
|
||||
if 'exit' in source_port.lower():
|
||||
return edge.to_node
|
||||
# Si pas de port body, c'est probablement la sortie
|
||||
if 'body' not in source_port.lower() and 'loop' not in source_port.lower():
|
||||
# Vérifier si ce n'est pas déjà le corps
|
||||
body_targets = self._find_loop_body(loop_node_id, edges)
|
||||
if edge.to_node not in body_targets:
|
||||
return edge.to_node
|
||||
|
||||
return None
|
||||
|
||||
def _convert_nodes(self, visual_workflow: VisualWorkflow) -> List[WorkflowNode]:
    """Convert every visual node, collecting per-node failures before raising."""
    converted: List[WorkflowNode] = []

    for visual_node in visual_workflow.nodes:
        try:
            converted.append(self._convert_node(visual_node))
        except Exception as e:
            # Record the failure and keep going so all errors surface at once.
            self.errors.append(f"Erreur conversion node {visual_node.id}: {str(e)}")

    if self.errors:
        raise ConversionError(f"Erreurs lors de la conversion des nodes: {', '.join(self.errors)}")

    return converted
|
||||
|
||||
def _convert_node(self, vnode: VisualNode) -> WorkflowNode:
    """Convert a single VisualNode into a WorkflowNode.

    The visual type, position, size and parameters are preserved in the
    node metadata so later passes (action creation, logic detection) can
    read them back.
    """

    # Create a basic screen template.
    # A real implementation would use embeddings and constraints here.
    template = ScreenTemplate(
        window=WindowConstraint(),
        text=TextConstraint(),
        ui=UIConstraint(),
        embedding=EmbeddingPrototype(
            provider="visual_workflow_builder",
            vector_id=f"node_{vnode.id}",
            min_cosine_similarity=0.85,
            sample_count=0
        )
    )

    # Build the WorkflowNode.
    wnode = WorkflowNode(
        node_id=vnode.id,
        name=vnode.label or vnode.type,
        description=vnode.description or f"Node de type {vnode.type}",
        template=template,
        is_entry=False,  # Determined later from the edge topology
        is_end=False,  # Determined later from the edge topology
        metadata={
            'visual_type': vnode.type,
            'visual_position': vnode.position.to_dict(),
            'visual_size': vnode.size.to_dict(),
            'parameters': vnode.parameters,
            'color': vnode.color
        }
    )

    # Apply the node-level self-healing configuration when available.
    if SELF_HEALING_AVAILABLE:
        self_healing_converter = get_self_healing_converter()
        if self_healing_converter:
            wnode = self_healing_converter.convert_node_config(vnode, wnode)

    return wnode
|
||||
|
||||
def _convert_edges(
    self,
    visual_workflow: VisualWorkflow,
    workflow_nodes: List[WorkflowNode]
) -> List[WorkflowEdge]:
    """Convert the visual edges into WorkflowEdges.

    Failures are collected per edge and raised together at the end.

    Raises:
        ConversionError: If any edge references a missing node or fails
            to convert.
    """
    workflow_edges = []

    # Map node_id -> node for O(1) endpoint validation.
    node_map = {node.node_id: node for node in workflow_nodes}

    for vedge in visual_workflow.edges:
        try:
            # Both endpoints must exist among the converted nodes.
            if vedge.source not in node_map:
                raise ConversionError(f"Node source {vedge.source} introuvable")
            if vedge.target not in node_map:
                raise ConversionError(f"Node target {vedge.target} introuvable")

            source_node = node_map[vedge.source]
            target_node = node_map[vedge.target]

            # The action executed along the edge is derived from the
            # source node's type and parameters.
            action = self._create_action_from_node(source_node, visual_workflow)

            # Constraints (including condition-branch handling).
            constraints = self._create_edge_constraints(vedge, source_node)

            # Post-conditions: after the action we expect the target node.
            post_conditions = PostConditions(
                expected_node=target_node.node_id,
                timeout_ms=3000
            )

            # Build the WorkflowEdge.
            wedge = WorkflowEdge(
                edge_id=vedge.id,
                from_node=vedge.source,
                to_node=vedge.target,
                action=action,
                constraints=constraints,
                post_conditions=post_conditions,
                stats=EdgeStats(),
                metadata={
                    'visual_condition': vedge.condition.to_dict() if vedge.condition else None,
                    'visual_style': vedge.style.to_dict() if vedge.style else None,
                    'source_port': vedge.source_port,
                    'target_port': vedge.target_port
                }
            )

            workflow_edges.append(wedge)

        except Exception as e:
            # Record and continue so all edge errors surface at once.
            self.errors.append(f"Erreur conversion edge {vedge.id}: {str(e)}")

    if self.errors:
        raise ConversionError(f"Erreurs lors de la conversion des edges: {', '.join(self.errors)}")

    return workflow_edges
|
||||
|
||||
def _create_edge_constraints(
    self,
    vedge: VisualEdge,
    source_node: WorkflowNode
) -> EdgeConstraints:
    """Build edge constraints, tagging condition branches as pre-conditions.

    Args:
        vedge: The visual edge being converted.
        source_node: The already-converted source node of the edge.

    Returns:
        EdgeConstraints with default confidence/wait settings plus any
        branch or explicit-condition pre-conditions.
    """

    constraints = EdgeConstraints(
        required_confidence=0.8,
        max_wait_time_ms=5000
    )

    # When the source node is a condition, record which branch this edge is.
    if source_node.metadata.get('visual_type') == 'condition':
        # Guard against a missing port: source_port may be None, and the
        # original `source_port.lower()` would raise AttributeError.
        port = (vedge.source_port or '').lower()
        # 'out_true'/'out_false' are covered by the substring tests.
        if 'true' in port:
            constraints.pre_conditions['condition_result'] = True
        elif 'false' in port:
            constraints.pre_conditions['condition_result'] = False

    # An explicit condition on the edge is carried over as a pre-condition.
    if vedge.condition:
        if vedge.condition.type == 'expression' and vedge.condition.expression:
            constraints.pre_conditions['expression'] = vedge.condition.expression
        elif vedge.condition.type in ['success', 'failure']:
            constraints.pre_conditions['execution_status'] = vedge.condition.type

    return constraints
|
||||
|
||||
def _create_action_from_node(
    self,
    node: WorkflowNode,
    visual_workflow: VisualWorkflow
) -> Action:
    """Build the Action executed along an edge from its source node."""

    visual_type = node.metadata.get('visual_type', 'unknown')
    node_params = node.metadata.get('parameters', {})

    # Unknown visual types fall back to a plain mouse click.
    action_type = self.NODE_TYPE_TO_ACTION.get(visual_type, 'mouse_click')

    # Target specification, then action parameters (with variable handling).
    target_spec = self._create_target_spec(visual_type, node_params)
    action_params = self._create_action_parameters(visual_type, node_params, visual_workflow)

    return Action(type=action_type, target=target_spec, parameters=action_params)
|
||||
|
||||
def _create_target_spec(self, node_type: str, parameters: Dict[str, Any]) -> TargetSpec:
    """Build a TargetSpec from a node's 'target' parameter."""

    target_info = parameters.get('target', {})

    # A bare string is treated as a simple text selector.
    if isinstance(target_info, str):
        return TargetSpec(by_text=target_info, selection_policy="first")

    # A mapping carries the detailed selector fields.
    if isinstance(target_info, dict):
        return TargetSpec(
            by_role=target_info.get('role'),
            by_text=target_info.get('text'),
            by_position=tuple(target_info['position']) if 'position' in target_info else None,
            selection_policy=target_info.get('selection_policy', 'first')
        )

    # Anything else: generic button target.
    return TargetSpec(by_role="button", selection_policy="first")
|
||||
|
||||
def _create_action_parameters(
    self,
    node_type: str,
    parameters: Dict[str, Any],
    visual_workflow: VisualWorkflow
) -> Dict[str, Any]:
    """Build the action parameters for a node, with variable substitution.

    Each visual node type maps to its own parameter set; unknown types
    yield an empty dict. String parameters that may reference variables
    go through _substitute_variables first.
    """

    action_params = {}

    if node_type == 'click':
        # Click actions.
        action_params['click_type'] = parameters.get('click_type', 'left')
        action_params['timeout_ms'] = parameters.get('timeout', 5000)
        action_params['retries'] = parameters.get('retries', 3)
        action_params['wait_after_ms'] = parameters.get('wait_after', 500)

    elif node_type == 'type':
        # Text-input actions.
        text = parameters.get('text', '')
        text = self._substitute_variables(text, visual_workflow)
        action_params['text'] = text
        action_params['clear_first'] = parameters.get('clear_first', False)
        action_params['typing_speed'] = parameters.get('typing_speed', 'normal')
        action_params['press_enter'] = parameters.get('press_enter', False)

    elif node_type == 'wait':
        # Wait actions.
        duration = parameters.get('duration', 1000)
        action_params['duration_ms'] = int(duration)
        action_params['wait_type'] = parameters.get('wait_type', 'fixed')

    elif node_type == 'navigate':
        # Navigation actions.
        url = parameters.get('url', '')
        url = self._substitute_variables(url, visual_workflow)
        action_params['url'] = url
        action_params['wait_for_load'] = parameters.get('wait_for_load', True)
        action_params['timeout_ms'] = parameters.get('timeout', 10000)

    elif node_type == 'validate':
        # Validation (Enter key press).
        action_params['key'] = 'Return'
        action_params['validation_type'] = parameters.get('validation_type', 'exists')
        action_params['expected_text'] = parameters.get('expected_text', '')

    elif node_type == 'scroll':
        # Scrolling.
        action_params['direction'] = parameters.get('direction', 'down')
        action_params['amount'] = parameters.get('amount', 3)

    elif node_type == 'screenshot':
        # Screen captures; full screen unless a region is given.
        action_params['filename'] = parameters.get('filename', '')
        action_params['full_screen'] = not parameters.get('region')

    elif node_type == 'extract':
        # Data extraction into a variable.
        variable_name = parameters.get('variable', '')
        action_params['variable_name'] = variable_name
        action_params['extraction_type'] = parameters.get('extraction_type', 'text')
        action_params['attribute_name'] = parameters.get('attribute_name', '')

    elif node_type == 'variable':
        # Variable definition.
        var_name = parameters.get('name', '')
        var_value = parameters.get('value', '')
        var_value = self._substitute_variables(str(var_value), visual_workflow)
        action_params['variable_name'] = var_name
        action_params['variable_value'] = var_value
        action_params['variable_type'] = parameters.get('variable_type', 'string')

    elif node_type == 'transform':
        # Data transformation.
        action_params['transformation_type'] = parameters.get('transformation_type', 'format')
        action_params['input_variable'] = parameters.get('input_variable', '')
        action_params['output_variable'] = parameters.get('output_variable', '')
        action_params['transformation_rule'] = parameters.get('transformation_rule', '')

    elif node_type == 'api':
        # API calls.
        action_params['method'] = parameters.get('method', 'GET')
        action_params['url'] = self._substitute_variables(parameters.get('url', ''), visual_workflow)
        action_params['headers'] = parameters.get('headers', {})
        action_params['body'] = parameters.get('body', '')
        action_params['response_variable'] = parameters.get('response_variable', '')

    elif node_type == 'database':
        # Database queries.
        action_params['connection_string'] = parameters.get('connection_string', '')
        action_params['query'] = self._substitute_variables(parameters.get('query', ''), visual_workflow)
        action_params['result_variable'] = parameters.get('result_variable', '')

    elif node_type == 'condition':
        # Conditions (requirements 8.1, 8.2, 8.5).
        expression = parameters.get('expression', '')
        expression = self._substitute_variables(expression, visual_workflow)
        action_params['expression'] = expression
        action_params['condition_type'] = parameters.get('condition_type', 'expression')

        # Validate the expression syntax (requirement 8.5); a failure is
        # only a warning, not a conversion error.
        validation_result = self._validate_expression(expression)
        if not validation_result['valid']:
            self.warnings.append(
                f"Expression de condition potentiellement invalide: {expression} - {validation_result['message']}"
            )

    elif node_type == 'loop':
        # Loops (requirements 9.1, 9.2, 9.5).
        loop_type = parameters.get('type', 'repeat')  # for-each, while, repeat
        action_params['loop_type'] = loop_type

        if loop_type == 'repeat':
            # Fixed iteration count.
            count = parameters.get('count', 1)
            action_params['count'] = int(count)

        elif loop_type == 'while':
            # Condition-driven loop.
            condition = parameters.get('condition', '')
            condition = self._substitute_variables(condition, visual_workflow)
            action_params['condition'] = condition
            action_params['max_iterations'] = parameters.get('max_iterations', 100)

        elif loop_type == 'for-each':
            # Loop over a collection.
            collection = parameters.get('collection', '')
            collection = self._substitute_variables(collection, visual_workflow)
            action_params['collection'] = collection
            action_params['item_variable'] = parameters.get('item_variable', 'item')

        # Safety cap applied to every loop type.
        action_params['max_iterations'] = parameters.get('max_iterations', 100)

    return action_params
|
||||
|
||||
def _validate_expression(self, expression: str) -> Dict[str, Any]:
|
||||
"""
|
||||
Valide la syntaxe d'une expression de condition.
|
||||
|
||||
Exigence: 8.5
|
||||
"""
|
||||
# Validation basique - dans une vraie implémentation, on utiliserait un parser
|
||||
if not expression or not expression.strip():
|
||||
return {'valid': False, 'message': 'Expression vide'}
|
||||
|
||||
# Vérifier les opérateurs de base
|
||||
valid_operators = ['==', '!=', '<', '>', '<=', '>=', 'and', 'or', 'not', 'in']
|
||||
has_operator = any(op in expression for op in valid_operators)
|
||||
|
||||
if not has_operator:
|
||||
return {'valid': False, 'message': 'Aucun opérateur de comparaison trouvé'}
|
||||
|
||||
# Vérifier les parenthèses équilibrées
|
||||
if expression.count('(') != expression.count(')'):
|
||||
return {'valid': False, 'message': 'Parenthèses non équilibrées'}
|
||||
|
||||
return {'valid': True, 'message': 'OK'}
|
||||
|
||||
def _substitute_variables(self, text: str, visual_workflow: VisualWorkflow) -> str:
    """Substitute ${var} variable references in *text*.

    Currently a deliberate pass-through: the references are kept verbatim
    and the real substitution happens at execution time.
    """
    return text
|
||||
|
||||
def _determine_entry_exit_nodes(
    self,
    visual_workflow: VisualWorkflow,
    workflow_nodes: List[WorkflowNode],
    workflow_edges: List[WorkflowEdge]
) -> Tuple[List[str], List[str]]:
    """Identify the entry and exit nodes of the workflow.

    Entry nodes have no incoming edge; end nodes have no outgoing edge.
    When none can be found, the first (resp. last) node is used as a
    fallback and a warning is recorded. The matching nodes are flagged
    in place via ``is_entry`` / ``is_end``.
    """
    incoming = {edge.to_node for edge in workflow_edges}
    outgoing = {edge.from_node for edge in workflow_edges}

    entry_nodes = [n.node_id for n in workflow_nodes if n.node_id not in incoming]
    end_nodes = [n.node_id for n in workflow_nodes if n.node_id not in outgoing]

    # Fallbacks for fully cyclic graphs: pick the first / last node.
    if workflow_nodes and not entry_nodes:
        entry_nodes = [workflow_nodes[0].node_id]
        self.warnings.append("Aucun node d'entrée détecté, utilisation du premier node")

    if workflow_nodes and not end_nodes:
        end_nodes = [workflow_nodes[-1].node_id]
        self.warnings.append("Aucun node de sortie détecté, utilisation du dernier node")

    # Flag the nodes so downstream consumers can identify them directly.
    entry_set, end_set = set(entry_nodes), set(end_nodes)
    for node in workflow_nodes:
        if node.node_id in entry_set:
            node.is_entry = True
        if node.node_id in end_set:
            node.is_end = True

    return entry_nodes, end_nodes
|
||||
|
||||
def _create_safety_rules(self, visual_workflow: VisualWorkflow) -> SafetyRules:
    """Build the SafetyRules derived from the workflow settings.

    The workflow timeout is converted to whole minutes (the // 60000
    suggests it is stored in milliseconds); 0 disables the timeout.
    """
    timeout = visual_workflow.settings.timeout
    timeout_minutes = timeout // 60000 if timeout > 0 else 0
    return SafetyRules(
        require_confirmation_for=[],
        forbidden_windows=[],
        execution_timeout_minutes=timeout_minutes
    )
|
||||
|
||||
def get_errors(self) -> List[str]:
    """Return the list of conversion errors accumulated so far."""
    return self.errors
|
||||
|
||||
def get_warnings(self) -> List[str]:
    """Return the list of conversion warnings accumulated so far."""
    return self.warnings
|
||||
|
||||
|
||||
def convert_visual_to_graph(visual_workflow: VisualWorkflow) -> Workflow:
    """Convenience wrapper converting a visual workflow to an executable one.

    Args:
        visual_workflow: The visual workflow to convert.

    Returns:
        The executable Workflow.

    Raises:
        ConversionError: If the conversion fails.
    """
    return VisualToGraphConverter().convert(visual_workflow)
|
||||
356
visual_workflow_builder/backend/services/real_screen_capture.py
Normal file
356
visual_workflow_builder/backend/services/real_screen_capture.py
Normal file
@@ -0,0 +1,356 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Service de Capture d'Écran Réelle - RPA Vision V3
|
||||
Auteur : Dom, Alice, Kiro - 8 janvier 2026
|
||||
|
||||
Service pour capturer l'écran réel de l'utilisateur et détecter les éléments UI.
|
||||
"""
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import mss
|
||||
import base64
|
||||
import io
|
||||
from PIL import Image
|
||||
from typing import Dict, List, Tuple, Optional
|
||||
import threading
|
||||
import time
|
||||
import logging
|
||||
|
||||
# Import des modules RPA Vision V3 pour la détection UI
|
||||
import sys
|
||||
import os
|
||||
|
||||
# Ajouter le chemin vers le répertoire racine du projet
|
||||
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..'))
|
||||
if project_root not in sys.path:
|
||||
sys.path.insert(0, project_root)
|
||||
|
||||
try:
|
||||
from core.detection.ui_detector import UIDetector
|
||||
UI_DETECTOR_AVAILABLE = True
|
||||
except ImportError as e:
|
||||
print(f"Warning: UIDetector non disponible: {e}")
|
||||
UI_DETECTOR_AVAILABLE = False
|
||||
UIDetector = None
|
||||
|
||||
try:
|
||||
from core.models.screen_state import ScreenState, UIElement
|
||||
SCREEN_STATE_AVAILABLE = True
|
||||
except ImportError as e:
|
||||
print(f"Warning: ScreenState non disponible: {e}")
|
||||
SCREEN_STATE_AVAILABLE = False
|
||||
ScreenState = None
|
||||
UIElement = None
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class RealScreenCaptureService:
|
||||
"""
|
||||
Service de capture d'écran réelle avec détection d'éléments UI
|
||||
"""
|
||||
|
||||
def __init__(self):
    """Initialize the capture service and probe the available monitors."""
    # Runtime state of the background capture loop.
    self.is_capturing = False
    self.capture_thread = None
    self.current_screenshot = None
    self.detected_elements = []

    # Initialize the UI detector only when the optional module imported.
    if UI_DETECTOR_AVAILABLE:
        self.ui_detector = UIDetector()
    else:
        self.ui_detector = None
        print("Warning: UIDetector non disponible - détection d'éléments désactivée")

    self.capture_interval = 1.0  # seconds between captures (default)
    self.monitors = []
    self.selected_monitor = 0

    # Probe the monitors with a short-lived MSS instance; the capture
    # loop later creates its own thread-local instance.
    try:
        # Temporary MSS instance, used only for monitor discovery.
        with mss.mss() as sct:
            self.monitors = sct.monitors
            logger.info(f"Détecté {len(self.monitors)} moniteurs")
            for i, monitor in enumerate(self.monitors):
                logger.info(f"Moniteur {i}: {monitor}")
    except Exception as e:
        logger.error(f"Erreur lors de la détection des moniteurs: {e}")
        # Fallback: assume a single Full-HD monitor.
        self.monitors = [{"top": 0, "left": 0, "width": 1920, "height": 1080}]
|
||||
|
||||
def _detect_monitors(self):
    """Re-detect the available monitors and refresh ``self.monitors``.

    Bug fix: the original referenced ``self.sct``, which is never assigned
    anywhere in the class (the constructor only uses a temporary
    ``with mss.mss()`` context), so calling this method always raised
    AttributeError. Use a short-lived local MSS instance instead, matching
    the constructor's approach.
    """
    try:
        with mss.mss() as sct:
            self.monitors = sct.monitors
        logger.info(f"Détecté {len(self.monitors)} moniteurs")
        for i, monitor in enumerate(self.monitors):
            logger.info(f"Moniteur {i}: {monitor}")
    except Exception as e:
        logger.error(f"Erreur lors de la détection des moniteurs: {e}")
        # Fallback: assume a single Full-HD monitor.
        self.monitors = [{"top": 0, "left": 0, "width": 1920, "height": 1080}]
|
||||
|
||||
def get_monitors(self) -> List[Dict]:
    """Return the available monitors as plain JSON-serialisable dicts."""
    monitors = []
    for index, monitor in enumerate(self.monitors):
        monitors.append({
            "id": index,
            "width": monitor.get("width", 0),
            "height": monitor.get("height", 0),
            "top": monitor.get("top", 0),
            "left": monitor.get("left", 0)
        })
    return monitors
|
||||
|
||||
def select_monitor(self, monitor_id: int) -> bool:
    """Select the monitor to capture; returns False for an invalid id."""
    # Guard clause: reject out-of-range indices immediately.
    if monitor_id < 0 or monitor_id >= len(self.monitors):
        return False
    self.selected_monitor = monitor_id
    logger.info(f"Moniteur sélectionné: {monitor_id}")
    return True
|
||||
|
||||
def start_capture(self, interval: float = 1.0) -> bool:
    """Start the real-time capture loop in a background daemon thread.

    Args:
        interval: Seconds to wait between two captures.

    Returns:
        True if the loop was started, False if a capture is already running.
    """
    if self.is_capturing:
        logger.warning("Capture déjà en cours")
        return False

    self.capture_interval = interval
    # Set the flag before starting the thread: _capture_loop exits as
    # soon as is_capturing becomes False.
    self.is_capturing = True

    # Daemon thread so a forgotten capture never blocks interpreter exit.
    self.capture_thread = threading.Thread(target=self._capture_loop, daemon=True)
    self.capture_thread.start()

    logger.info(f"Capture démarrée (intervalle: {interval}s)")
    return True
||||
|
||||
def stop_capture(self) -> bool:
    """Stop the capture loop.

    Returns:
        True if a running capture was stopped, False if none was active.
    """
    if not self.is_capturing:
        return False

    # The loop polls this flag; clearing it makes the worker thread exit.
    self.is_capturing = False

    # Wait briefly for the worker to finish its current iteration.
    if self.capture_thread and self.capture_thread.is_alive():
        self.capture_thread.join(timeout=2.0)

    logger.info("Capture arrêtée")
    return True
|
||||
|
||||
def _capture_loop(self):
    """Main capture loop; runs in the background daemon thread.

    A thread-local MSS instance is created here because, per the original
    design note, sharing one MSS handle across threads caused problems.
    """
    # Create an MSS instance local to this thread to avoid threading issues.
    try:
        with mss.mss() as sct_local:
            while self.is_capturing:
                try:
                    # Grab a frame with the thread-local instance.
                    screenshot = self._capture_screen_with_sct(sct_local)
                    if screenshot is not None:
                        self.current_screenshot = screenshot

                        # Run UI element detection on the fresh frame.
                        if UI_DETECTOR_AVAILABLE and self.ui_detector:
                            self._detect_ui_elements(screenshot)

                    # Throttle to the configured capture rate.
                    time.sleep(self.capture_interval)

                except Exception as e:
                    logger.error(f"Erreur dans la boucle de capture: {e}")
                    time.sleep(1.0)  # back off before retrying
    except Exception as e:
        logger.error(f"Erreur lors de l'initialisation MSS dans le thread: {e}")
|
||||
|
||||
def _capture_screen_with_sct(self, sct):
    """Capture the selected monitor using the given MSS instance.

    Args:
        sct: An open ``mss.mss()`` instance (thread-local for the loop).

    Returns:
        BGR numpy array of the frame, or None on failure.
    """
    try:
        # Clamp an out-of-range selection back to monitor 0.
        if self.selected_monitor >= len(self.monitors):
            self.selected_monitor = 0

        monitor = self.monitors[self.selected_monitor]

        # Grab the raw frame with MSS.
        screenshot = sct.grab(monitor)

        # Convert to a numpy array.
        img_array = np.array(screenshot)

        # MSS yields BGRA; drop the alpha channel for OpenCV (BGR).
        if img_array.shape[2] == 4:
            img_array = cv2.cvtColor(img_array, cv2.COLOR_BGRA2BGR)

        return img_array

    except Exception as e:
        logger.error(f"Erreur lors de la capture d'écran: {e}")
        return None
|
||||
|
||||
def _capture_screen(self) -> Optional[np.ndarray]:
    """Legacy single-shot capture; delegates to ``_capture_screen_with_sct``
    using a short-lived MSS instance."""
    try:
        with mss.mss() as sct:
            frame = self._capture_screen_with_sct(sct)
        return frame
    except Exception as e:
        logger.error(f"Erreur lors de la capture d'écran legacy: {e}")
        return None
|
||||
|
||||
def _detect_ui_elements(self, screenshot: np.ndarray):
    """Run UI element detection on a captured frame.

    Updates ``self.detected_elements`` in place; on any failure the list
    is reset to empty so stale detections are never served.
    """
    # Bug fix: the caller only checks UI_DETECTOR_AVAILABLE, but building
    # the detector input also needs the ScreenState model. Without this
    # guard, ScreenState is None when its import failed and every call
    # raised TypeError.
    if not SCREEN_STATE_AVAILABLE or ScreenState is None:
        self.detected_elements = []
        return

    try:
        # Build a transient ScreenState carrying the in-memory frame.
        screen_state = ScreenState(
            timestamp=time.time(),
            screenshot_path="",  # no file: the image stays in memory
            screenshot_data=screenshot,
            ui_elements=[],
            metadata={"source": "real_capture"}
        )

        # Delegate to the shared UI detector.
        detected_elements = self.ui_detector.detect_elements(screen_state)

        self.detected_elements = detected_elements

        logger.debug(f"Détecté {len(detected_elements)} éléments UI")

    except Exception as e:
        logger.error(f"Erreur lors de la détection UI: {e}")
        self.detected_elements = []
|
||||
|
||||
def get_current_screenshot_base64(self) -> Optional[str]:
    """Return the latest captured frame as a base64 JPEG data URL.

    Returns:
        A ``data:image/jpeg;base64,...`` string, or None when no frame is
        available or the conversion fails.
    """
    if self.current_screenshot is None:
        return None

    try:
        # Convert the numpy frame to a PIL image.
        if len(self.current_screenshot.shape) == 3:
            # OpenCV stores BGR; PIL expects RGB.
            rgb_image = cv2.cvtColor(self.current_screenshot, cv2.COLOR_BGR2RGB)
            pil_image = Image.fromarray(rgb_image)
        else:
            pil_image = Image.fromarray(self.current_screenshot)

        # Downscale for web display, preserving the aspect ratio.
        max_width = 1200
        if pil_image.width > max_width:
            ratio = max_width / pil_image.width
            new_height = int(pil_image.height * ratio)
            pil_image = pil_image.resize((max_width, new_height), Image.Resampling.LANCZOS)

        # Encode as JPEG then base64.
        buffer = io.BytesIO()
        pil_image.save(buffer, format='JPEG', quality=85)
        img_base64 = base64.b64encode(buffer.getvalue()).decode('utf-8')

        return f"data:image/jpeg;base64,{img_base64}"

    except Exception as e:
        logger.error(f"Erreur lors de la conversion base64: {e}")
        return None
|
||||
|
||||
def get_detected_elements(self) -> List[Dict]:
    """Serialise the detected UI elements into plain JSON-friendly dicts.

    Elements that fail to serialise are logged and skipped rather than
    aborting the whole listing.
    """
    serialised = []

    for element in self.detected_elements:
        try:
            bbox = getattr(element, 'bbox', {})
            serialised.append({
                "id": getattr(element, 'id', ''),
                "type": getattr(element, 'element_type', 'unknown'),
                "text": getattr(element, 'text', ''),
                "bbox": {
                    "x": bbox.get('x', 0),
                    "y": bbox.get('y', 0),
                    "width": bbox.get('width', 0),
                    "height": bbox.get('height', 0)
                },
                "confidence": getattr(element, 'confidence', 0.0),
                "attributes": getattr(element, 'attributes', {})
            })
        except Exception as e:
            logger.error(f"Erreur lors de la sérialisation d'un élément: {e}")

    return serialised
|
||||
|
||||
def capture_single(self, monitor_id: Optional[int] = None) -> Optional[str]:
    """Take a one-off screenshot and return it as a base64 data URL.

    Args:
        monitor_id: Monitor id (uses the currently selected one if None).

    Returns:
        Screenshot as ``data:image/jpeg;base64,...`` or None on failure.
    """
    try:
        # Use the requested monitor, or fall back to the selected one.
        target_monitor = monitor_id if monitor_id is not None else self.selected_monitor

        with mss.mss() as sct:
            # mss.monitors[0] is the combined virtual screen and [1:] the
            # individual monitors, hence the bounds check against len - 1.
            if target_monitor >= len(sct.monitors) - 1:
                logger.warning(f"Moniteur {target_monitor} invalide, utilisation du moniteur 0")
                target_monitor = 0

            # mss.monitors[0] = all screens combined, [1] = first screen, etc.
            monitor = sct.monitors[target_monitor + 1] if target_monitor >= 0 else sct.monitors[1]

            # Grab the frame.
            screenshot = sct.grab(monitor)

            # Convert to a numpy array.
            img_array = np.array(screenshot)

            # Drop the alpha channel (BGRA -> BGR) when present.
            if img_array.shape[2] == 4:
                img_array = cv2.cvtColor(img_array, cv2.COLOR_BGRA2BGR)

            # BGR -> RGB for PIL.
            rgb_image = cv2.cvtColor(img_array, cv2.COLOR_BGR2RGB)
            pil_image = Image.fromarray(rgb_image)

            # Downscale very large frames, preserving the aspect ratio.
            max_width = 1600
            if pil_image.width > max_width:
                ratio = max_width / pil_image.width
                new_height = int(pil_image.height * ratio)
                pil_image = pil_image.resize((max_width, new_height), Image.LANCZOS)

            # Encode as JPEG (smaller payload) then base64.
            buffer = io.BytesIO()
            pil_image.save(buffer, format='JPEG', quality=85)
            base64_data = base64.b64encode(buffer.getvalue()).decode('utf-8')

            logger.info(f"Capture unique réussie - Moniteur {target_monitor}, taille: {len(base64_data)} caractères")

            return f"data:image/jpeg;base64,{base64_data}"

    except Exception as e:
        logger.error(f"Erreur lors de la capture unique: {e}")
        return None
|
||||
|
||||
def get_status(self) -> Dict:
    """Return a JSON-serialisable snapshot of the service state."""
    status = {
        "is_capturing": self.is_capturing,
        "selected_monitor": self.selected_monitor,
        "monitors_count": len(self.monitors),
        "capture_interval": self.capture_interval,
        "elements_detected": len(self.detected_elements),
        "has_screenshot": self.current_screenshot is not None
    }
    return status
|
||||
|
||||
def cleanup(self):
    """Release the service's resources (stops the capture loop).

    MSS instances are thread-local context managers, so there is no
    shared handle left to close here.
    """
    self.stop_capture()
|
||||
|
||||
# Global instance of the service (module-level singleton).
real_capture_service = RealScreenCaptureService()
|
||||
@@ -0,0 +1,353 @@
|
||||
"""Self-healing converter for Visual Workflow Builder."""
|
||||
|
||||
import logging
|
||||
from typing import Dict, Any, Optional
|
||||
|
||||
from ..models.visual_workflow import VisualNode, VisualWorkflow
|
||||
from ..models.self_healing_config import SelfHealingConfig, RecoveryStrategy, RecoveryMode
|
||||
|
||||
# Import core workflow models
|
||||
try:
|
||||
from core.models.workflow_graph import WorkflowGraph, WorkflowNode, WorkflowEdge
|
||||
CORE_AVAILABLE = True
|
||||
except ImportError:
|
||||
CORE_AVAILABLE = False
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SelfHealingConverter:
|
||||
"""Converts visual workflow self-healing configurations to core format."""
|
||||
|
||||
def __init__(self):
    """Set up the converter; disabled when core workflow models are absent."""
    self.enabled = CORE_AVAILABLE
    if not CORE_AVAILABLE:
        logger.warning("Core workflow models not available - self-healing conversion disabled")
|
||||
|
||||
def convert_node_config(
    self,
    visual_node: VisualNode,
    workflow_node: 'WorkflowNode'
) -> 'WorkflowNode':
    """Attach a visual node's self-healing config to a core workflow node.

    The configuration is serialised into plain types and stored under
    ``workflow_node.metadata['self_healing']``. The node is returned
    unchanged when conversion is disabled or no configuration exists.

    Args:
        visual_node: Visual workflow node with self-healing config.
        workflow_node: Core workflow node to update.

    Returns:
        The (possibly updated) workflow node.
    """
    if not self.enabled or not visual_node.self_healing:
        return workflow_node

    try:
        cfg = visual_node.self_healing

        # Serialise enums to their plain values for the core format.
        serialised = {
            'enabled': cfg.enabled,
            'recovery_mode': cfg.recovery_mode.value,
            'max_attempts': cfg.max_attempts,
            'confidence_threshold': cfg.confidence_threshold,
            'enabled_strategies': [strategy.value for strategy in cfg.enabled_strategies],
            'strategy_timeout': cfg.strategy_timeout,
            'learn_from_success': cfg.learn_from_success,
            'require_user_confirmation': cfg.require_user_confirmation,
            'stop_on_failure': cfg.stop_on_failure,
            'notify_on_recovery': cfg.notify_on_recovery,
            'notify_on_failure': cfg.notify_on_failure
        }

        # Lazily create the metadata container on the core node.
        if not hasattr(workflow_node, 'metadata'):
            workflow_node.metadata = {}
        workflow_node.metadata['self_healing'] = serialised

        logger.debug(f"Converted self-healing config for node {visual_node.id}")

    except Exception as e:
        logger.error(f"Failed to convert self-healing config for node {visual_node.id}: {e}")

    return workflow_node
|
||||
|
||||
def convert_workflow_settings(
    self,
    visual_workflow: VisualWorkflow,
    workflow_graph: 'WorkflowGraph'
) -> 'WorkflowGraph':
    """Copy workflow-level self-healing settings onto the core graph.

    Stores the global enable flag plus a per-node summary (id, type,
    recovery mode, strategies) in ``workflow_graph.metadata``.

    Args:
        visual_workflow: Visual workflow carrying the settings.
        workflow_graph: Core workflow graph to update.

    Returns:
        The (possibly updated) workflow graph.
    """
    if not self.enabled:
        return workflow_graph

    try:
        settings = visual_workflow.settings

        # Lazily create the metadata container on the graph.
        if not hasattr(workflow_graph, 'metadata'):
            workflow_graph.metadata = {}
        workflow_graph.metadata['self_healing_enabled'] = settings.enable_self_healing

        # Summarise every node with self-healing switched on, for
        # workflow-wide statistics.
        node_configs = []
        for node in visual_workflow.nodes:
            if node.self_healing and node.self_healing.enabled:
                node_configs.append({
                    'node_id': node.id,
                    'node_type': node.type,
                    'recovery_mode': node.self_healing.recovery_mode.value,
                    'strategies': [s.value for s in node.self_healing.enabled_strategies]
                })

        workflow_graph.metadata['self_healing_nodes'] = node_configs

        logger.info(f"Converted workflow self-healing settings: {len(node_configs)} nodes configured")

    except Exception as e:
        logger.error(f"Failed to convert workflow self-healing settings: {e}")

    return workflow_graph
|
||||
|
||||
def get_execution_config(
    self,
    visual_node: VisualNode
) -> Dict[str, Any]:
    """Build the execution-time self-healing configuration for a node.

    Returns ``{'self_healing_enabled': False}`` when the node has no
    active configuration; otherwise a nested dict mirroring the node's
    settings.
    """
    config = visual_node.self_healing
    if not config or not config.enabled:
        return {'self_healing_enabled': False}

    notifications = {
        'on_recovery': config.notify_on_recovery,
        'on_failure': config.notify_on_failure
    }
    recovery = {
        'mode': config.recovery_mode.value,
        'max_attempts': config.max_attempts,
        'confidence_threshold': config.confidence_threshold,
        'strategies': [s.value for s in config.enabled_strategies],
        'timeout': config.strategy_timeout,
        'learn_from_success': config.learn_from_success,
        'require_confirmation': config.require_user_confirmation,
        'stop_on_failure': config.stop_on_failure,
        'notifications': notifications
    }
    return {'self_healing_enabled': True, 'recovery_config': recovery}
|
||||
|
||||
def create_recovery_context(
    self,
    visual_node: VisualNode,
    workflow_id: str,
    action_info: Dict[str, Any],
    failure_info: Dict[str, Any]
) -> Optional[Dict[str, Any]]:
    """Assemble the recovery context passed to the self-healing engine.

    Args:
        visual_node: Visual node that failed.
        workflow_id: ID of the workflow.
        action_info: Information about the failed action (currently unused,
            kept for interface compatibility).
        failure_info: Information about the failure.

    Returns:
        Recovery context dictionary, or None when the node has no active
        self-healing configuration.
    """
    config = visual_node.self_healing
    if not config or not config.enabled:
        return None

    # The node type doubles as the action type for recovery purposes.
    action_type = visual_node.type
    target_element = visual_node.parameters.get('target', 'unknown')

    return {
        'workflow_id': workflow_id,
        'node_id': visual_node.id,
        'node_type': action_type,
        'original_action': action_type,
        'target_element': target_element,
        'failure_reason': failure_info.get('reason', 'unknown'),
        'screenshot_path': failure_info.get('screenshot_path', ''),
        'attempt_count': failure_info.get('attempt_count', 1),
        'max_attempts': config.max_attempts,
        'confidence_threshold': config.confidence_threshold,
        'metadata': {
            'node_parameters': visual_node.parameters,
            'recovery_mode': config.recovery_mode.value,
            'enabled_strategies': [s.value for s in config.enabled_strategies],
            'require_confirmation': config.require_user_confirmation
        }
    }
|
||||
|
||||
def validate_configuration(
    self,
    config: SelfHealingConfig
) -> Dict[str, Any]:
    """Check a self-healing configuration for errors and dubious combos.

    Returns:
        ``{'valid': bool, 'errors': [...], 'warnings': [...]}`` where
        errors are hard failures and warnings flag questionable but legal
        combinations.
    """
    errors = []
    warnings = []

    # Hard limits on the numeric settings.
    if config.max_attempts < 1 or config.max_attempts > 10:
        errors.append("max_attempts must be between 1 and 10")
    if config.confidence_threshold < 0.0 or config.confidence_threshold > 1.0:
        errors.append("confidence_threshold must be between 0.0 and 1.0")
    if config.strategy_timeout < 1.0 or config.strategy_timeout > 300.0:
        errors.append("strategy_timeout must be between 1.0 and 300.0 seconds")

    # At least one strategy is required for recovery to do anything.
    if not config.enabled_strategies:
        errors.append("At least one recovery strategy must be enabled")

    # Soft checks: combinations that work but are probably unintended.
    if config.recovery_mode == RecoveryMode.DISABLED and config.enabled:
        warnings.append("Recovery mode is disabled but self-healing is enabled")
    if config.recovery_mode == RecoveryMode.AGGRESSIVE and config.require_user_confirmation:
        warnings.append("Aggressive mode with user confirmation may slow down recovery")
    if config.confidence_threshold > 0.9 and config.recovery_mode == RecoveryMode.AGGRESSIVE:
        warnings.append("High confidence threshold with aggressive mode may reduce recovery success")
    if RecoveryStrategy.ALL in config.enabled_strategies and len(config.enabled_strategies) > 1:
        warnings.append("'All strategies' is selected along with specific strategies")

    return {
        'valid': not errors,
        'errors': errors,
        'warnings': warnings
    }
|
||||
|
||||
def get_strategy_recommendations(
    self,
    node_type: str,
    action_parameters: Dict[str, Any]
) -> Dict[str, Any]:
    """Suggest a self-healing setup for a node type and its parameters.

    Args:
        node_type: Type of the node (click, type, wait, ...).
        action_parameters: Parameters of the action.

    Returns:
        Dict with recommended strategies, recovery mode, thresholds and a
        human-readable list of reasons.
    """
    # Baseline recommendation, adjusted per node type below.
    recommendations = {
        'recommended_strategies': [],
        'recovery_mode': RecoveryMode.BALANCED,
        'confidence_threshold': 0.7,
        'max_attempts': 3,
        'reasoning': []
    }

    if node_type in ('click', 'hover'):
        recommendations['recommended_strategies'] = [
            RecoveryStrategy.SEMANTIC_VARIANT,
            RecoveryStrategy.SPATIAL_FALLBACK
        ]
        recommendations['reasoning'].append(
            "UI interactions benefit from semantic and spatial recovery"
        )
    elif node_type in ('type', 'input'):
        recommendations['recommended_strategies'] = [
            RecoveryStrategy.FORMAT_TRANSFORMATION,
            RecoveryStrategy.TIMING_ADAPTATION
        ]
        recommendations['recovery_mode'] = RecoveryMode.CONSERVATIVE
        recommendations['confidence_threshold'] = 0.8
        recommendations['reasoning'].append(
            "Data input requires conservative approach with format validation"
        )
    elif node_type in ('wait', 'navigate'):
        recommendations['recommended_strategies'] = [
            RecoveryStrategy.TIMING_ADAPTATION
        ]
        recommendations['reasoning'].append(
            "Navigation and timing actions primarily need timing adjustments"
        )
    elif node_type in ('extract', 'validate'):
        recommendations['recommended_strategies'] = [
            RecoveryStrategy.SEMANTIC_VARIANT,
            RecoveryStrategy.SPATIAL_FALLBACK
        ]
        recommendations['confidence_threshold'] = 0.8
        recommendations['reasoning'].append(
            "Data extraction requires high confidence in element identification"
        )
    else:
        # Unknown node type: fall back to everything.
        recommendations['recommended_strategies'] = [RecoveryStrategy.ALL]
        recommendations['reasoning'].append(
            "General node type - all strategies recommended"
        )

    # Lower the threshold when the target itself was detected with low
    # confidence.
    target = action_parameters.get('target')
    if isinstance(target, dict) and target.get('confidence', 1.0) < 0.8:
        recommendations['confidence_threshold'] = 0.6
        recommendations['reasoning'].append(
            "Low target confidence - reduced threshold recommended"
        )

    return recommendations
|
||||
|
||||
|
||||
# Global converter instance (lazily created by get_self_healing_converter).
_converter_instance: Optional[SelfHealingConverter] = None
|
||||
|
||||
|
||||
def get_self_healing_converter() -> SelfHealingConverter:
    """Get or create the global self-healing converter instance.

    Lazily instantiates the module-level singleton on first call.
    NOTE(review): not guarded by a lock; concurrent first calls could
    each create an instance — confirm whether that matters here.
    """
    global _converter_instance

    if _converter_instance is None:
        _converter_instance = SelfHealingConverter()

    return _converter_instance
|
||||
@@ -0,0 +1,383 @@
|
||||
"""Self-healing integration service for Visual Workflow Builder."""
|
||||
|
||||
import logging
|
||||
from typing import Dict, Any, List, Optional
|
||||
from datetime import datetime
|
||||
import json
|
||||
|
||||
from models.self_healing_config import (
|
||||
SelfHealingConfig, RecoveryNotification, RecoveryStatistics,
|
||||
RecoveryStrategy, RecoveryMode
|
||||
)
|
||||
|
||||
# Import core self-healing components
|
||||
try:
|
||||
from core.healing.execution_integration import get_self_healing_integration
|
||||
from core.healing.models import RecoveryContext, RecoveryResult
|
||||
SELF_HEALING_AVAILABLE = True
|
||||
except ImportError:
|
||||
SELF_HEALING_AVAILABLE = False
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class VisualWorkflowSelfHealingService:
|
||||
"""Service for integrating self-healing with visual workflows."""
|
||||
|
||||
def __init__(self):
    """Initialize the service; disabled when core self-healing is absent."""
    self.enabled = SELF_HEALING_AVAILABLE
    self.notifications: List[RecoveryNotification] = []
    self.statistics = RecoveryStatistics()

    if not self.enabled:
        self.core_integration = None
        logger.warning("Core self-healing not available - service disabled")
    else:
        self.core_integration = get_self_healing_integration()
        logger.info("Visual Workflow Self-Healing service initialized")
|
||||
|
||||
def configure_node_self_healing(
    self,
    node_type: str,
    existing_config: Optional[SelfHealingConfig] = None
) -> SelfHealingConfig:
    """Return the self-healing configuration to use for a node.

    When an existing configuration is supplied, its values are preserved
    (copied field-by-field into a fresh SelfHealingConfig); otherwise the
    per-node-type defaults apply.

    Args:
        node_type: Type of the node (click, type, wait, etc.).
        existing_config: Existing configuration whose user customisations
            should be preserved.

    Returns:
        SelfHealingConfig for the node.
    """
    # Type-specific defaults; computed unconditionally to mirror the
    # original behaviour of get_default_for_action.
    default_config = SelfHealingConfig.get_default_for_action(node_type)

    if not existing_config:
        return default_config

    # Copy field-by-field so user customisations survive unchanged.
    return SelfHealingConfig(
        enabled=existing_config.enabled,
        recovery_mode=existing_config.recovery_mode,
        max_attempts=existing_config.max_attempts,
        confidence_threshold=existing_config.confidence_threshold,
        enabled_strategies=existing_config.enabled_strategies,
        strategy_timeout=existing_config.strategy_timeout,
        learn_from_success=existing_config.learn_from_success,
        require_user_confirmation=existing_config.require_user_confirmation,
        stop_on_failure=existing_config.stop_on_failure,
        notify_on_recovery=existing_config.notify_on_recovery,
        notify_on_failure=existing_config.notify_on_failure
    )
|
||||
|
||||
    def handle_execution_failure(
        self,
        workflow_id: str,
        node_id: str,
        node_config: SelfHealingConfig,
        action_info: Dict[str, Any],
        failure_info: Dict[str, Any],
        screenshot_path: str,
        attempt_count: int = 1
    ) -> Optional[Dict[str, Any]]:
        """
        Handle execution failure and attempt recovery.

        Args:
            workflow_id: ID of the workflow
            node_id: ID of the failed node
            node_config: Self-healing configuration for the node
            action_info: Information about the failed action
            failure_info: Information about the failure
            screenshot_path: Path to screenshot at failure
            attempt_count: Current attempt number

        Returns:
            Recovery result dictionary or None if not attempted
        """
        # Do nothing when the service or this node opted out of self-healing.
        if not self.enabled or not node_config.enabled:
            return None

        # Check if we should attempt recovery
        if attempt_count > node_config.max_attempts:
            logger.info(f"Max attempts ({node_config.max_attempts}) reached for node {node_id}")
            return None

        try:
            # Create mock execution result for core integration
            # (anonymous type built on the fly so the core API sees the
            # attributes it expects without importing its result classes here)
            execution_result = type('ExecutionResult', (), {
                'status': type('ExecutionStatus', (), {'TARGET_NOT_FOUND': 'TARGET_NOT_FOUND'})(),
                'message': failure_info.get('message', 'Execution failed')
            })()

            # Attempt recovery using core integration
            recovery_result = self.core_integration.handle_execution_failure(
                action_info=action_info,
                execution_result=execution_result,
                workflow_id=workflow_id,
                node_id=node_id,
                screenshot_path=screenshot_path,
                attempt_count=attempt_count
            )

            if recovery_result:
                # Create notification
                notification = RecoveryNotification(
                    node_id=node_id,
                    strategy_used=recovery_result.strategy_used,
                    success=recovery_result.success,
                    confidence=recovery_result.confidence_score,
                    execution_time=recovery_result.execution_time,
                    message=self._create_recovery_message(recovery_result),
                    timestamp=datetime.now().isoformat(),
                    requires_attention=recovery_result.requires_user_input
                )

                # Add notification if configured
                if (recovery_result.success and node_config.notify_on_recovery) or \
                   (not recovery_result.success and node_config.notify_on_failure):
                    self.notifications.append(notification)

                # Update statistics
                self._update_statistics(recovery_result)

                # Return result for UI
                return {
                    'success': recovery_result.success,
                    'strategy_used': recovery_result.strategy_used,
                    'confidence': recovery_result.confidence_score,
                    'execution_time': recovery_result.execution_time,
                    'new_element': recovery_result.new_element,
                    'requires_user_input': recovery_result.requires_user_input,
                    'message': notification.message,
                    'notification': notification.to_dict()
                }

        except Exception as e:
            logger.error(f"Self-healing attempt failed: {e}")

            # Create failure notification
            notification = RecoveryNotification(
                node_id=node_id,
                strategy_used='error',
                success=False,
                confidence=0.0,
                execution_time=0.0,
                message=f"Erreur lors de la tentative de récupération: {str(e)}",
                timestamp=datetime.now().isoformat(),
                requires_attention=True
            )

            if node_config.notify_on_failure:
                self.notifications.append(notification)

        # Reached when no recovery result was produced or an error occurred.
        return None
||||
def get_recovery_suggestions(
|
||||
self,
|
||||
workflow_id: str,
|
||||
node_id: str,
|
||||
action_info: Dict[str, Any],
|
||||
screenshot_path: str
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get recovery suggestions for a node.
|
||||
|
||||
Args:
|
||||
workflow_id: ID of the workflow
|
||||
node_id: ID of the node
|
||||
action_info: Information about the action
|
||||
screenshot_path: Path to current screenshot
|
||||
|
||||
Returns:
|
||||
List of recovery suggestions
|
||||
"""
|
||||
if not self.enabled:
|
||||
return []
|
||||
|
||||
try:
|
||||
suggestions = self.core_integration.get_recovery_suggestions(
|
||||
action_info=action_info,
|
||||
workflow_id=workflow_id,
|
||||
node_id=node_id,
|
||||
screenshot_path=screenshot_path
|
||||
)
|
||||
|
||||
return [
|
||||
{
|
||||
'strategy': suggestion.strategy,
|
||||
'confidence': suggestion.confidence,
|
||||
'description': suggestion.description,
|
||||
'estimated_time': suggestion.estimated_time,
|
||||
'metadata': suggestion.metadata
|
||||
}
|
||||
for suggestion in suggestions
|
||||
]
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to get recovery suggestions: {e}")
|
||||
return []
|
||||
|
||||
    def get_notifications(
        self,
        workflow_id: Optional[str] = None,
        limit: int = 50
    ) -> List[Dict[str, Any]]:
        """
        Get recovery notifications.

        Args:
            workflow_id: Filter by workflow ID (optional).
                NOTE(review): currently ignored - notifications carry no
                workflow id, so no filtering happens. Confirm before relying
                on this parameter.
            limit: Maximum number of notifications to return

        Returns:
            List of notification dictionaries (oldest first among the
            `limit` most recent entries)
        """
        notifications = self.notifications[-limit:]  # Get latest notifications
        return [n.to_dict() for n in notifications]
||||
    def clear_notifications(self, workflow_id: Optional[str] = None):
        """
        Clear notifications.

        Args:
            workflow_id: Clear notifications for specific workflow (optional).
                NOTE(review): per-workflow clearing is not implemented - when
                an id is supplied this method is intentionally a no-op.
        """
        if workflow_id:
            # Filter out notifications for specific workflow
            # Note: We'd need to track workflow_id in notifications for this
            pass
        else:
            self.notifications.clear()
|
||||
def get_statistics(self) -> Dict[str, Any]:
|
||||
"""
|
||||
Get recovery statistics.
|
||||
|
||||
Returns:
|
||||
Statistics dictionary
|
||||
"""
|
||||
stats = self.statistics.to_dict()
|
||||
|
||||
# Add core statistics if available
|
||||
if self.enabled and self.core_integration:
|
||||
try:
|
||||
core_stats = self.core_integration.get_statistics()
|
||||
stats.update(core_stats)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to get core statistics: {e}")
|
||||
|
||||
return stats
|
||||
|
||||
    def get_insights(self) -> List[str]:
        """
        Get insights from recovery patterns.

        Builds human-readable (French) summary strings from the local
        statistics, then appends insights from the core integration when
        it is available.

        Returns:
            List of insight strings
        """
        insights = []

        if self.statistics.total_attempts > 0:
            success_rate = self.statistics.success_rate
            # Bucket the success rate into excellent / moderate / poor messages.
            if success_rate > 0.8:
                insights.append(
                    f"🎯 Excellent taux de récupération: {success_rate:.1%} "
                    f"({self.statistics.successful_recoveries}/{self.statistics.total_attempts})"
                )
            elif success_rate > 0.5:
                insights.append(
                    f"⚠️ Taux de récupération modéré: {success_rate:.1%} - "
                    "Considérez ajuster les seuils de confiance"
                )
            else:
                insights.append(
                    f"❌ Taux de récupération faible: {success_rate:.1%} - "
                    "Vérifiez la configuration des stratégies"
                )

        if self.statistics.most_used_strategy:
            insights.append(
                f"🔧 Stratégie la plus efficace: {self.statistics.most_used_strategy}"
            )

        # Only worth mentioning once more than a minute has been saved.
        if self.statistics.total_time_saved > 60:
            minutes_saved = self.statistics.total_time_saved / 60
            insights.append(
                f"⏱️ Temps économisé: {minutes_saved:.1f} minutes grâce à la récupération automatique"
            )

        # Add core insights if available
        if self.enabled and self.core_integration:
            try:
                core_insights = self.core_integration.get_insights()
                insights.extend(core_insights)
            except Exception as e:
                logger.error(f"Failed to get core insights: {e}")

        return insights
||||
|
||||
def _create_recovery_message(self, recovery_result: 'RecoveryResult') -> str:
|
||||
"""Create a user-friendly recovery message."""
|
||||
if recovery_result.success:
|
||||
strategy_names = {
|
||||
'SemanticVariantStrategy': 'variante sémantique',
|
||||
'SpatialFallbackStrategy': 'recherche spatiale élargie',
|
||||
'TimingAdaptationStrategy': 'adaptation du délai',
|
||||
'FormatTransformationStrategy': 'transformation de format'
|
||||
}
|
||||
|
||||
strategy_name = strategy_names.get(
|
||||
recovery_result.strategy_used,
|
||||
recovery_result.strategy_used
|
||||
)
|
||||
|
||||
return (
|
||||
f"✅ Récupération réussie avec {strategy_name} "
|
||||
f"(confiance: {recovery_result.confidence_score:.1%}, "
|
||||
f"temps: {recovery_result.execution_time:.1f}s)"
|
||||
)
|
||||
else:
|
||||
return (
|
||||
f"❌ Échec de la récupération: {recovery_result.error_message or 'Raison inconnue'}"
|
||||
)
|
||||
|
||||
    def _update_statistics(self, recovery_result: 'RecoveryResult'):
        """Fold one recovery attempt into the aggregate statistics."""
        self.statistics.total_attempts += 1

        if recovery_result.success:
            self.statistics.successful_recoveries += 1
            self.statistics.total_time_saved += recovery_result.execution_time

            # Update average confidence (incremental running mean over
            # successful recoveries only).
            total_confidence = (
                self.statistics.average_confidence * (self.statistics.successful_recoveries - 1) +
                recovery_result.confidence_score
            )
            self.statistics.average_confidence = total_confidence / self.statistics.successful_recoveries

            # Update most used strategy
            # NOTE(review): only the FIRST successful strategy is ever recorded
            # here - nothing counts usages, so despite the name this is really
            # "first used strategy". Confirm the intended semantics.
            if not self.statistics.most_used_strategy:
                self.statistics.most_used_strategy = recovery_result.strategy_used
        else:
            self.statistics.failed_recoveries += 1
||||
|
||||
|
||||
# Global service instance (lazily created singleton)
_service_instance: Optional[VisualWorkflowSelfHealingService] = None


def get_self_healing_service() -> VisualWorkflowSelfHealingService:
    """Return the process-wide self-healing service, creating it on first use."""
    global _service_instance
    if _service_instance is None:
        _service_instance = VisualWorkflowSelfHealingService()
    return _service_instance
|
||||
188
visual_workflow_builder/backend/services/serialization.py
Normal file
188
visual_workflow_builder/backend/services/serialization.py
Normal file
@@ -0,0 +1,188 @@
|
||||
"""backend/services/serialization.py
|
||||
|
||||
Persistance simple (JSON/YAML) pour les workflows.
|
||||
|
||||
Auteur : Dom, Alice, Kiro - 08 janvier 2026
|
||||
|
||||
Patch #1:
|
||||
- Fournit WorkflowDatabase + WorkflowSerializer utilisés par api/workflows.py
|
||||
- Stockage fichier: un workflow = un fichier <id>.json dans un répertoire
|
||||
|
||||
Design:
|
||||
- Permissif: on conserve les champs inconnus via models.py
|
||||
- Robuste: erreurs encapsulées, pas de crash au boot
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Dict, Optional, List
|
||||
|
||||
import yaml # PyYAML
|
||||
|
||||
from models import VisualWorkflow, generate_id, WorkflowSettings
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
class SerializationError(Exception):
    """Raised when a workflow cannot be serialized or deserialized."""
||||
|
||||
|
||||
class ValidationError(Exception):
    """Raised when a serialized workflow fails validation.

    Attributes:
        errors: The individual validation error messages.
    """

    def __init__(self, errors: List[str]):
        # Join the messages so str(exc) shows the full list at once.
        joined = ", ".join(errors)
        super().__init__(joined)
        self.errors = errors
||||
|
||||
|
||||
def create_empty_workflow(name: str, description: str, created_by: str) -> VisualWorkflow:
    """Build a fresh workflow with no nodes, edges or variables.

    Args:
        name: Human-readable workflow name.
        description: Free-text description.
        created_by: Author identifier recorded on the workflow.

    Returns:
        A new VisualWorkflow with default settings and a generated id.
    """
    timestamp = datetime.now()
    return VisualWorkflow(
        id=WorkflowSerializer.generate_workflow_id(),
        name=name,
        description=description,
        version="1.0.0",
        created_at=timestamp,
        updated_at=timestamp,
        created_by=created_by,
        nodes=[],
        edges=[],
        variables=[],
        settings=WorkflowSettings(),
    )
|
||||
|
||||
|
||||
class WorkflowSerializer:
    """Serializes/deserializes workflows to/from JSON or YAML.

    All failures surface as SerializationError (or ValidationError when the
    parsed workflow fails validation), so callers only need to handle the
    package's own exception types.
    """

    @staticmethod
    def generate_workflow_id() -> str:
        """Generate a unique workflow identifier (prefix "wf")."""
        return generate_id("wf")

    @staticmethod
    def serialize(workflow: VisualWorkflow, format: str = "json") -> str:
        """Serialize a workflow to a JSON or YAML string.

        Args:
            workflow: Workflow to serialize.
            format: "json" (default), "yml" or "yaml"; case-insensitive.

        Raises:
            SerializationError: On unsupported format or serialization failure.
        """
        try:
            data = workflow.to_dict()
            fmt = (format or "json").lower()
            if fmt == "json":
                return json.dumps(data, ensure_ascii=False, indent=2)
            if fmt in ("yml", "yaml"):
                return yaml.safe_dump(data, allow_unicode=True, sort_keys=False)
            raise SerializationError(f"Format non supporté: {format}")
        except SerializationError:
            # Already the right type - re-raise instead of wrapping it a
            # second time (the blanket handler below would otherwise chain
            # the error onto itself).
            raise
        except Exception as e:
            raise SerializationError(str(e)) from e

    @staticmethod
    def deserialize(raw: Any, format: str = "json") -> VisualWorkflow:
        """Deserialize a workflow from a JSON/YAML string, bytes or dict.

        Args:
            raw: JSON/YAML text (str or UTF-8 bytes) or an already-parsed dict.
            format: "json" (default), "yml" or "yaml"; case-insensitive.

        Raises:
            ValidationError: If the parsed workflow fails validation.
            SerializationError: On unsupported format, invalid input type,
                or any parse failure.
        """
        try:
            fmt = (format or "json").lower()

            # Accept raw bytes transparently; bad bytes are replaced rather
            # than raising so the parser produces the actual error message.
            if isinstance(raw, (bytes, bytearray)):
                raw = raw.decode("utf-8", errors="replace")

            if isinstance(raw, str):
                if fmt == "json":
                    data = json.loads(raw)
                elif fmt in ("yml", "yaml"):
                    data = yaml.safe_load(raw)
                else:
                    raise SerializationError(f"Format non supporté: {format}")
            elif isinstance(raw, dict):
                data = raw
            else:
                raise SerializationError("Type d'entrée invalide pour la désérialisation")

            wf = VisualWorkflow.from_dict(data)
            errors = wf.validate()
            if errors:
                raise ValidationError(errors)
            return wf
        except (ValidationError, SerializationError):
            # Pass through our own exception types unchanged instead of
            # double-wrapping SerializationError in the handler below.
            raise
        except Exception as e:
            raise SerializationError(str(e)) from e
|
||||
|
||||
|
||||
@dataclass
class WorkflowDatabase:
    """File-based workflow store: one ``<id>.json`` file per workflow."""

    # Directory holding one JSON file per workflow; created on init.
    root_dir: str

    def __post_init__(self) -> None:
        """Create the storage directory if it does not exist yet."""
        os.makedirs(self.root_dir, exist_ok=True)

    def _path(self, workflow_id: str) -> str:
        """Return the on-disk path for a given workflow id.

        The id is restricted to alphanumerics, '_' and '-'.
        NOTE(review): when sanitization strips every character, the raw id
        is used as-is, which bypasses the filter - confirm this is safe for
        the sources of workflow ids.
        """
        safe_id = "".join(c for c in workflow_id if c.isalnum() or c in ("_", "-")) or workflow_id
        return os.path.join(self.root_dir, f"{safe_id}.json")

    def exists(self, workflow_id: str) -> bool:
        """Return True if a file exists for this workflow id."""
        return os.path.exists(self._path(workflow_id))

    def save(self, workflow: VisualWorkflow) -> None:
        """Persist a workflow to disk atomically.

        The payload is written to a temporary file in the same directory,
        then moved into place with os.replace, so a crash mid-write can
        never leave a truncated workflow file behind.

        Raises:
            SerializationError: If serialization or the write fails.
        """
        try:
            payload = WorkflowSerializer.serialize(workflow, format="json")
            target = self._path(workflow.id)
            tmp_path = f"{target}.tmp"
            with open(tmp_path, "w", encoding="utf-8") as f:
                f.write(payload)
            # Atomic on both POSIX and Windows; replaces any existing file.
            os.replace(tmp_path, target)
        except Exception as e:
            raise SerializationError(f"Échec de la sauvegarde du workflow: {e}") from e

    def load(self, workflow_id: str) -> Optional[VisualWorkflow]:
        """Load a workflow from disk, or return None when it does not exist.

        Raises:
            ValidationError: If the stored workflow fails validation.
            SerializationError: For any other read/parse failure.
        """
        path = self._path(workflow_id)
        if not os.path.exists(path):
            return None
        try:
            with open(path, "r", encoding="utf-8") as f:
                raw = f.read()
            return WorkflowSerializer.deserialize(raw, format="json")
        except ValidationError:
            raise
        except Exception as e:
            raise SerializationError(f"Échec du chargement du workflow '{workflow_id}': {e}") from e

    def delete(self, workflow_id: str) -> None:
        """Delete a workflow file; a missing file is silently ignored.

        Raises:
            SerializationError: If the file exists but cannot be removed.
        """
        path = self._path(workflow_id)
        try:
            if os.path.exists(path):
                os.remove(path)
        except Exception as e:
            raise SerializationError(f"Échec de la suppression du workflow '{workflow_id}': {e}") from e

    def list(self) -> List[VisualWorkflow]:
        """List every workflow on disk.

        Invalid files are logged and skipped so one corrupt workflow cannot
        block the loading of all the others.
        """
        workflows: List[VisualWorkflow] = []
        if not os.path.isdir(self.root_dir):
            return []
        for fname in os.listdir(self.root_dir):
            if not fname.endswith(".json"):
                continue
            wf_id = fname[:-5]  # strip the ".json" suffix
            try:
                wf = self.load(wf_id)
                if wf is not None:
                    workflows.append(wf)
            except (SerializationError, ValidationError) as e:
                # Skip invalid files and keep going
                print(f"⚠️ Workflow ignoré '{wf_id}': {e}")
            except Exception as e:
                # Any other error - skip as well so listing never fails outright
                print(f"⚠️ Erreur inattendue pour '{wf_id}': {e}")
        return workflows
|
||||
941
visual_workflow_builder/backend/services/template_service.py
Normal file
941
visual_workflow_builder/backend/services/template_service.py
Normal file
@@ -0,0 +1,941 @@
|
||||
"""
|
||||
Template Service
|
||||
|
||||
Handles template storage, retrieval, and instantiation.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
from datetime import datetime
|
||||
from typing import Dict, List, Optional, Any
|
||||
|
||||
import sys
|
||||
import os
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from models.template import WorkflowTemplate, TemplateParameter, TemplateDifficulty, generate_template_id
|
||||
from models.visual_workflow import VisualWorkflow, ParameterType
|
||||
|
||||
|
||||
class TemplateService:
|
||||
"""Service for managing workflow templates"""
|
||||
|
||||
    def __init__(self, data_dir: str = "data/templates"):
        """Initialize the service and seed built-in templates on first run.

        Args:
            data_dir: Directory where the ``templates.json`` store lives.
        """
        self.data_dir = data_dir
        self.templates_file = os.path.join(data_dir, "templates.json")
        # Make sure the storage directory exists before any file access.
        self._ensure_data_dir()
        # Populates the store with the default templates when it is empty.
        self._load_default_templates()
||||
|
||||
    def _ensure_data_dir(self):
        """Create the data directory (and parents) if it does not exist yet."""
        os.makedirs(self.data_dir, exist_ok=True)
|
||||
|
||||
def _load_templates(self) -> Dict[str, WorkflowTemplate]:
|
||||
"""Load all templates from storage"""
|
||||
if not os.path.exists(self.templates_file):
|
||||
return {}
|
||||
|
||||
try:
|
||||
with open(self.templates_file, 'r', encoding='utf-8') as f:
|
||||
data = json.load(f)
|
||||
return {
|
||||
template_id: WorkflowTemplate.from_dict(template_data)
|
||||
for template_id, template_data in data.items()
|
||||
}
|
||||
except (json.JSONDecodeError, KeyError, ValueError) as e:
|
||||
print(f"Error loading templates: {e}")
|
||||
return {}
|
||||
|
||||
def _save_templates(self, templates: Dict[str, WorkflowTemplate]):
|
||||
"""Save all templates to storage"""
|
||||
try:
|
||||
data = {
|
||||
template_id: template.to_dict()
|
||||
for template_id, template in templates.items()
|
||||
}
|
||||
with open(self.templates_file, 'w', encoding='utf-8') as f:
|
||||
json.dump(data, f, indent=2, ensure_ascii=False)
|
||||
except Exception as e:
|
||||
print(f"Error saving templates: {e}")
|
||||
raise
|
||||
|
||||
def list_templates(self, category: Optional[str] = None, difficulty: Optional[str] = None) -> List[WorkflowTemplate]:
|
||||
"""List all templates with optional filtering"""
|
||||
templates = self._load_templates()
|
||||
result = list(templates.values())
|
||||
|
||||
# Filter by category
|
||||
if category:
|
||||
result = [t for t in result if t.category.lower() == category.lower()]
|
||||
|
||||
# Filter by difficulty
|
||||
if difficulty:
|
||||
result = [t for t in result if t.difficulty.value == difficulty.lower()]
|
||||
|
||||
# Sort by usage count (most used first), then by name
|
||||
result.sort(key=lambda t: (-t.usage_count, t.name))
|
||||
|
||||
return result
|
||||
|
||||
def get_template(self, template_id: str) -> Optional[WorkflowTemplate]:
|
||||
"""Get a specific template by ID"""
|
||||
templates = self._load_templates()
|
||||
return templates.get(template_id)
|
||||
|
||||
    def create_template(self, template_data: Dict[str, Any]) -> WorkflowTemplate:
        """Create and persist a new template.

        Args:
            template_data: Raw template payload; an id is generated when
                missing and both timestamps are (re)set here.

        Returns:
            The persisted WorkflowTemplate.

        Raises:
            ValueError: If the template fails validation.
        """
        # Generate ID if not provided
        if 'id' not in template_data or not template_data['id']:
            template_data['id'] = generate_template_id()

        # Set timestamps
        now = datetime.now()
        template_data['created_at'] = now.isoformat()
        template_data['updated_at'] = now.isoformat()

        # Create template object
        template = WorkflowTemplate.from_dict(template_data)

        # Validate
        errors = template.validate()
        if errors:
            raise ValueError(f"Template validation failed: {', '.join(errors)}")

        # Save
        templates = self._load_templates()
        templates[template.id] = template
        self._save_templates(templates)

        return template
||||
|
||||
    def update_template(self, template_id: str, template_data: Dict[str, Any]) -> Optional[WorkflowTemplate]:
        """Replace an existing template's content.

        Args:
            template_id: Id of the template to update.
            template_data: New payload; id and creation time are preserved
                from the stored template, updated_at is refreshed.

        Returns:
            The updated template, or None when the id is unknown.

        Raises:
            ValueError: If the updated template fails validation.
        """
        templates = self._load_templates()

        if template_id not in templates:
            return None

        # Preserve ID and creation time
        template_data['id'] = template_id
        template_data['created_at'] = templates[template_id].created_at.isoformat()
        template_data['updated_at'] = datetime.now().isoformat()

        # Create updated template
        template = WorkflowTemplate.from_dict(template_data)

        # Validate
        errors = template.validate()
        if errors:
            raise ValueError(f"Template validation failed: {', '.join(errors)}")

        # Save
        templates[template_id] = template
        self._save_templates(templates)

        return template
||||
|
||||
def delete_template(self, template_id: str) -> bool:
|
||||
"""Delete a template"""
|
||||
templates = self._load_templates()
|
||||
|
||||
if template_id not in templates:
|
||||
return False
|
||||
|
||||
del templates[template_id]
|
||||
self._save_templates(templates)
|
||||
return True
|
||||
|
||||
    def instantiate_template(self, template_id: str, parameters: Dict[str, Any],
                           workflow_name: str, created_by: str = "user") -> Optional[VisualWorkflow]:
        """Create a workflow instance from a template.

        Args:
            template_id: Id of the template to instantiate.
            parameters: Values substituted into the template parameters.
            workflow_name: Name given to the new workflow.
            created_by: Author recorded on the new workflow.

        Returns:
            The new VisualWorkflow, or None when the template does not exist.
        """
        template = self.get_template(template_id)
        if not template:
            return None

        # Increment usage count
        # NOTE(review): the store is read a second time here (get_template
        # already loaded it) and the count is persisted before instantiate()
        # runs, so usage is recorded even if instantiation fails below.
        templates = self._load_templates()
        templates[template_id].usage_count += 1
        templates[template_id].updated_at = datetime.now()
        self._save_templates(templates)

        # Create workflow instance
        return template.instantiate(parameters, workflow_name, created_by)
||||
|
||||
    def create_template_from_workflow(self, workflow: VisualWorkflow, template_name: str,
                                    template_description: str, category: str,
                                    parameters: List[Dict[str, Any]] = None) -> WorkflowTemplate:
        """Turn an existing workflow into a reusable template.

        Args:
            workflow: Source workflow (copied; the original is not mutated).
            template_name: Name of the new template.
            template_description: Description of the new template.
            category: Template category.
            parameters: Optional raw TemplateParameter payloads exposing
                workflow fields as template inputs.

        Returns:
            The created and persisted WorkflowTemplate (difficulty defaults
            to 'intermediate', estimated_time to 10).
        """
        # Create template workflow (copy of original via dict round-trip)
        template_workflow_data = workflow.to_dict()
        template_workflow_data['is_template'] = True
        template_workflow = VisualWorkflow.from_dict(template_workflow_data)

        # Create template parameters
        template_params = []
        if parameters:
            template_params = [TemplateParameter.from_dict(p) for p in parameters]

        # Create template
        template_data = {
            'id': generate_template_id(),
            'name': template_name,
            'description': template_description,
            'category': category,
            'workflow': template_workflow.to_dict(),
            'parameters': [p.to_dict() for p in template_params],
            'tags': workflow.tags,
            'difficulty': 'intermediate',
            'estimated_time': 10,
            'created_by': workflow.created_by
        }

        return self.create_template(template_data)
||||
|
||||
def _load_default_templates(self):
|
||||
"""Load default templates if none exist"""
|
||||
templates = self._load_templates()
|
||||
if templates:
|
||||
return # Templates already exist
|
||||
|
||||
# Create default templates
|
||||
default_templates = self._create_default_templates()
|
||||
|
||||
for template in default_templates:
|
||||
templates[template.id] = template
|
||||
|
||||
self._save_templates(templates)
|
||||
|
||||
def _create_default_templates(self) -> List[WorkflowTemplate]:
|
||||
"""Create the default template set"""
|
||||
templates = []
|
||||
|
||||
# 1. Login Template
|
||||
login_template = self._create_login_template()
|
||||
templates.append(login_template)
|
||||
|
||||
# 2. Form Fill Template
|
||||
form_template = self._create_form_fill_template()
|
||||
templates.append(form_template)
|
||||
|
||||
# 3. Data Extraction Template
|
||||
extraction_template = self._create_data_extraction_template()
|
||||
templates.append(extraction_template)
|
||||
|
||||
# 4. Navigation Template
|
||||
navigation_template = self._create_navigation_template()
|
||||
templates.append(navigation_template)
|
||||
|
||||
return templates
|
||||
|
||||
    def _create_login_template(self) -> WorkflowTemplate:
        """Build the built-in login template.

        The workflow is a linear chain start -> navigate -> username ->
        password -> login button -> end; the template exposes the login URL,
        the three selectors and the two credential values as parameters
        (placeholders of the form ``{{name}}`` in node parameters).
        """
        # Imported locally to avoid a hard dependency at module load time.
        from models.visual_workflow import (
            VisualWorkflow, VisualNode, VisualEdge, Position, Size, Port,
            Variable, WorkflowSettings
        )

        # Create nodes
        nodes = [
            VisualNode(
                id="start",
                type="start",
                position=Position(50, 100),
                size=Size(100, 50),
                parameters={},
                input_ports=[],
                output_ports=[Port("out", "output", "output")],
                label="Début"
            ),
            VisualNode(
                id="navigate",
                type="navigate",
                position=Position(200, 100),
                size=Size(150, 80),
                parameters={
                    "url": "{{login_url}}",
                    "wait_for_load": True,
                    "timeout": 10000
                },
                input_ports=[Port("in", "input", "input")],
                output_ports=[Port("out", "output", "output")],
                label="Naviguer vers la page de connexion"
            ),
            VisualNode(
                id="username",
                type="type",
                position=Position(400, 50),
                size=Size(150, 80),
                parameters={
                    "target": "{{username_selector}}",
                    "text": "{{username}}",
                    "clear_first": True,
                    "timeout": 5000
                },
                input_ports=[Port("in", "input", "input")],
                output_ports=[Port("out", "output", "output")],
                label="Saisir nom d'utilisateur"
            ),
            VisualNode(
                id="password",
                type="type",
                position=Position(400, 150),
                size=Size(150, 80),
                parameters={
                    "target": "{{password_selector}}",
                    "text": "{{password}}",
                    "clear_first": True,
                    "timeout": 5000
                },
                input_ports=[Port("in", "input", "input")],
                output_ports=[Port("out", "output", "output")],
                label="Saisir mot de passe"
            ),
            VisualNode(
                id="login_button",
                type="click",
                position=Position(600, 100),
                size=Size(150, 80),
                parameters={
                    "target": "{{login_button_selector}}",
                    "timeout": 5000
                },
                input_ports=[Port("in", "input", "input")],
                output_ports=[Port("out", "output", "output")],
                label="Cliquer sur Se connecter"
            ),
            VisualNode(
                id="end",
                type="end",
                position=Position(800, 100),
                size=Size(100, 50),
                parameters={},
                input_ports=[Port("in", "input", "input")],
                output_ports=[],
                label="Fin"
            )
        ]

        # Create edges (a single linear path through the nodes above)
        edges = [
            VisualEdge("e1", "start", "navigate", "out", "in"),
            VisualEdge("e2", "navigate", "username", "out", "in"),
            VisualEdge("e3", "username", "password", "out", "in"),
            VisualEdge("e4", "password", "login_button", "out", "in"),
            VisualEdge("e5", "login_button", "end", "out", "in")
        ]

        # Create workflow
        workflow = VisualWorkflow(
            id="login_template_workflow",
            name="Modèle de Connexion",
            description="Workflow de base pour se connecter à un site web",
            version="1.0.0",
            created_at=datetime.now(),
            updated_at=datetime.now(),
            created_by="system",
            nodes=nodes,
            edges=edges,
            variables=[],
            settings=WorkflowSettings(),
            tags=["login", "authentication", "web"],
            category="Web Automation",
            is_template=True
        )

        # Create template parameters (each one binds a {{placeholder}} used
        # above to a specific node parameter)
        parameters = [
            TemplateParameter(
                name="login_url",
                type=ParameterType.STRING,
                description="URL de la page de connexion",
                node_id="navigate",
                parameter_name="url",
                label="URL de connexion",
                placeholder="https://example.com/login"
            ),
            TemplateParameter(
                name="username_selector",
                type=ParameterType.TARGET,
                description="Sélecteur du champ nom d'utilisateur",
                node_id="username",
                parameter_name="target",
                label="Champ nom d'utilisateur",
                placeholder="input[name='username']"
            ),
            TemplateParameter(
                name="username",
                type=ParameterType.STRING,
                description="Nom d'utilisateur à saisir",
                node_id="username",
                parameter_name="text",
                label="Nom d'utilisateur",
                placeholder="votre_nom_utilisateur"
            ),
            TemplateParameter(
                name="password_selector",
                type=ParameterType.TARGET,
                description="Sélecteur du champ mot de passe",
                node_id="password",
                parameter_name="target",
                label="Champ mot de passe",
                placeholder="input[name='password']"
            ),
            TemplateParameter(
                name="password",
                type=ParameterType.STRING,
                description="Mot de passe à saisir",
                node_id="password",
                parameter_name="text",
                label="Mot de passe",
                placeholder="votre_mot_de_passe"
            ),
            TemplateParameter(
                name="login_button_selector",
                type=ParameterType.TARGET,
                description="Sélecteur du bouton de connexion",
                node_id="login_button",
                parameter_name="target",
                label="Bouton de connexion",
                placeholder="button[type='submit']"
            )
        ]

        return WorkflowTemplate(
            id="login_template",
            name="Connexion à un site web",
            description="Template pour automatiser la connexion à un site web avec nom d'utilisateur et mot de passe",
            category="Web Automation",
            workflow=workflow,
            parameters=parameters,
            tags=["login", "authentication", "web", "form"],
            difficulty=TemplateDifficulty.BEGINNER,
            estimated_time=3,
            created_by="system"
        )
||||
|
||||
def _create_form_fill_template(self) -> WorkflowTemplate:
    """Create the form filling template.

    Builds a linear visual workflow (start -> navigate -> fill name ->
    fill email -> fill message -> submit -> end). Node parameter values
    use ``{{placeholder}}`` syntax; each placeholder is bound to one of
    the returned template's ``TemplateParameter`` entries.

    Returns:
        WorkflowTemplate: a beginner-level "Web Automation" template.
    """
    # Local import, matching the pattern used elsewhere in this package
    # (presumably to avoid a circular import — see reload_workflows).
    from models.visual_workflow import (
        VisualWorkflow, VisualNode, VisualEdge, Position, Size, Port,
        Variable, WorkflowSettings
    )

    # Create nodes for a basic form filling workflow
    nodes = [
        VisualNode(
            id="start",
            type="start",
            position=Position(50, 150),
            size=Size(100, 50),
            parameters={},
            input_ports=[],
            output_ports=[Port("out", "output", "output")],
            label="Début"
        ),
        VisualNode(
            id="navigate",
            type="navigate",
            position=Position(200, 150),
            size=Size(150, 80),
            parameters={
                "url": "{{form_url}}",
                "wait_for_load": True
            },
            input_ports=[Port("in", "input", "input")],
            output_ports=[Port("out", "output", "output")],
            label="Naviguer vers le formulaire"
        ),
        # The three "type" nodes below are laid out vertically (y=50/150/250)
        # but execute sequentially via the edges defined further down.
        VisualNode(
            id="fill_name",
            type="type",
            position=Position(400, 50),
            size=Size(150, 80),
            parameters={
                "target": "{{name_selector}}",
                "text": "{{name_value}}",
                "clear_first": True
            },
            input_ports=[Port("in", "input", "input")],
            output_ports=[Port("out", "output", "output")],
            label="Remplir le nom"
        ),
        VisualNode(
            id="fill_email",
            type="type",
            position=Position(400, 150),
            size=Size(150, 80),
            parameters={
                "target": "{{email_selector}}",
                "text": "{{email_value}}",
                "clear_first": True
            },
            input_ports=[Port("in", "input", "input")],
            output_ports=[Port("out", "output", "output")],
            label="Remplir l'email"
        ),
        VisualNode(
            id="fill_message",
            type="type",
            position=Position(400, 250),
            size=Size(150, 80),
            parameters={
                "target": "{{message_selector}}",
                "text": "{{message_value}}",
                "clear_first": True
            },
            input_ports=[Port("in", "input", "input")],
            output_ports=[Port("out", "output", "output")],
            label="Remplir le message"
        ),
        VisualNode(
            id="submit",
            type="click",
            position=Position(600, 150),
            size=Size(150, 80),
            parameters={
                "target": "{{submit_selector}}"
            },
            input_ports=[Port("in", "input", "input")],
            output_ports=[Port("out", "output", "output")],
            label="Soumettre le formulaire"
        ),
        VisualNode(
            id="end",
            type="end",
            position=Position(800, 150),
            size=Size(100, 50),
            parameters={},
            input_ports=[Port("in", "input", "input")],
            output_ports=[],
            label="Fin"
        )
    ]

    # Strictly sequential chain: start -> ... -> end.
    edges = [
        VisualEdge("e1", "start", "navigate", "out", "in"),
        VisualEdge("e2", "navigate", "fill_name", "out", "in"),
        VisualEdge("e3", "fill_name", "fill_email", "out", "in"),
        VisualEdge("e4", "fill_email", "fill_message", "out", "in"),
        VisualEdge("e5", "fill_message", "submit", "out", "in"),
        VisualEdge("e6", "submit", "end", "out", "in")
    ]

    workflow = VisualWorkflow(
        id="form_fill_template_workflow",
        name="Modèle de Remplissage de Formulaire",
        description="Workflow pour remplir automatiquement un formulaire web",
        version="1.0.0",
        created_at=datetime.now(),
        updated_at=datetime.now(),
        created_by="system",
        nodes=nodes,
        edges=edges,
        variables=[],
        settings=WorkflowSettings(),
        tags=["form", "fill", "web"],
        category="Web Automation",
        is_template=True
    )

    # One TemplateParameter per {{placeholder}}; node_id/parameter_name
    # tell the template engine which node parameter to substitute.
    parameters = [
        TemplateParameter(
            name="form_url",
            type=ParameterType.STRING,
            description="URL du formulaire à remplir",
            node_id="navigate",
            parameter_name="url",
            label="URL du formulaire"
        ),
        TemplateParameter(
            name="name_selector",
            type=ParameterType.TARGET,
            description="Sélecteur du champ nom",
            node_id="fill_name",
            parameter_name="target",
            label="Champ nom"
        ),
        TemplateParameter(
            name="name_value",
            type=ParameterType.STRING,
            description="Valeur à saisir dans le champ nom",
            node_id="fill_name",
            parameter_name="text",
            label="Nom"
        ),
        TemplateParameter(
            name="email_selector",
            type=ParameterType.TARGET,
            description="Sélecteur du champ email",
            node_id="fill_email",
            parameter_name="target",
            label="Champ email"
        ),
        TemplateParameter(
            name="email_value",
            type=ParameterType.STRING,
            description="Valeur à saisir dans le champ email",
            node_id="fill_email",
            parameter_name="text",
            label="Email"
        ),
        TemplateParameter(
            name="message_selector",
            type=ParameterType.TARGET,
            description="Sélecteur du champ message",
            node_id="fill_message",
            parameter_name="target",
            label="Champ message"
        ),
        TemplateParameter(
            name="message_value",
            type=ParameterType.STRING,
            description="Valeur à saisir dans le champ message",
            node_id="fill_message",
            parameter_name="text",
            label="Message"
        ),
        TemplateParameter(
            name="submit_selector",
            type=ParameterType.TARGET,
            description="Sélecteur du bouton de soumission",
            node_id="submit",
            parameter_name="target",
            label="Bouton de soumission"
        )
    ]

    return WorkflowTemplate(
        id="form_fill_template",
        name="Remplissage de formulaire",
        description="Template pour remplir automatiquement un formulaire de contact ou d'inscription",
        category="Web Automation",
        workflow=workflow,
        parameters=parameters,
        tags=["form", "contact", "web", "automation"],
        difficulty=TemplateDifficulty.BEGINNER,
        estimated_time=5,
        created_by="system"
    )
|
||||
|
||||
def _create_data_extraction_template(self) -> WorkflowTemplate:
    """Create the data extraction template.

    Builds a linear visual workflow (start -> navigate -> extract title ->
    extract content -> save to file -> end). Extracted values are stored
    in workflow variables (``page_title``, ``page_content``) and then
    written to a JSON file via ``${variable}`` interpolation.

    Returns:
        WorkflowTemplate: an intermediate-level "Data Processing" template.
    """
    # Local import, matching the pattern used elsewhere in this package
    # (presumably to avoid a circular import — see reload_workflows).
    from models.visual_workflow import (
        VisualWorkflow, VisualNode, VisualEdge, Position, Size, Port,
        Variable, WorkflowSettings
    )

    nodes = [
        VisualNode(
            id="start",
            type="start",
            position=Position(50, 150),
            size=Size(100, 50),
            parameters={},
            input_ports=[],
            output_ports=[Port("out", "output", "output")],
            label="Début"
        ),
        VisualNode(
            id="navigate",
            type="navigate",
            position=Position(200, 150),
            size=Size(150, 80),
            parameters={
                "url": "{{target_url}}",
                "wait_for_load": True
            },
            input_ports=[Port("in", "input", "input")],
            output_ports=[Port("out", "output", "output")],
            label="Naviguer vers la page"
        ),
        # Each "extract" node reads one element's text into a named variable.
        VisualNode(
            id="extract_title",
            type="extract",
            position=Position(400, 100),
            size=Size(150, 80),
            parameters={
                "target": "{{title_selector}}",
                "attribute": "text",
                "variable": "page_title"
            },
            input_ports=[Port("in", "input", "input")],
            output_ports=[Port("out", "output", "output")],
            label="Extraire le titre"
        ),
        VisualNode(
            id="extract_content",
            type="extract",
            position=Position(400, 200),
            size=Size(150, 80),
            parameters={
                "target": "{{content_selector}}",
                "attribute": "text",
                "variable": "page_content"
            },
            input_ports=[Port("in", "input", "input")],
            output_ports=[Port("out", "output", "output")],
            label="Extraire le contenu"
        ),
        VisualNode(
            id="save_data",
            type="save",
            position=Position(600, 150),
            size=Size(150, 80),
            parameters={
                "filename": "{{output_file}}",
                "format": "json",
                # ${...} references are resolved from workflow variables at
                # run time (unlike the {{...}} template placeholders above).
                "data": {
                    "title": "${page_title}",
                    "content": "${page_content}",
                    "extracted_at": "${current_timestamp}"
                }
            },
            input_ports=[Port("in", "input", "input")],
            output_ports=[Port("out", "output", "output")],
            label="Sauvegarder les données"
        ),
        VisualNode(
            id="end",
            type="end",
            position=Position(800, 150),
            size=Size(100, 50),
            parameters={},
            input_ports=[Port("in", "input", "input")],
            output_ports=[],
            label="Fin"
        )
    ]

    # Strictly sequential chain: start -> ... -> end.
    edges = [
        VisualEdge("e1", "start", "navigate", "out", "in"),
        VisualEdge("e2", "navigate", "extract_title", "out", "in"),
        VisualEdge("e3", "extract_title", "extract_content", "out", "in"),
        VisualEdge("e4", "extract_content", "save_data", "out", "in"),
        VisualEdge("e5", "save_data", "end", "out", "in")
    ]

    workflow = VisualWorkflow(
        id="data_extraction_template_workflow",
        name="Modèle d'Extraction de Données",
        description="Workflow pour extraire des données d'une page web",
        version="1.0.0",
        created_at=datetime.now(),
        updated_at=datetime.now(),
        created_by="system",
        nodes=nodes,
        edges=edges,
        # Declare the variables that the extract/save nodes read and write.
        variables=[
            Variable("page_title", "string", "", "Titre de la page extrait"),
            Variable("page_content", "string", "", "Contenu de la page extrait"),
            Variable("current_timestamp", "string", "", "Timestamp de l'extraction")
        ],
        settings=WorkflowSettings(),
        tags=["extraction", "data", "scraping"],
        category="Data Processing",
        is_template=True
    )

    # One TemplateParameter per {{placeholder}} used in the nodes above.
    parameters = [
        TemplateParameter(
            name="target_url",
            type=ParameterType.STRING,
            description="URL de la page à analyser",
            node_id="navigate",
            parameter_name="url",
            label="URL cible"
        ),
        TemplateParameter(
            name="title_selector",
            type=ParameterType.TARGET,
            description="Sélecteur de l'élément titre",
            node_id="extract_title",
            parameter_name="target",
            label="Sélecteur du titre"
        ),
        TemplateParameter(
            name="content_selector",
            type=ParameterType.TARGET,
            description="Sélecteur de l'élément contenu",
            node_id="extract_content",
            parameter_name="target",
            label="Sélecteur du contenu"
        ),
        TemplateParameter(
            name="output_file",
            type=ParameterType.STRING,
            description="Nom du fichier de sortie",
            node_id="save_data",
            parameter_name="filename",
            label="Fichier de sortie",
            default_value="extracted_data.json"
        )
    ]

    return WorkflowTemplate(
        id="data_extraction_template",
        name="Extraction de données web",
        description="Template pour extraire et sauvegarder des données depuis une page web",
        category="Data Processing",
        workflow=workflow,
        parameters=parameters,
        tags=["extraction", "scraping", "data", "web"],
        difficulty=TemplateDifficulty.INTERMEDIATE,
        estimated_time=8,
        created_by="system"
    )
|
||||
|
||||
def _create_navigation_template(self) -> WorkflowTemplate:
    """Create the navigation template.

    Builds a linear visual workflow (start -> navigate home -> click menu ->
    click submenu -> wait for destination element -> end) that drives a
    menu-based navigation path through a web site.

    Returns:
        WorkflowTemplate: a beginner-level "Web Automation" template.
    """
    # Local import, matching the pattern used elsewhere in this package
    # (presumably to avoid a circular import — see reload_workflows).
    from models.visual_workflow import (
        VisualWorkflow, VisualNode, VisualEdge, Position, Size, Port,
        Variable, WorkflowSettings
    )

    nodes = [
        VisualNode(
            id="start",
            type="start",
            position=Position(50, 200),
            size=Size(100, 50),
            parameters={},
            input_ports=[],
            output_ports=[Port("out", "output", "output")],
            label="Début"
        ),
        VisualNode(
            id="navigate_home",
            type="navigate",
            position=Position(200, 200),
            size=Size(150, 80),
            parameters={
                "url": "{{home_url}}",
                "wait_for_load": True
            },
            input_ports=[Port("in", "input", "input")],
            output_ports=[Port("out", "output", "output")],
            label="Page d'accueil"
        ),
        VisualNode(
            id="click_menu",
            type="click",
            position=Position(400, 150),
            size=Size(150, 80),
            parameters={
                "target": "{{menu_selector}}",
                # Pause after the click so a dropdown can open (milliseconds,
                # by convention of the "timeout" value below — confirm).
                "wait_after": 1000
            },
            input_ports=[Port("in", "input", "input")],
            output_ports=[Port("out", "output", "output")],
            label="Cliquer sur le menu"
        ),
        VisualNode(
            id="click_submenu",
            type="click",
            position=Position(400, 250),
            size=Size(150, 80),
            parameters={
                "target": "{{submenu_selector}}",
                "wait_after": 1000
            },
            input_ports=[Port("in", "input", "input")],
            output_ports=[Port("out", "output", "output")],
            label="Cliquer sur le sous-menu"
        ),
        VisualNode(
            id="wait_page_load",
            type="wait",
            position=Position(600, 200),
            size=Size(150, 80),
            parameters={
                # Block until the destination element is visible (10s timeout).
                "condition": "element_visible",
                "target": "{{target_element}}",
                "timeout": 10000
            },
            input_ports=[Port("in", "input", "input")],
            output_ports=[Port("out", "output", "output")],
            label="Attendre le chargement"
        ),
        VisualNode(
            id="end",
            type="end",
            position=Position(800, 200),
            size=Size(100, 50),
            parameters={},
            input_ports=[Port("in", "input", "input")],
            output_ports=[],
            label="Fin"
        )
    ]

    # Strictly sequential chain: start -> ... -> end.
    edges = [
        VisualEdge("e1", "start", "navigate_home", "out", "in"),
        VisualEdge("e2", "navigate_home", "click_menu", "out", "in"),
        VisualEdge("e3", "click_menu", "click_submenu", "out", "in"),
        VisualEdge("e4", "click_submenu", "wait_page_load", "out", "in"),
        VisualEdge("e5", "wait_page_load", "end", "out", "in")
    ]

    workflow = VisualWorkflow(
        id="navigation_template_workflow",
        name="Modèle de Navigation",
        description="Workflow pour naviguer dans un site web avec menus",
        version="1.0.0",
        created_at=datetime.now(),
        updated_at=datetime.now(),
        created_by="system",
        nodes=nodes,
        edges=edges,
        variables=[],
        settings=WorkflowSettings(),
        tags=["navigation", "menu", "web"],
        category="Web Automation",
        is_template=True
    )

    # One TemplateParameter per {{placeholder}} used in the nodes above.
    parameters = [
        TemplateParameter(
            name="home_url",
            type=ParameterType.STRING,
            description="URL de la page d'accueil",
            node_id="navigate_home",
            parameter_name="url",
            label="URL d'accueil"
        ),
        TemplateParameter(
            name="menu_selector",
            type=ParameterType.TARGET,
            description="Sélecteur de l'élément de menu principal",
            node_id="click_menu",
            parameter_name="target",
            label="Menu principal"
        ),
        TemplateParameter(
            name="submenu_selector",
            type=ParameterType.TARGET,
            description="Sélecteur de l'élément de sous-menu",
            node_id="click_submenu",
            parameter_name="target",
            label="Sous-menu"
        ),
        TemplateParameter(
            name="target_element",
            type=ParameterType.TARGET,
            description="Élément à attendre sur la page de destination",
            node_id="wait_page_load",
            parameter_name="target",
            label="Élément de destination"
        )
    ]

    return WorkflowTemplate(
        id="navigation_template",
        name="Navigation avec menus",
        description="Template pour naviguer dans un site web en utilisant les menus déroulants",
        category="Web Automation",
        workflow=workflow,
        parameters=parameters,
        tags=["navigation", "menu", "web", "click"],
        difficulty=TemplateDifficulty.BEGINNER,
        estimated_time=4,
        created_by="system"
    )
|
||||
@@ -0,0 +1,204 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Service de Capture d'Écran Thread-Safe - RPA Vision V3
|
||||
Auteur : Dom, Alice, Kiro - 09 janvier 2026
|
||||
|
||||
Service de capture d'écran thread-safe pour résoudre les problèmes
|
||||
de threading avec Flask et mss.
|
||||
"""
|
||||
|
||||
# Standard library
import base64
import io
import logging
import threading
from typing import Any, Dict, List, Optional, Tuple

# Third-party
import cv2
import numpy as np
from PIL import Image
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class ThreadSafeScreenCapturer:
    """Thread-safe screen capturer.

    An ``mss`` instance must not be shared across threads, so this class
    creates one capture backend per thread via ``threading.local``. Falls
    back to ``pyautogui`` when ``mss`` is not installed.

    Fixes over the previous revision:
    - signatures used ``Dict[str, any]`` (the builtin ``any`` function)
      instead of ``typing.Any``;
    - ``get_screen_info`` re-imported pyautogui instead of reusing the
      thread-local capturer (which *is* the pyautogui module).
    """

    def __init__(self):
        # Name of the selected backend: "mss" or "pyautogui".
        self.method = None
        # Per-thread storage: each worker thread gets its own backend.
        self._thread_local = threading.local()
        self._init_capture_method()

    def _init_capture_method(self):
        """Select the first available capture backend (mss, then pyautogui).

        Raises:
            ImportError: if neither mss nor pyautogui is installed.
        """
        try:
            import mss
            self.method = "mss"
            logger.info("✅ mss disponible - utilisation pour capture thread-safe")
        except ImportError:
            try:
                import pyautogui
                self.method = "pyautogui"
                logger.info("✅ pyautogui disponible - utilisation pour capture thread-safe")
            except ImportError:
                logger.error("❌ Aucune méthode de capture disponible")
                raise ImportError("Ni mss ni pyautogui disponibles")

    def _get_thread_capturer(self):
        """Return the capture backend for the current thread, creating it lazily."""
        if not hasattr(self._thread_local, 'capturer'):
            if self.method == "mss":
                import mss
                self._thread_local.capturer = mss.mss()
                logger.debug(f"Nouvelle instance mss créée pour thread {threading.current_thread().ident}")
            else:
                # pyautogui has no per-instance state; store the module itself.
                import pyautogui
                self._thread_local.capturer = pyautogui
                logger.debug(f"pyautogui configuré pour thread {threading.current_thread().ident}")

        return self._thread_local.capturer

    def capture_screen(self) -> Optional[np.ndarray]:
        """Capture the screen in a thread-safe way.

        Returns:
            Screenshot as a numpy array (H, W, 3) in RGB order, or None on error.
        """
        try:
            capturer = self._get_thread_capturer()

            if self.method == "mss":
                return self._capture_mss(capturer)
            else:
                return self._capture_pyautogui(capturer)

        except Exception as e:
            logger.error(f"Erreur capture thread-safe: {e}")
            return None

    def _capture_mss(self, sct) -> np.ndarray:
        """Capture with a thread-local mss instance; return an RGB array.

        Raises:
            ValueError: if the grabbed image has invalid dimensions.
        """
        # mss monitor 0 is the combined virtual screen; 1 is the primary.
        monitor_idx = 1 if len(sct.monitors) > 1 else 0
        monitor = sct.monitors[monitor_idx]

        sct_img = sct.grab(monitor)

        img = np.array(sct_img)

        # mss yields BGRA pixels: drop alpha, then reverse channels -> RGB.
        if img.shape[2] == 4:
            img = img[:, :, :3][:, :, ::-1]  # BGRA to RGB
        elif img.shape[2] == 3:
            img = img[:, :, ::-1]  # BGR to RGB

        if img.size == 0 or img.shape[0] == 0 or img.shape[1] == 0:
            raise ValueError("Image capturée a des dimensions invalides")

        return img

    def _capture_pyautogui(self, pyautogui) -> np.ndarray:
        """Capture with pyautogui (already RGB via PIL); return a numpy array.

        Raises:
            ValueError: if the screenshot has invalid dimensions.
        """
        screenshot = pyautogui.screenshot()
        img = np.array(screenshot)

        if img.size == 0 or img.shape[0] == 0 or img.shape[1] == 0:
            raise ValueError("Image capturée a des dimensions invalides")

        return img

    def capture_to_base64(self, format='PNG', quality=90) -> Dict[str, Any]:
        """Capture the screen and return the image base64-encoded.

        Args:
            format: Image format ('PNG' or 'JPEG').
            quality: JPEG quality (1-100); ignored for PNG.

        Returns:
            Dict with 'success', 'screenshot' (base64), 'width', 'height',
            'format', 'thread_id' — or 'success'=False and 'error'.
        """
        try:
            img_array = self.capture_screen()
            if img_array is None:
                return {
                    'success': False,
                    'error': 'Échec de la capture d\'écran'
                }

            pil_image = Image.fromarray(img_array)

            # Encode in memory; never touches the filesystem.
            buffer = io.BytesIO()
            if format.upper() == 'JPEG':
                pil_image.save(buffer, format='JPEG', quality=quality, optimize=True)
            else:
                pil_image.save(buffer, format='PNG', optimize=True)

            buffer.seek(0)
            screenshot_base64 = base64.b64encode(buffer.getvalue()).decode('utf-8')

            return {
                'success': True,
                'screenshot': screenshot_base64,
                'width': pil_image.width,
                'height': pil_image.height,
                'format': format,
                'thread_id': threading.current_thread().ident
            }

        except Exception as e:
            logger.error(f"Erreur capture_to_base64: {e}")
            return {
                'success': False,
                'error': f'Erreur lors de la capture: {str(e)}'
            }

    def get_screen_info(self) -> Dict[str, Any]:
        """Return display information for the active backend.

        Returns:
            For mss: monitor count, primary monitor and all monitor dicts.
            For pyautogui: primary screen width/height.
            On failure: the backend name and the error text.
        """
        try:
            capturer = self._get_thread_capturer()

            if self.method == "mss":
                monitors = capturer.monitors
                return {
                    'method': 'mss',
                    'monitors_count': len(monitors),
                    'primary_monitor': monitors[1] if len(monitors) > 1 else monitors[0],
                    'all_monitors': monitors
                }
            else:
                # The thread-local capturer is the pyautogui module itself.
                size = capturer.size()
                return {
                    'method': 'pyautogui',
                    'width': size.width,
                    'height': size.height
                }

        except Exception as e:
            logger.error(f"Erreur get_screen_info: {e}")
            return {
                'method': self.method,
                'error': str(e)
            }

    def cleanup_thread(self):
        """Release the current thread's capture resources (mss handle, if any)."""
        if hasattr(self._thread_local, 'capturer'):
            if self.method == "mss":
                try:
                    self._thread_local.capturer.close()
                except Exception as e:
                    logger.debug(f"Erreur cleanup mss: {e}")
            delattr(self._thread_local, 'capturer')
            logger.debug(f"Ressources nettoyées pour thread {threading.current_thread().ident}")
|
||||
|
||||
# Module-level singleton; safe to share because each thread lazily gets
# its own underlying capture backend via threading.local.
thread_safe_capturer = ThreadSafeScreenCapturer()
|
||||
452
visual_workflow_builder/backend/services/workflow_matcher.py
Normal file
452
visual_workflow_builder/backend/services/workflow_matcher.py
Normal file
@@ -0,0 +1,452 @@
|
||||
"""
|
||||
VWB Workflow Matcher - Intégration du SemanticMatcher avec les workflows VWB
|
||||
|
||||
Ce service permet de :
|
||||
- Charger les workflows VWB depuis la base de données SQLite
|
||||
- Trouver le workflow correspondant à une commande en langage naturel
|
||||
- Utiliser les métadonnées (description, tags, triggerExamples) pour le matching
|
||||
|
||||
Usage:
|
||||
matcher = VWBWorkflowMatcher()
|
||||
result = matcher.find_workflow("créer une facture pour le client Acme")
|
||||
if result:
|
||||
print(f"Workflow trouvé: {result.workflow_name} (confiance: {result.confidence})")
|
||||
"""
|
||||
|
||||
import re
|
||||
import logging
|
||||
from typing import Dict, Any, List, Optional, Tuple
|
||||
from dataclasses import dataclass
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class WorkflowMatch:
    """Result of matching a natural-language command against a workflow.

    Produced by VWBWorkflowMatcher.find_workflows(); ordered by confidence.
    """
    workflow_id: str
    workflow_name: str
    confidence: float                 # normalized match score, 0.0-1.0
    extracted_params: Dict[str, str]  # values heuristically pulled from the command
    match_reasons: List[str]          # which strategies fired (e.g. "exact_name")
    description: str = ""
    # Fixed annotation: the default is None, so the type must be Optional —
    # the previous `List[str] = None` lied to type checkers.
    tags: Optional[List[str]] = None
    step_count: int = 0
|
||||
|
||||
|
||||
@dataclass
class WorkflowInfo:
    """Pre-digested metadata of one workflow, cached for matching."""
    workflow_id: str
    name: str
    description: str
    tags: List[str]              # user-assigned tags
    trigger_examples: List[str]  # example phrases that should trigger this workflow
    keywords: List[str]          # tokens extracted from name/description/tags
    step_count: int              # number of steps in the workflow
|
||||
|
||||
|
||||
class VWBWorkflowMatcher:
|
||||
"""
|
||||
Matcher sémantique pour les workflows VWB.
|
||||
|
||||
Stratégies de matching (par ordre de priorité) :
|
||||
1. Matching exact des trigger_examples (0.9)
|
||||
2. Matching exact du nom (0.5)
|
||||
3. Matching des tags (0.3 par tag)
|
||||
4. Matching des mots-clés (Jaccard similarity * 0.4)
|
||||
5. Matching de la description (0.2)
|
||||
|
||||
Les scores sont cumulatifs et normalisés à 1.0 max.
|
||||
"""
|
||||
|
||||
# French and English stop words discarded by _tokenize (checked after
# lowercasing, so entries are lowercase).
STOP_WORDS = {
    'le', 'la', 'les', 'un', 'une', 'des', 'de', 'du', 'à', 'au', 'aux',
    'et', 'ou', 'mais', 'donc', 'or', 'ni', 'car', 'que', 'qui', 'quoi',
    'ce', 'cette', 'ces', 'mon', 'ma', 'mes', 'ton', 'ta', 'tes', 'son',
    'sa', 'ses', 'notre', 'votre', 'leur', 'leurs', 'je', 'tu', 'il',
    'elle', 'nous', 'vous', 'ils', 'elles', 'on', 'se', 'en', 'y',
    'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for',
    'of', 'with', 'by', 'from', 'is', 'are', 'was', 'were', 'be', 'been',
    'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would', 'could',
    'should', 'may', 'might', 'must', 'shall', 'can', 'need', 'dare',
    'faire', 'fait', 'fais', 'veux', 'veut', 'peux', 'peut', 'dois', 'doit'
}
|
||||
|
||||
def __init__(self, app=None):
    """Create an empty matcher; workflows are loaded lazily on first query.

    Args:
        app: Flask application (optional, for DB access).
    """
    self.app = app
    self._loaded = False
    self._workflows: Dict[str, WorkflowInfo] = {}
|
||||
|
||||
def _ensure_loaded(self) -> None:
    """Lazily load the workflows on first use."""
    if self._loaded:
        return
    self.reload_workflows()
|
||||
|
||||
def reload_workflows(self) -> int:
    """Reload the active workflows from the database.

    Rebuilds the in-memory index (self._workflows) used for matching.
    On failure the matcher is still flagged as loaded, so callers do not
    retry the (failing) DB query on every request.

    Returns:
        Number of workflows loaded (0 on error).
    """
    self._workflows.clear()

    try:
        # Local import to avoid circular imports
        from db.models import Workflow

        workflows = Workflow.query.filter_by(is_active=True).all()

        for wf in workflows:
            # Pre-compute the keyword set used by the Jaccard strategy
            keywords = self._extract_keywords(wf.name, wf.description, wf.tags)

            info = WorkflowInfo(
                workflow_id=wf.id,
                name=wf.name,
                description=wf.description or "",
                tags=wf.tags or [],
                trigger_examples=wf.trigger_examples or [],
                keywords=keywords,
                step_count=wf.steps.count()
            )

            self._workflows[wf.id] = info
            logger.debug(f"Loaded workflow: {wf.name} (tags: {wf.tags}, triggers: {len(wf.trigger_examples or [])})")

        self._loaded = True
        logger.info(f"VWBWorkflowMatcher: {len(self._workflows)} workflows chargés")
        return len(self._workflows)

    except Exception as e:
        logger.error(f"Erreur chargement workflows: {e}")
        self._loaded = True  # Mark as loaded even on error (avoid retry storms)
        return 0
|
||||
|
||||
def _extract_keywords(self, name: str, description: str, tags: List[str]) -> List[str]:
    """Collect the search keywords of a workflow from its metadata.

    Tokens come from the name and description; tags are added verbatim
    (lowercased). Duplicates are removed; order is unspecified.
    """
    collected = set(self._tokenize(name))

    if description:
        collected |= set(self._tokenize(description))

    if tags:
        collected |= {tag.lower() for tag in tags}

    return list(collected)
|
||||
|
||||
def _tokenize(self, text: str) -> List[str]:
    """Split *text* into lowercase keywords.

    Punctuation is replaced by spaces; words of length <= 2 and stop
    words (STOP_WORDS) are discarded.
    """
    if not text:
        return []

    # Lowercase, then strip punctuation in one pass.
    cleaned = re.sub(r'[^\w\s]', ' ', text.lower())

    return [
        word
        for word in cleaned.split()
        if len(word) > 2 and word not in self.STOP_WORDS
    ]
|
||||
|
||||
def find_workflow(
    self,
    command: str,
    min_confidence: float = 0.3
) -> Optional[WorkflowMatch]:
    """Return the single best workflow for a natural-language command.

    Args:
        command: Natural-language command.
        min_confidence: Minimum required confidence (0-1).

    Returns:
        The top WorkflowMatch, or None when nothing clears the threshold.
    """
    top = self.find_workflows(command, limit=1, min_confidence=min_confidence)
    if not top:
        return None
    return top[0]
|
||||
|
||||
def find_workflows(
    self,
    command: str,
    limit: int = 5,
    min_confidence: float = 0.3
) -> List[WorkflowMatch]:
    """Rank the known workflows against a natural-language command.

    Args:
        command: Natural-language command.
        limit: Maximum number of results.
        min_confidence: Minimum required confidence (0-1).

    Returns:
        WorkflowMatch list, best match first.
    """
    self._ensure_loaded()

    if not self._workflows:
        logger.warning("Aucun workflow chargé")
        return []

    normalized = command.lower().strip()
    tokens = set(self._tokenize(command))

    candidates: List[WorkflowMatch] = []
    for wf_id, info in self._workflows.items():
        confidence, reasons, params = self._calculate_match_score(
            normalized, tokens, info
        )
        if confidence < min_confidence:
            continue

        candidates.append(WorkflowMatch(
            workflow_id=wf_id,
            workflow_name=info.name,
            confidence=round(confidence, 3),
            extracted_params=params,
            match_reasons=reasons,
            description=info.description,
            tags=info.tags,
            step_count=info.step_count,
        ))

    # Best match first; sorted() is stable, like list.sort() was.
    return sorted(candidates, key=lambda m: m.confidence, reverse=True)[:limit]
|
||||
|
||||
def _calculate_match_score(
    self,
    command: str,
    command_tokens: set,
    info: WorkflowInfo
) -> Tuple[float, List[str], Dict[str, str]]:
    """Score how well *command* matches one workflow.

    Strategy scores are cumulative (see the class docstring for weights)
    and capped at 1.0. *command* is expected lowercased/stripped and
    *command_tokens* pre-tokenized by the caller.

    Returns:
        (score, reasons, extracted_params) where *reasons* records which
        strategies fired and *extracted_params* holds values pulled from
        the command.
    """
    score = 0.0
    reasons = []
    params = {}

    # 1. Exact trigger-example match (highest weight, first match wins)
    for example in info.trigger_examples:
        example_lower = example.lower().strip()
        if example_lower in command or command in example_lower:
            score += 0.9
            reasons.append(f"trigger_example_exact:{example}")
            break
        # Partial example match: >70% of the example's tokens present
        example_tokens = set(self._tokenize(example))
        if example_tokens and command_tokens:
            overlap = len(example_tokens & command_tokens) / len(example_tokens)
            if overlap > 0.7:
                score += 0.6 * overlap
                reasons.append(f"trigger_example_partial:{example}")
                break

    # 2. Workflow-name match (full name, else any single word of it)
    name_lower = info.name.lower()
    if name_lower in command:
        score += 0.5
        reasons.append("exact_name")
    elif any(word in command for word in name_lower.split()):
        score += 0.2
        reasons.append("partial_name")

    # 3. Tag matches: 0.3 per tag found as a substring of the command
    matched_tags = []
    for tag in info.tags:
        tag_lower = tag.lower()
        if tag_lower in command:
            score += 0.3
            matched_tags.append(tag)
    if matched_tags:
        reasons.append(f"tags:{','.join(matched_tags)}")

    # 4. Keyword match (Jaccard similarity, weighted 0.4)
    workflow_tokens = set(info.keywords)
    if workflow_tokens and command_tokens:
        intersection = command_tokens & workflow_tokens
        union = command_tokens | workflow_tokens
        jaccard = len(intersection) / len(union) if union else 0
        score += jaccard * 0.4
        if intersection:
            # Record at most 5 keywords for readability of the reason
            reasons.append(f"keywords:{','.join(list(intersection)[:5])}")

    # 5. Description match (up to 0.2, proportional to token coverage)
    if info.description:
        desc_tokens = set(self._tokenize(info.description))
        if desc_tokens and command_tokens:
            intersection = command_tokens & desc_tokens
            if intersection:
                score += 0.2 * (len(intersection) / len(desc_tokens))
                reasons.append("description_match")

    # 6. Parameter extraction (small fixed bonus when anything was found)
    params = self._extract_params(command)
    if params:
        score += 0.05
        reasons.append(f"params:{','.join(params.keys())}")

    # Cap the cumulative score at 1.0
    score = min(score, 1.0)

    return score, reasons, params
|
||||
|
||||
def _extract_params(self, command: str) -> Dict[str, str]:
|
||||
"""
|
||||
Extraire les paramètres d'une commande.
|
||||
|
||||
Utilise des heuristiques pour extraire les valeurs.
|
||||
"""
|
||||
params = {}
|
||||
|
||||
# Pattern: "client X" ou "customer X"
|
||||
client_match = re.search(r'(?:client|customer|compte)\s+([A-Za-zÀ-ÿ0-9_\-]+)', command, re.IGNORECASE)
|
||||
if client_match:
|
||||
params['client'] = client_match.group(1)
|
||||
|
||||
# Pattern: "facture N" ou "invoice N"
|
||||
invoice_match = re.search(r'(?:facture|invoice|commande|order)\s+([A-Za-z0-9_\-]+)', command, re.IGNORECASE)
|
||||
if invoice_match:
|
||||
params['invoice'] = invoice_match.group(1)
|
||||
|
||||
# Pattern: "de X à Y" ou "from X to Y"
|
||||
range_match = re.search(r'(?:de|from)\s+(\w+)\s+(?:à|to)\s+(\w+)', command, re.IGNORECASE)
|
||||
if range_match:
|
||||
params['start'] = range_match.group(1)
|
||||
params['end'] = range_match.group(2)
|
||||
|
||||
# Pattern: valeurs entre guillemets
|
||||
quoted_values = re.findall(r'"([^"]+)"', command)
|
||||
for i, value in enumerate(quoted_values):
|
||||
params[f'value{i}'] = value
|
||||
|
||||
# Pattern: nombres
|
||||
numbers = re.findall(r'\b(\d+(?:[.,]\d+)?)\b', command)
|
||||
for i, num in enumerate(numbers[:3]): # Max 3 nombres
|
||||
params[f'number{i}'] = num
|
||||
|
||||
return params
|
||||
|
||||
def suggest_workflows(self, partial: str, limit: int = 5) -> List[Dict[str, Any]]:
    """Suggest workflows for a partially typed command.

    Args:
        partial: Text fragment entered so far.
        limit: Maximum number of suggestions returned.

    Returns:
        A list of dicts with 'id', 'name', 'description' and 'tags',
        ordered best match first.
    """
    self._ensure_loaded()

    needle = partial.lower().strip()
    needle_tokens = set(self._tokenize(partial))
    ranked: List[Dict[str, Any]] = []

    for info in self._workflows.values():
        name_lc = info.name.lower()

        # A prefix hit on the name is the strongest signal.
        best = 1.0 if name_lc.startswith(needle) else (0.8 if needle in name_lc else 0)

        # Substring hits on tags and trigger examples.
        if any(needle in tag.lower() for tag in info.tags):
            best = max(best, 0.7)
        if any(needle in example.lower() for example in info.trigger_examples):
            best = max(best, 0.9)

        # Token overlap with the workflow's keywords.
        if needle_tokens:
            shared = needle_tokens & set(info.keywords)
            if shared:
                best = max(best, 0.5 * len(shared) / len(needle_tokens))

        if best > 0:
            summary = info.description
            if len(summary) > 100:
                summary = summary[:100] + '...'
            ranked.append({
                'id': info.workflow_id,
                'name': info.name,
                'description': summary,
                'tags': info.tags,
                'score': best,
            })

    # Best first (stable sort keeps registration order for ties),
    # then strip the internal score before handing results back.
    ranked.sort(key=lambda entry: entry['score'], reverse=True)
    for entry in ranked:
        del entry['score']

    return ranked[:limit]
|
||||
|
||||
def get_workflow_info(self, workflow_id: str) -> Optional[WorkflowInfo]:
    """Look up one workflow's metadata by id; None if unknown."""
    self._ensure_loaded()
    registry = self._workflows
    return registry.get(workflow_id)
|
||||
|
||||
def get_all_workflows(self) -> List[WorkflowInfo]:
    """Return every loaded workflow as a fresh list."""
    self._ensure_loaded()
    return [*self._workflows.values()]
|
||||
|
||||
def workflow_count(self) -> int:
    """Return how many workflows are currently registered."""
    self._ensure_loaded()
    registry = self._workflows
    return len(registry)
|
||||
|
||||
|
||||
# Module-level singleton, created lazily; access it through
# get_workflow_matcher() rather than reading this directly.
_matcher_instance: Optional[VWBWorkflowMatcher] = None
|
||||
|
||||
|
||||
def get_workflow_matcher() -> VWBWorkflowMatcher:
    """Return the process-wide matcher, creating it on first use."""
    global _matcher_instance
    if _matcher_instance is not None:
        return _matcher_instance
    # First call: build and cache the shared instance.
    _matcher_instance = VWBWorkflowMatcher()
    return _matcher_instance
|
||||
|
||||
|
||||
def find_matching_workflow(command: str, min_confidence: float = 0.3) -> Optional[WorkflowMatch]:
    """Convenience wrapper around the global matcher.

    Args:
        command: Natural-language command to match.
        min_confidence: Minimum confidence a match must reach.

    Returns:
        The best WorkflowMatch, or None when nothing clears the threshold.
    """
    matcher = get_workflow_matcher()
    return matcher.find_workflow(command, min_confidence)
|
||||
95
visual_workflow_builder/backend/services/workflow_service.py
Normal file
95
visual_workflow_builder/backend/services/workflow_service.py
Normal file
@@ -0,0 +1,95 @@
|
||||
"""
|
||||
Service pour gérer les workflows visuels
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from typing import Dict, List, Optional
|
||||
from models.visual_workflow import VisualWorkflow, VisualNode, VisualEdge, Variable, WorkflowSettings, generate_id
|
||||
|
||||
class WorkflowService:
    """In-memory registry of visual workflows (test/dev backend).

    All state lives in a class-level dict, so it is shared across the
    process and lost on restart. Not thread-safe — callers must not use
    it concurrently without external locking.
    """

    # Maps workflow id -> VisualWorkflow. Class-level on purpose: the
    # service is used as a namespace of classmethods, never instantiated.
    _workflows: Dict[str, VisualWorkflow] = {}

    @classmethod
    def create_workflow(cls, name: str, description: str = "", created_by: str = "system") -> VisualWorkflow:
        """Create, register and return a new empty workflow.

        Args:
            name: Human-readable workflow name.
            description: Optional free-text description.
            created_by: Identifier of the creator (defaults to "system").

        Returns:
            The freshly registered VisualWorkflow.
        """
        workflow_id = generate_id()
        now = datetime.now()

        workflow = VisualWorkflow(
            id=workflow_id,
            name=name,
            description=description,
            version="1.0.0",
            created_at=now,
            updated_at=now,
            created_by=created_by,
            nodes=[],
            edges=[],
            variables=[],
            settings=WorkflowSettings()
        )

        cls._workflows[workflow_id] = workflow
        return workflow

    @classmethod
    def get_workflow(cls, workflow_id: str) -> Optional[VisualWorkflow]:
        """Return the workflow with this id, or None if unknown."""
        return cls._workflows.get(workflow_id)

    @classmethod
    def list_workflows(cls) -> List[VisualWorkflow]:
        """Return all registered workflows."""
        return list(cls._workflows.values())

    @classmethod
    def update_workflow(cls, workflow_id: str, **kwargs) -> Optional[VisualWorkflow]:
        """Update attributes of a workflow and refresh its updated_at.

        Only attributes that already exist on the workflow object are
        set. The 'id' attribute is never overwritten: rebinding it would
        desynchronize the object's identifier from its registry key.

        Returns:
            The updated workflow, or None if the id is unknown.
        """
        workflow = cls._workflows.get(workflow_id)
        if not workflow:
            return None

        for key, value in kwargs.items():
            # Fix: previously a caller could pass id=... and break the
            # invariant that _workflows[wf.id] is wf.
            if key == 'id':
                continue
            if hasattr(workflow, key):
                setattr(workflow, key, value)

        workflow.updated_at = datetime.now()
        return workflow

    @classmethod
    def delete_workflow(cls, workflow_id: str) -> bool:
        """Remove a workflow; return True if it existed."""
        if workflow_id in cls._workflows:
            del cls._workflows[workflow_id]
            return True
        return False

    @classmethod
    def add_node(cls, workflow_id: str, node: VisualNode) -> bool:
        """Append a node to a workflow; return False if the id is unknown."""
        workflow = cls._workflows.get(workflow_id)
        if not workflow:
            return False

        workflow.nodes.append(node)
        workflow.updated_at = datetime.now()
        return True

    @classmethod
    def add_edge(cls, workflow_id: str, edge: VisualEdge) -> bool:
        """Append an edge to a workflow; return False if the id is unknown."""
        workflow = cls._workflows.get(workflow_id)
        if not workflow:
            return False

        workflow.edges.append(edge)
        workflow.updated_at = datetime.now()
        return True

    @classmethod
    def clear_all(cls):
        """Remove every workflow (test helper)."""
        cls._workflows.clear()
|
||||
Reference in New Issue
Block a user