feat: chat unifié, GestureCatalog, Copilot, Léa UI, extraction données, vérification replay
Refonte majeure du système Agent Chat et ajout de nombreux modules : - Chat unifié : suppression du dual Workflows/Agent Libre, tout passe par /api/chat avec résolution en 3 niveaux (workflow → geste → "montre-moi") - GestureCatalog : 38 raccourcis clavier universels Windows avec matching sémantique, substitution automatique dans les replays, et endpoint /api/gestures - Mode Copilot : exécution pas-à-pas des workflows avec validation humaine via WebSocket (approve/skip/abort) avant chaque action - Léa UI (agent_v0/lea_ui/) : interface PyQt5 pour Windows avec overlay transparent pour feedback visuel pendant le replay - Data Extraction (core/extraction/) : moteur d'extraction visuelle de données (OCR + VLM → SQLite), avec schémas YAML et export CSV/Excel - ReplayVerifier (agent_v0/server_v1/) : vérification post-action par comparaison de screenshots, avec logique de retry (max 3) - IntentParser durci : meilleur fallback regex, type GREETING, patterns améliorés - Dashboard : nouvelles pages gestures, streaming, extractions - Tests : 63 tests GestureCatalog, 47 tests extraction, corrections tests existants - Dépréciation : /api/agent/plan et /api/agent/execute retournent HTTP 410, suppression du code hardcodé _plan_to_replay_actions Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -618,6 +618,50 @@ def import_workflow():
|
||||
return error_response(500, f"Internal server error: {str(e)}")
|
||||
|
||||
|
||||
@workflows_bp.route('/import-core', methods=['POST'])
def import_core_workflow():
    """
    Import a core Workflow (from streaming/GraphBuilder) and convert to VWB format.

    Accepts a core Workflow JSON payload (as produced by Workflow.to_dict()
    or save_to_file), converts it to a VisualWorkflow via
    GraphToVisualConverter, persists it, and returns the VWB-formatted
    workflow together with any conversion warnings.

    Body: core Workflow JSON dict.
    Returns: 201 with the saved workflow, 400 on empty body, 500 on failure.
    """
    try:
        payload = request.get_json()
        if not payload:
            return error_response(400, "Request body (core Workflow JSON) is required")

        # Deserialize the incoming core Workflow.
        from core.models.workflow_graph import Workflow as CoreWorkflow
        core_workflow = CoreWorkflow.from_dict(payload)

        # Convert to the rich VisualWorkflow model, then round-trip through a
        # plain dict down to the simple model used by the VWB backend.
        from services.graph_to_visual_converter import GraphToVisualConverter
        converter = GraphToVisualConverter()
        rich_visual = converter.convert(core_workflow)
        simple_visual = VisualWorkflow.from_dict(rich_visual.to_dict())

        # Persist and register in the in-memory store.
        db.save(simple_visual)
        workflows_store[simple_visual.id] = simple_visual

        response_body = {
            'message': 'Core workflow imported and converted to VWB format',
            'workflow': simple_visual.to_dict(),
            'warnings': converter.warnings,
        }
        return jsonify(response_body), 201

    except Exception as e:
        traceback.print_exc()
        return error_response(500, f"Import error: {str(e)}")
|
||||
|
||||
|
||||
@workflows_bp.route('/<workflow_id>/feedback', methods=['POST'])
|
||||
def submit_workflow_feedback(workflow_id: str):
|
||||
"""
|
||||
|
||||
@@ -15,5 +15,6 @@ from . import workflow
|
||||
from . import capture
|
||||
from . import execute
|
||||
from . import match # Matching sémantique des workflows
|
||||
from . import review # Review/Validation de workflows importés
|
||||
|
||||
__all__ = ['api_v3_bp']
|
||||
|
||||
384
visual_workflow_builder/backend/api_v3/review.py
Normal file
384
visual_workflow_builder/backend/api_v3/review.py
Normal file
@@ -0,0 +1,384 @@
|
||||
"""
|
||||
API v3 - Review/Validation de workflows importes depuis le streaming
|
||||
|
||||
Endpoints:
|
||||
GET /api/v3/workflows/pending-review -> liste les workflows en attente de review
|
||||
GET /api/v3/workflow/<id>/review -> donnees de review (workflow + screenshots)
|
||||
POST /api/v3/workflow/<id>/review -> soumettre une decision de review
|
||||
POST /api/v3/workflow/import-core -> importer un core Workflow avec review
|
||||
"""
|
||||
|
||||
from flask import jsonify, request
|
||||
from datetime import datetime
|
||||
import logging
|
||||
import sys
|
||||
import traceback
|
||||
from pathlib import Path
|
||||
|
||||
from . import api_v3_bp
|
||||
from .workflow import generate_id
|
||||
from db.models import db, Workflow, Step
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@api_v3_bp.route('/workflows/pending-review', methods=['GET'])
def list_pending_review():
    """
    List workflows awaiting human validation.

    Selects active workflows whose review_status is 'pending_review' or
    'needs_edit', most recently created first.

    Response:
        {
          "success": true,
          "workflows": [
            {"id": ..., "name": ..., "description": ..., "tags": [...],
             "step_count": ..., "source": ..., "review_status": ...,
             "review_feedback": ..., "created_at": ..., "updated_at": ...}
          ],
          "total": <int>
        }
    """
    try:
        pending = (
            Workflow.query
            .filter(
                Workflow.is_active == True,
                Workflow.review_status.in_(['pending_review', 'needs_edit'])
            )
            .order_by(Workflow.created_at.desc())
            .all()
        )

        # Lightweight summaries only — full workflow bodies come from the
        # per-workflow review endpoint.
        summaries = [
            {
                'id': wf.id,
                'name': wf.name,
                'description': wf.description or '',
                'tags': wf.tags or [],
                'step_count': wf.steps.count(),
                'source': wf.source or 'manual',
                'review_status': wf.review_status,
                'review_feedback': wf.review_feedback,
                'created_at': wf.created_at.isoformat() if wf.created_at else None,
                'updated_at': wf.updated_at.isoformat() if wf.updated_at else None,
            }
            for wf in pending
        ]

        return jsonify({
            'success': True,
            'workflows': summaries,
            'total': len(summaries)
        })

    except Exception as e:
        logger.error(f"Erreur listing pending review: {e}")
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500
|
||||
|
||||
|
||||
@api_v3_bp.route('/workflow/<workflow_id>/review', methods=['GET'])
def get_review_data(workflow_id: str):
    """
    Return review data for one workflow.

    Includes the full workflow (with its steps via to_dict()), plus review
    metadata: source, status, feedback, review date, and counts of steps
    with/without visual anchors.

    Response:
        {
          "success": true,
          "workflow": { ... },
          "review_info": {
            "source": ..., "review_status": ..., "review_feedback": ...,
            "reviewed_at": ..., "step_count": ...,
            "steps_with_anchors": ..., "steps_without_anchors": ...
          }
        }
    """
    try:
        wf = Workflow.query.get(workflow_id)
        if not wf:
            return jsonify({
                'success': False,
                'error': f"Workflow '{workflow_id}' non trouve"
            }), 404

        # Count steps that carry a visual anchor vs. those that do not.
        ordered_steps = Step.query.filter_by(workflow_id=workflow_id).order_by(Step.order).all()
        anchored = sum(1 for s in ordered_steps if s.anchor_id)

        return jsonify({
            'success': True,
            'workflow': wf.to_dict(),
            'review_info': {
                'source': wf.source or 'manual',
                'review_status': wf.review_status,
                'review_feedback': wf.review_feedback,
                'reviewed_at': wf.reviewed_at.isoformat() if wf.reviewed_at else None,
                'step_count': len(ordered_steps),
                'steps_with_anchors': anchored,
                'steps_without_anchors': len(ordered_steps) - anchored,
            },
        })

    except Exception as e:
        logger.error(f"Erreur get review data: {e}")
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500
|
||||
|
||||
|
||||
@api_v3_bp.route('/workflow/<workflow_id>/review', methods=['POST'])
def submit_review(workflow_id: str):
    """
    Submit a review decision for a workflow.

    Request:
        {"status": "approved" | "rejected" | "needs_edit",
         "feedback": "optional comment..."}

    Effect per status:
      - "approved":   workflow is validated; its core learning_state is
                      promoted to COACHING (best effort).
      - "rejected":   workflow is deactivated (is_active=False).
      - "needs_edit": workflow stays active so the user can edit it in the VWB.

    Response:
        {"success": true, "workflow_id": ..., "review_status": ...,
         "message": ...}
    """
    try:
        wf = Workflow.query.get(workflow_id)
        if not wf:
            return jsonify({
                'success': False,
                'error': f"Workflow '{workflow_id}' non trouve"
            }), 404

        payload = request.get_json() or {}

        decision = payload.get('status')
        if decision not in ('approved', 'rejected', 'needs_edit'):
            return jsonify({
                'success': False,
                'error': "Le champ 'status' doit etre 'approved', 'rejected' ou 'needs_edit'"
            }), 400

        comment = payload.get('feedback', '')

        # Record the decision on the workflow row.
        wf.review_status = decision
        wf.review_feedback = comment
        wf.reviewed_at = datetime.utcnow()
        wf.updated_at = datetime.utcnow()

        if decision == 'approved':
            # Best-effort promotion of the core workflow's learning state.
            _promote_to_coaching(workflow_id)
            message = f"Workflow '{wf.name}' approuve. Le systeme peut maintenant suggerer ce workflow."
        elif decision == 'rejected':
            wf.is_active = False
            message = f"Workflow '{wf.name}' rejete et desactive."
        else:  # 'needs_edit' — keep active, the user will edit it
            message = f"Workflow '{wf.name}' marque pour modification."

        db.session.commit()

        logger.info(f"[Review] Workflow {workflow_id} -> {decision} (feedback: {comment[:50]}...)")

        return jsonify({
            'success': True,
            'workflow_id': workflow_id,
            'review_status': decision,
            'message': message,
        })

    except Exception as e:
        db.session.rollback()
        logger.error(f"Erreur submit review: {e}")
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500
|
||||
|
||||
|
||||
def _promote_to_coaching(workflow_id: str):
    """
    Promote the core workflow's learning_state to COACHING.

    Delegates to the LearningManager when available. Fully graceful: any
    failure (manager unavailable, missing API, import error) is logged as a
    warning and the caller proceeds unaffected.
    """
    try:
        from services.learning_integration import _get_learning_manager

        lm = _get_learning_manager()
        if lm is None:
            logger.warning(
                f"[Review] LearningManager non disponible, impossible de promouvoir "
                f"le workflow {workflow_id} vers COACHING"
            )
            return

        # Preferred path: explicit state setter.
        try:
            from core.models.workflow_graph import LearningState
            lm.set_workflow_state(workflow_id, LearningState.COACHING)
            logger.info(f"[Review] Workflow {workflow_id} promu vers COACHING")
        except AttributeError:
            # Manager has no set_workflow_state — fall back to promote_workflow().
            try:
                lm.promote_workflow(workflow_id)
                logger.info(f"[Review] Workflow {workflow_id} promu via promote_workflow()")
            except Exception as e2:
                logger.warning(f"[Review] Impossible de promouvoir le workflow: {e2}")

    except ImportError as e:
        logger.warning(f"[Review] Import learning_integration impossible: {e}")
    except Exception as e:
        logger.warning(f"[Review] Erreur promotion workflow {workflow_id}: {e}")
|
||||
|
||||
|
||||
@api_v3_bp.route('/workflow/import-core', methods=['POST'])
def import_core_workflow_v3():
    """
    Import a core Workflow (from streaming/GraphBuilder) into the v3 database.

    Converts via GraphToVisualConverter, then creates a SQLAlchemy Workflow
    with source='graph_to_visual_converter' and review_status='pending_review'
    so it shows up in the pending-review queue.

    Body: core Workflow JSON dict (as produced by Workflow.to_dict()).

    Response:
        {
          "success": true,
          "workflow": { ... },
          "warnings": [...],
          "message": "..."
        }
    """
    try:
        data = request.get_json()
        if not data:
            return jsonify({
                'success': False,
                'error': "Request body (core Workflow JSON) est requis"
            }), 400

        # Make the repo root importable so the `core.*` imports below resolve.
        core_path = str(Path(__file__).parent.parent.parent.parent)
        if core_path not in sys.path:
            sys.path.insert(0, core_path)

        # Deserialize the core Workflow payload.
        from core.models.workflow_graph import Workflow as CoreWorkflow
        core_wf = CoreWorkflow.from_dict(data)

        # Convert to the rich VisualWorkflow model (layout, ports, colors).
        from services.graph_to_visual_converter import GraphToVisualConverter
        converter = GraphToVisualConverter()
        visual_wf_rich = converter.convert(core_wf)

        # Create the v3 SQLAlchemy workflow row, flagged for human review.
        wf_id = generate_id('wf')
        workflow = Workflow(
            id=wf_id,
            name=visual_wf_rich.name,
            description=visual_wf_rich.description or 'Workflow importe depuis le streaming',
            source='graph_to_visual_converter',
            review_status='pending_review',
        )

        if visual_wf_rich.tags:
            workflow.tags = visual_wf_rich.tags

        db.session.add(workflow)

        # Create one Step per converted node, skipping structural nodes.
        # NOTE(review): `order=idx` uses the enumerate index over ALL nodes,
        # so skipped start/end nodes leave gaps in the order sequence —
        # harmless if `order` is only used for sorting; confirm.
        for idx, vnode in enumerate(visual_wf_rich.nodes):
            # Skip purely structural start/end nodes.
            if vnode.type in ('start', 'end'):
                continue

            step = Step(
                id=generate_id('step'),
                workflow_id=wf_id,
                action_type=_visual_type_to_action_type(vnode.type),
                order=idx,
                position_x=vnode.position.x,
                position_y=vnode.position.y,
                label=vnode.label or vnode.type,
            )
            step.parameters = vnode.parameters or {}
            db.session.add(step)

        # Single commit: workflow + all steps land atomically.
        db.session.commit()

        logger.info(
            f"[Review] Core workflow importe -> {wf_id} "
            f"({workflow.name}, {len(visual_wf_rich.nodes)} nodes)"
        )

        return jsonify({
            'success': True,
            'workflow': workflow.to_dict(),
            'warnings': converter.warnings,
            'message': f"Workflow '{workflow.name}' importe et en attente de validation",
        }), 201

    except Exception as e:
        # Roll back the partial insert before reporting the failure.
        db.session.rollback()
        traceback.print_exc()
        logger.error(f"[Review] Erreur import core workflow: {e}")
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500
|
||||
|
||||
|
||||
def _visual_type_to_action_type(visual_type: str) -> str:
|
||||
"""Convertit un type visuel VWB vers un action_type v3."""
|
||||
mapping = {
|
||||
'click': 'click_anchor',
|
||||
'type': 'type_text',
|
||||
'wait': 'wait_for_anchor',
|
||||
'navigate': 'click_anchor',
|
||||
'extract': 'extract_text',
|
||||
'variable': 'type_text',
|
||||
'condition': 'visual_condition',
|
||||
'loop': 'loop_visual',
|
||||
'validate': 'keyboard_shortcut',
|
||||
'scroll': 'scroll_to_anchor',
|
||||
'screenshot': 'screenshot_evidence',
|
||||
'transform': 'type_text',
|
||||
'api': 'click_anchor',
|
||||
'database': 'db_save_data',
|
||||
}
|
||||
return mapping.get(visual_type, 'click_anchor')
|
||||
@@ -72,7 +72,9 @@ def get_state():
|
||||
'tags': wf.tags or [],
|
||||
'trigger_examples': wf.trigger_examples or [],
|
||||
'step_count': wf.steps.count(),
|
||||
'updated_at': wf.updated_at.isoformat() if wf.updated_at else None
|
||||
'updated_at': wf.updated_at.isoformat() if wf.updated_at else None,
|
||||
'source': wf.source or 'manual',
|
||||
'review_status': wf.review_status,
|
||||
})
|
||||
|
||||
return jsonify({
|
||||
|
||||
@@ -310,6 +310,27 @@ with app.app_context():
|
||||
db.create_all()
|
||||
print("✅ [DB] Tables créées, utiliser 'flask db stamp head' pour initialiser les migrations")
|
||||
|
||||
# Migration manuelle : ajouter les colonnes review si elles n'existent pas
|
||||
from sqlalchemy import inspect as sa_inspect, text
|
||||
insp = sa_inspect(db.engine)
|
||||
if 'workflows' in insp.get_table_names():
|
||||
existing_cols = {col['name'] for col in insp.get_columns('workflows')}
|
||||
new_cols = {
|
||||
'source': "ALTER TABLE workflows ADD COLUMN source VARCHAR(64) DEFAULT 'manual'",
|
||||
'review_status': "ALTER TABLE workflows ADD COLUMN review_status VARCHAR(32)",
|
||||
'review_feedback': "ALTER TABLE workflows ADD COLUMN review_feedback TEXT",
|
||||
'reviewed_at': "ALTER TABLE workflows ADD COLUMN reviewed_at DATETIME",
|
||||
}
|
||||
for col_name, sql in new_cols.items():
|
||||
if col_name not in existing_cols:
|
||||
try:
|
||||
db.session.execute(text(sql))
|
||||
db.session.commit()
|
||||
print(f" [DB] Colonne '{col_name}' ajoutée à workflows")
|
||||
except Exception as e:
|
||||
db.session.rollback()
|
||||
print(f" [DB] Colonne '{col_name}' déjà existante ou erreur: {e}")
|
||||
|
||||
# Initialize VisualTargetManager with RPA Vision V3 components (optional)
|
||||
try:
|
||||
from core.capture.screen_capturer import ScreenCapturer
|
||||
@@ -339,14 +360,15 @@ except Exception as e:
|
||||
print(f"❌ Erreur lors de l'initialisation des services visuels: {e}")
|
||||
|
||||
if __name__ == '__main__':
|
||||
port = int(os.getenv('PORT', 5000))
|
||||
debug = os.getenv('FLASK_ENV') == 'development'
|
||||
port = int(os.getenv('PORT', 5002))
|
||||
# Désactivation du mode debug pour stabiliser le laboratoire
|
||||
debug = False
|
||||
|
||||
socketio.run(
|
||||
app,
|
||||
host='0.0.0.0',
|
||||
port=port,
|
||||
debug=debug,
|
||||
use_reloader=debug,
|
||||
allow_unsafe_werkzeug=True # For development only
|
||||
debug=False,
|
||||
use_reloader=False,
|
||||
allow_unsafe_werkzeug=True
|
||||
)
|
||||
|
||||
@@ -27,6 +27,16 @@ class Workflow(db.Model):
|
||||
updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
|
||||
is_active = db.Column(db.Boolean, default=True)
|
||||
|
||||
# Review/Validation — workflows importés depuis le streaming
|
||||
# source: origine du workflow ('manual', 'graph_to_visual_converter', etc.)
|
||||
source = db.Column(db.String(64), default='manual')
|
||||
# review_status: 'pending_review', 'approved', 'rejected', 'needs_edit', None (pas de review)
|
||||
review_status = db.Column(db.String(32), nullable=True, default=None)
|
||||
# review_feedback: commentaire de l'utilisateur lors de la review
|
||||
review_feedback = db.Column(db.Text, nullable=True)
|
||||
# reviewed_at: date de la review
|
||||
reviewed_at = db.Column(db.DateTime, nullable=True)
|
||||
|
||||
# Relations
|
||||
steps = db.relationship('Step', backref='workflow', lazy='dynamic',
|
||||
order_by='Step.order', cascade='all, delete-orphan')
|
||||
@@ -65,7 +75,7 @@ class Workflow(db.Model):
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""Sérialise le workflow complet"""
|
||||
return {
|
||||
result = {
|
||||
'id': self.id,
|
||||
'name': self.name,
|
||||
'description': self.description,
|
||||
@@ -74,8 +84,13 @@ class Workflow(db.Model):
|
||||
'created_at': self.created_at.isoformat() if self.created_at else None,
|
||||
'updated_at': self.updated_at.isoformat() if self.updated_at else None,
|
||||
'steps': [step.to_dict() for step in self.steps.order_by(Step.order).all()],
|
||||
'step_count': self.steps.count()
|
||||
'step_count': self.steps.count(),
|
||||
'source': self.source or 'manual',
|
||||
'review_status': self.review_status,
|
||||
'review_feedback': self.review_feedback,
|
||||
'reviewed_at': self.reviewed_at.isoformat() if self.reviewed_at else None,
|
||||
}
|
||||
return result
|
||||
|
||||
def __repr__(self):
    # Debug-friendly identity string, e.g. "<Workflow wf_abc123: My workflow>".
    return f'<Workflow {self.id}: {self.name}>'
|
||||
|
||||
@@ -0,0 +1,382 @@
|
||||
"""
|
||||
GraphToVisual Converter — Convertit un core Workflow en VisualWorkflow VWB.
|
||||
|
||||
Inverse du VisualToGraphConverter : prend un Workflow (issu du GraphBuilder
|
||||
ou de l'exécution streaming) et produit un VisualWorkflow affichable
|
||||
dans le Visual Workflow Builder.
|
||||
|
||||
Cas d'usage :
|
||||
- Workflow appris par streaming → affichage/review dans le VWB
|
||||
- Import d'un workflow core pour édition manuelle
|
||||
- Mode validation humaine : voir et corriger un workflow auto-généré
|
||||
"""
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
from datetime import datetime
|
||||
|
||||
# Ajouter le chemin racine pour les imports core
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent))
|
||||
|
||||
from core.models.workflow_graph import (
|
||||
Workflow,
|
||||
WorkflowNode,
|
||||
WorkflowEdge,
|
||||
)
|
||||
|
||||
from models.visual_workflow import (
|
||||
VisualWorkflow,
|
||||
VisualNode,
|
||||
VisualEdge,
|
||||
Position,
|
||||
Size,
|
||||
Port,
|
||||
EdgeStyle,
|
||||
EdgeCondition,
|
||||
Variable,
|
||||
WorkflowSettings,
|
||||
)
|
||||
|
||||
|
||||
class GraphToVisualConverter:
    """
    Convert a core Workflow into a VWB VisualWorkflow.

    The automatic layout places nodes in a vertical grid (top to bottom),
    with condition branches offset to the side.
    """

    # Inverse mapping: core action_type -> VWB visual node type.
    ACTION_TO_NODE_TYPE = {
        'mouse_click': 'click',
        'text_input': 'type',
        'wait': 'wait',
        'navigate': 'navigate',
        'extract_data': 'extract',
        'set_variable': 'variable',
        'evaluate_condition': 'condition',
        'execute_loop': 'loop',
        'key_press': 'validate',
        'scroll': 'scroll',
        'screenshot': 'screenshot',
        'transform_data': 'transform',
        'api_call': 'api',
        'database_query': 'database',
        'workflow_start': 'start',
        'workflow_end': 'end',
    }

    # Display color per visual node type (hex). Types missing here fall back
    # to the default in _convert_node.
    NODE_COLORS = {
        'click': '#3B82F6',
        'type': '#8B5CF6',
        'wait': '#F59E0B',
        'navigate': '#10B981',
        'extract': '#06B6D4',
        'variable': '#6366F1',
        'condition': '#EF4444',
        'loop': '#F97316',
        'validate': '#14B8A6',
        'scroll': '#64748B',
        'screenshot': '#EC4899',
        'start': '#22C55E',
        'end': '#EF4444',
    }

    # Default layout dimensions (canvas units).
    DEFAULT_NODE_WIDTH = 200
    DEFAULT_NODE_HEIGHT = 80
    VERTICAL_SPACING = 120
    # NOTE(review): HORIZONTAL_SPACING is not referenced in this class —
    # presumably reserved for side-branch layout; confirm intended use.
    HORIZONTAL_SPACING = 280
    START_X = 400
    START_Y = 80

    def __init__(self):
        # Non-fatal conversion issues collected during convert(); reset on
        # every convert() call.
        self.warnings: List[str] = []

    def convert(self, workflow: Workflow) -> VisualWorkflow:
        """
        Convert a core Workflow into a VisualWorkflow.

        Args:
            workflow: The core Workflow (from GraphBuilder or load_from_file)

        Returns:
            VisualWorkflow ready to be displayed in the VWB
        """
        # Reset warnings so repeated convert() calls don't accumulate.
        self.warnings = []

        # Convert nodes with automatic layout.
        visual_nodes = self._convert_nodes(workflow)

        # Convert edges.
        visual_edges = self._convert_edges(workflow)

        # Build the VisualWorkflow; created_at/updated_at share one timestamp.
        now = datetime.now()
        vw = VisualWorkflow(
            id=workflow.workflow_id,
            name=workflow.name or f"Workflow {workflow.workflow_id}",
            description=workflow.description or "Workflow importé depuis le core pipeline",
            version="1.0.0",
            created_at=now,
            updated_at=now,
            created_by="graph_to_visual_converter",
            nodes=visual_nodes,
            edges=visual_edges,
            variables=[],
            settings=WorkflowSettings(),
            tags=workflow.metadata.get('tags', []) if workflow.metadata else [],
            category=workflow.metadata.get('category', 'imported') if workflow.metadata else 'imported',
            is_template=False,
        )

        return vw

    def _convert_nodes(self, workflow: Workflow) -> List[VisualNode]:
        """Convert WorkflowNodes to VisualNodes with automatic layout."""
        visual_nodes = []

        # Topological order drives the vertical layout position.
        ordered_ids = self._topological_order(workflow)

        for idx, node_id in enumerate(ordered_ids):
            node = self._find_node(workflow, node_id)
            if node is None:
                # Defensive: _topological_order only yields ids from
                # workflow.nodes, so this should not trigger.
                continue

            vnode = self._convert_node(node, idx, workflow)
            visual_nodes.append(vnode)

        return visual_nodes

    def _convert_node(self, node: WorkflowNode, index: int, workflow: Workflow) -> VisualNode:
        """Convert a single WorkflowNode to a VisualNode."""

        # Determine the visual type.
        visual_type = self._infer_visual_type(node)

        # Position (simple vertical layout driven by topological index).
        pos = self._compute_position(index, visual_type)

        # Extract parameters from the core node.
        parameters = self._extract_parameters(node)

        # Determine the input/output ports for this visual type.
        input_ports, output_ports = self._create_ports(visual_type)

        # Label: human name if present, otherwise the node id.
        label = node.name or node.node_id

        # Color: per-type, with a neutral gray fallback.
        color = self.NODE_COLORS.get(visual_type, '#64748B')

        return VisualNode(
            id=node.node_id,
            type=visual_type,
            position=pos,
            size=Size(width=self.DEFAULT_NODE_WIDTH, height=self.DEFAULT_NODE_HEIGHT),
            parameters=parameters,
            input_ports=input_ports,
            output_ports=output_ports,
            label=label,
            description=node.description or "",
            color=color,
        )

    def _convert_edges(self, workflow: Workflow) -> List[VisualEdge]:
        """Convert WorkflowEdges to VisualEdges."""
        visual_edges = []

        for edge in workflow.edges:
            vedge = self._convert_edge(edge)
            visual_edges.append(vedge)

        return visual_edges

    def _convert_edge(self, edge: WorkflowEdge) -> VisualEdge:
        """Convert a single WorkflowEdge to a VisualEdge."""

        # Default source/target ports.
        source_port = "out"
        target_port = "in"

        # If the edge carries visual metadata (round-trip through the
        # visual-to-graph converter), restore the original ports.
        if edge.metadata:
            source_port = edge.metadata.get('source_port', 'out')
            target_port = edge.metadata.get('target_port', 'in')

        # Edge condition: derived from pre_conditions when present.
        condition = None
        if edge.constraints and edge.constraints.pre_conditions:
            pre = edge.constraints.pre_conditions
            if 'condition_result' in pre:
                # Boolean branch edge: route out of the matching true/false
                # port and record the expression. Note this overrides any
                # source_port restored from metadata above.
                branch = 'true' if pre['condition_result'] else 'false'
                source_port = f"out_{branch}"
                condition = EdgeCondition(
                    type='expression',
                    expression=f"result == {branch}"
                )
            elif 'expression' in pre:
                condition = EdgeCondition(
                    type='expression',
                    expression=pre['expression']
                )

        # Style: conditional edges are drawn dashed.
        style = EdgeStyle(color=None, width=2, dashed=bool(condition))

        return VisualEdge(
            id=edge.edge_id,
            source=edge.from_node,
            target=edge.to_node,
            source_port=source_port,
            target_port=target_port,
            condition=condition,
            style=style,
        )

    # =========================================================================
    # Helpers
    # =========================================================================

    def _infer_visual_type(self, node: WorkflowNode) -> str:
        """Determine the VWB visual type for a WorkflowNode."""

        # 1. Check metadata (node may already carry a visual_type from a
        #    previous visual-to-graph conversion).
        if node.metadata and 'visual_type' in node.metadata:
            return node.metadata['visual_type']

        # 2. The action type lives on the *edge* in the core model, so it
        #    cannot be read here without scanning the whole workflow;
        #    we rely on node_type / label instead.

        # 3. Derive from node_type via the reverse action mapping.
        if hasattr(node, 'node_type') and node.node_type:
            reverse = self.ACTION_TO_NODE_TYPE.get(node.node_type)
            if reverse:
                return reverse

        # 4. Heuristics on the name/label (French + English keywords).
        name_lower = (node.name or "").lower()
        if any(k in name_lower for k in ['clic', 'click', 'bouton']):
            return 'click'
        if any(k in name_lower for k in ['saisie', 'type', 'input', 'texte']):
            return 'type'
        if any(k in name_lower for k in ['attente', 'wait', 'pause']):
            return 'wait'
        if 'start' in name_lower or 'début' in name_lower:
            return 'start'
        if 'end' in name_lower or 'fin' in name_lower:
            return 'end'

        # 5. Default.
        return 'click'

    def _extract_parameters(self, node: WorkflowNode) -> Dict[str, Any]:
        """Extract VWB parameters from a WorkflowNode."""
        params: Dict[str, Any] = {}

        # Visual metadata (round-trip) takes priority as the base.
        if node.metadata and 'parameters' in node.metadata:
            params.update(node.metadata['parameters'])

        # Template information: window title pattern and required texts.
        if node.template:
            if node.template.window and node.template.window.title_pattern:
                params['window_title'] = node.template.window.title_pattern
            if node.template.text and node.template.text.required_texts:
                params['text_patterns'] = node.template.text.required_texts

        return params

    def _create_ports(self, visual_type: str) -> tuple:
        """Create the default (input_ports, output_ports) for a node type."""
        input_ports = [Port(id="in", name="Entrée", type="input")]

        if visual_type == 'condition':
            # Two outputs: true branch and false branch.
            output_ports = [
                Port(id="out_true", name="Vrai", type="output"),
                Port(id="out_false", name="Faux", type="output"),
            ]
        elif visual_type == 'loop':
            # Two outputs: loop body and loop exit.
            output_ports = [
                Port(id="out_body", name="Corps", type="output"),
                Port(id="out_exit", name="Sortie", type="output"),
            ]
        elif visual_type == 'start':
            # Start nodes have no input.
            input_ports = []
            output_ports = [Port(id="out", name="Sortie", type="output")]
        elif visual_type == 'end':
            # End nodes have no output.
            output_ports = []
        else:
            output_ports = [Port(id="out", name="Sortie", type="output")]

        return input_ports, output_ports

    def _compute_position(self, index: int, visual_type: str) -> Position:
        """Compute a node's position in the vertical layout."""
        x = self.START_X
        y = self.START_Y + index * self.VERTICAL_SPACING

        # Nudge condition nodes slightly to the right.
        if visual_type == 'condition':
            x += 20

        return Position(x=x, y=y)

    def _topological_order(self, workflow: Workflow) -> List[str]:
        """Topological order of node ids (entry -> end), Kahn's algorithm."""
        # Build the adjacency map and in-degree counts.
        adj: Dict[str, List[str]] = {}
        in_degree: Dict[str, int] = {}

        all_ids = {n.node_id for n in workflow.nodes}
        for nid in all_ids:
            adj[nid] = []
            in_degree[nid] = 0

        for edge in workflow.edges:
            # Ignore edges referencing unknown nodes.
            if edge.from_node in adj and edge.to_node in in_degree:
                adj[edge.from_node].append(edge.to_node)
                in_degree[edge.to_node] += 1

        # Kahn BFS seed: all roots (in-degree 0).
        queue = [nid for nid in all_ids if in_degree[nid] == 0]

        # Put declared entry nodes first so they lead the layout.
        if workflow.entry_nodes:
            entries = [e for e in workflow.entry_nodes if e in all_ids]
            others = [q for q in queue if q not in entries]
            queue = entries + others

        result = []
        while queue:
            # list.pop(0) is O(n) — fine for the small graphs handled here.
            node = queue.pop(0)
            result.append(node)
            for neighbor in adj.get(node, []):
                in_degree[neighbor] -= 1
                if in_degree[neighbor] == 0:
                    queue.append(neighbor)

        # Append unreached nodes (orphans / members of cycles) at the end.
        for nid in all_ids:
            if nid not in result:
                result.append(nid)

        return result

    def _find_node(self, workflow: Workflow, node_id: str) -> Optional[WorkflowNode]:
        """Find a node by id; returns None if absent."""
        for n in workflow.nodes:
            if n.node_id == node_id:
                return n
        return None
||||
|
||||
|
||||
def convert_graph_to_visual(workflow: Workflow) -> VisualWorkflow:
    """Convenience wrapper: convert *workflow* using a fresh converter instance."""
    return GraphToVisualConverter().convert(workflow)
|
||||
Reference in New Issue
Block a user