feat: unify VWB and Léa with bidirectional import/export
- Workflows learned by Léa are visible in the VWB ("Appris par Léa")
- "Importer" button to edit a learned workflow
- "Exporter pour Léa" button to make a VWB workflow executable
- Bidirectional core ↔ VWB conversion via learned_workflow_bridge
- Unified list in the Léa chat (merged and deduplicated)
- reload_workflows() on the streaming server (no restart needed)
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
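A sketch of the full round trip these endpoints enable, for reviewers who want to exercise it end to end. It assumes the VWB API is served at http://localhost:5000, which this diff does not specify:

    import requests

    BASE = "http://localhost:5000/api/v3"

    # 1. List learned workflows (disk + streaming server, deduplicated)
    workflows = requests.get(f"{BASE}/learned-workflows", timeout=5).json()["workflows"]

    # 2. Import the first one not yet present in the VWB, for review/editing
    candidate = next(w for w in workflows if not w["already_imported"])
    imported = requests.post(
        f"{BASE}/learned-workflows/{candidate['workflow_id']}/import",
        json={"machine_id": candidate["machine_id"]},
        timeout=10,
    ).json()["workflow"]

    # 3. Once corrected in the VWB, export it back for Léa; the streaming
    #    server is notified and reloads without a restart
    requests.post(f"{BASE}/workflow/{imported['id']}/export-for-lea", json={}, timeout=10)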
@@ -23,4 +23,10 @@ try:
except ImportError as e:
    print(f"⚠️ dag_execute module disabled: {e}")

# Bridge: learned workflows (Léa) <-> VWB
try:
    from . import learned_workflows  # noqa: F401
except ImportError as e:
    print(f"⚠️ learned_workflows module disabled: {e}")

__all__ = ['api_v3_bp']
visual_workflow_builder/backend/api_v3/learned_workflows.py (new file, 448 lines)
@@ -0,0 +1,448 @@
"""
API v3 - Bridge between learned workflows (Léa) and the VWB

Endpoints:
    GET /api/v3/learned-workflows → list learned workflows (disk + streaming server)
    POST /api/v3/learned-workflows/<id>/import → import a learned workflow into the VWB
    POST /api/v3/workflow/<id>/export-for-lea → export a VWB workflow for Léa

The goal: ONE workflow format, ONE store (the VWB SQLite database), ONE replay system.
Learned workflows arrive via import, a human corrects them in the VWB,
and execution then runs through the same pipeline (execute-windows → streaming server).

Authors: Dom, Claude, 18 March 2026
"""

import json
import logging
import sys
import traceback
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Optional

import requests as http_requests

from flask import jsonify, request

from . import api_v3_bp
from .workflow import generate_id
from db.models import db, Workflow, Step

logger = logging.getLogger(__name__)

# Root path for core imports
_ROOT = str(Path(__file__).resolve().parent.parent.parent.parent)
if _ROOT not in sys.path:
    sys.path.insert(0, _ROOT)

# Streaming server URL
STREAMING_SERVER_URL = "http://localhost:5005"


# ---------------------------------------------------------------------------
# GET /api/v3/learned-workflows
# ---------------------------------------------------------------------------

@api_v3_bp.route('/learned-workflows', methods=['GET'])
def list_learned_workflows():
    """
    List the workflows learned by Léa, from two sources:
        1. JSON files on disk (data/training/workflows/)
        2. The streaming server (port 5005), if available

    For each workflow, indicates whether it has already been imported
    into the VWB.

    Query params:
        machine_id: filter by machine (optional)

    Response:
        {
            "success": true,
            "workflows": [
                {
                    "workflow_id": "...",
                    "name": "...",
                    "nodes": 5,
                    "edges": 4,
                    "machine_id": "windows_pc_01",
                    "learning_state": "OBSERVATION",
                    "source": "learned",
                    "already_imported": false,
                    "vwb_workflow_id": null
                }
            ],
            "streaming_server_available": true
        }
    """
    machine_id = request.args.get('machine_id')

    from services.learned_workflow_bridge import list_learned_workflows_from_disk

    # Source 1: files on disk
    disk_workflows = list_learned_workflows_from_disk()

    # Source 2: streaming server (more up to date, includes in-memory workflows)
    streaming_workflows = []
    streaming_available = False

    try:
        params = {}
        if machine_id:
            params["machine_id"] = machine_id

        resp = http_requests.get(
            f"{STREAMING_SERVER_URL}/api/v1/traces/stream/workflows",
            params=params,
            timeout=3,
        )
        if resp.ok:
            streaming_available = True
            streaming_data = resp.json().get("workflows", [])
            for sw in streaming_data:
                streaming_workflows.append({
                    "workflow_id": sw.get("workflow_id", ""),
                    "name": sw.get("name", sw.get("workflow_id", "")),
                    "description": "",
                    "machine_id": sw.get("machine_id", "default"),
                    "nodes": sw.get("nodes", 0),
                    "edges": sw.get("edges", 0),
                    "learning_state": "OBSERVATION",
                    "source": "streaming",
                })
    except http_requests.ConnectionError:
        logger.debug("Streaming server unavailable (port 5005)")
    except Exception as e:
        logger.warning("Streaming server error: %s", e)

    # Merge: streaming takes priority (more up to date); then add the
    # workflows that only exist on disk
    seen_ids = set()
    merged = []

    for wf in streaming_workflows:
        seen_ids.add(wf["workflow_id"])
        merged.append(wf)

    for wf in disk_workflows:
        if wf["workflow_id"] not in seen_ids:
            merged.append(wf)
            seen_ids.add(wf["workflow_id"])

    # Filter by machine if requested
    if machine_id:
        merged = [w for w in merged if w.get("machine_id") == machine_id]

    # Enrich: check whether each workflow was already imported into the VWB
    for wf in merged:
        existing = Workflow.query.filter(
            Workflow.description.contains(wf["workflow_id"]),
            Workflow.source.in_(["learned_import", "graph_to_visual_converter"])
        ).first()

        if existing:
            wf["already_imported"] = True
            wf["vwb_workflow_id"] = existing.id
        else:
            wf["already_imported"] = False
            wf["vwb_workflow_id"] = None

    return jsonify({
        "success": True,
        "workflows": merged,
        "streaming_server_available": streaming_available,
    })
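
# A minimal sketch (not part of this commit): the merge rule above as a pure
# function, so the streaming-over-disk priority can be unit-tested without
# Flask or HTTP. The first occurrence of a workflow_id wins, and streaming
# entries are passed first.
def _merge_by_priority(streaming, disk):
    seen, merged = set(), []
    for wf in list(streaming) + list(disk):
        if wf["workflow_id"] not in seen:
            seen.add(wf["workflow_id"])
            merged.append(wf)
    return merged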


# ---------------------------------------------------------------------------
# POST /api/v3/learned-workflows/<workflow_id>/import
# ---------------------------------------------------------------------------

@api_v3_bp.route('/learned-workflows/<workflow_id>/import', methods=['POST'])
def import_learned_workflow(workflow_id: str):
    """
    Import a learned workflow into the VWB for review/editing.

    1. Load the core workflow JSON (from disk or the streaming server)
    2. Convert the edges into VWB Steps
    3. Create a SQLAlchemy Workflow with source='learned_import'
    4. Return the created VWB workflow

    Body (optional):
        {
            "name": "Custom name",       // Overrides the name
            "machine_id": "windows_pc"   // To load from the streaming server
        }

    Response:
        {
            "success": true,
            "workflow": { ... },
            "warnings": [...],
            "message": "..."
        }
    """
    try:
        data = request.get_json() or {}

        # Load the core workflow
        core_dict = _load_core_workflow(workflow_id, data.get("machine_id"))
        if core_dict is None:
            return jsonify({
                "success": False,
                "error": f"Workflow '{workflow_id}' not found "
                         "(neither on disk nor on the streaming server)"
            }), 404

        # Convert to VWB steps
        from services.learned_workflow_bridge import convert_learned_to_vwb_steps

        wf_meta, steps_list, warnings = convert_learned_to_vwb_steps(core_dict)

        # Override the name if provided
        if data.get("name"):
            wf_meta["name"] = data["name"]

        # Create the VWB workflow
        wf_id = generate_id("wf")
        workflow = Workflow(
            id=wf_id,
            name=wf_meta["name"],
            description=(
                f"{wf_meta.get('description', '')}\n\n"
                f"[Imported from learned workflow: {workflow_id}]"
            ).strip(),
            source="learned_import",
            review_status="pending_review",
        )

        if wf_meta.get("tags"):
            workflow.tags = wf_meta["tags"]

        db.session.add(workflow)

        # Create the steps
        for step_data in steps_list:
            step = Step(
                id=generate_id("step"),
                workflow_id=wf_id,
                action_type=step_data["action_type"],
                order=step_data["order"],
                position_x=step_data.get("position_x", 400),
                position_y=step_data.get("position_y", 200),
                label=step_data.get("label", step_data["action_type"]),
            )
            step.parameters = step_data.get("parameters", {})
            db.session.add(step)

        db.session.commit()

        logger.info(
            "Learned workflow imported: %s → %s (%d steps, %d warnings)",
            workflow_id, wf_id, len(steps_list), len(warnings),
        )

        return jsonify({
            "success": True,
            "workflow": workflow.to_dict(),
            "warnings": warnings,
            "message": (
                f"Workflow '{wf_meta['name']}' imported with {len(steps_list)} steps. "
                "Awaiting validation."
            ),
        }), 201

    except Exception as e:
        db.session.rollback()
        traceback.print_exc()
        logger.error("Error importing learned workflow %s: %s", workflow_id, e)
        return jsonify({
            "success": False,
            "error": str(e),
        }), 500
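
# A shape-only stand-in inferred from the call sites above, not the real
# implementation (which lives in services/learned_workflow_bridge.py, outside
# this diff): what convert_learned_to_vwb_steps is expected to return.
def _bridge_contract_sketch(core_dict: Dict[str, Any]):
    wf_meta = {"name": core_dict.get("name", "unnamed"), "description": "", "tags": []}
    steps_list = [{
        "action_type": "click",   # required by the Step constructor above
        "order": 0,               # required
        "position_x": 400,        # optional; defaults applied above
        "position_y": 200,        # optional
        "label": "click",         # optional
        "parameters": {},         # optional
    }]
    warnings = []                 # list of str
    return wf_meta, steps_list, warnings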


# ---------------------------------------------------------------------------
# POST /api/v3/workflow/<workflow_id>/export-for-lea
# ---------------------------------------------------------------------------

@api_v3_bp.route('/workflow/<workflow_id>/export-for-lea', methods=['POST'])
def export_for_lea(workflow_id: str):
    """
    Export a VWB workflow as core JSON for execution by Léa.

    1. Read the VWB workflow from SQLite
    2. Convert the VWB Steps → core Workflow JSON
    3. Save to data/training/workflows/{machine_id}/
    4. The workflow is then available to the streaming server

    Body (optional):
        {
            "machine_id": "windows_pc_01"  // Target subfolder (default: "vwb_export")
        }

    Response:
        {
            "success": true,
            "core_workflow_id": "...",
            "export_path": "data/training/workflows/...",
            "message": "..."
        }
    """
    try:
        workflow = Workflow.query.get(workflow_id)
        if not workflow:
            return jsonify({
                "success": False,
                "error": f"Workflow '{workflow_id}' not found"
            }), 404

        # Fetch the steps
        steps = Step.query.filter_by(
            workflow_id=workflow_id
        ).order_by(Step.order).all()

        if not steps:
            return jsonify({
                "success": False,
                "error": "The workflow has no steps"
            }), 400

        # Prepare the data
        workflow_data = {
            "id": workflow.id,
            "name": workflow.name,
            "description": workflow.description or "",
            "tags": workflow.tags or [],
            "created_at": workflow.created_at.isoformat() if workflow.created_at else None,
        }
        steps_data = [s.to_dict() for s in steps]

        # Convert
        from services.learned_workflow_bridge import (
            convert_vwb_to_core_workflow,
            save_core_workflow_to_disk,
        )

        core_wf = convert_vwb_to_core_workflow(workflow_data, steps_data)

        # Save to disk
        data = request.get_json() or {}
        machine_id = data.get("machine_id", "vwb_export")

        filepath = save_core_workflow_to_disk(core_wf, machine_id=machine_id)

        logger.info(
            "VWB workflow exported for Léa: %s → %s (%d nodes, %d edges)",
            workflow_id, filepath, len(core_wf["nodes"]), len(core_wf["edges"]),
        )

        # Notify the streaming server if available (so it reloads)
        _notify_streaming_reload()

        return jsonify({
            "success": True,
            "core_workflow_id": core_wf["workflow_id"],
            "export_path": str(filepath),
            "nodes_count": len(core_wf["nodes"]),
            "edges_count": len(core_wf["edges"]),
            "message": (
                f"Workflow '{workflow.name}' exported with "
                f"{len(core_wf['nodes'])} nodes and {len(core_wf['edges'])} transitions. "
                f"Available to Léa in {filepath.parent.name}/."
            ),
        })

    except Exception as e:
        traceback.print_exc()
        logger.error("Error exporting workflow %s for Léa: %s", workflow_id, e)
        return jsonify({
            "success": False,
            "error": str(e),
        }), 500
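
# For orientation, a hedged example of the exported core JSON, limited to the
# keys this endpoint reads back ("workflow_id", "nodes", "edges"). The field
# names inside nodes/edges are illustrative assumptions; the real schema is
# owned by learned_workflow_bridge.
_CORE_EXPORT_EXAMPLE = {
    "workflow_id": "wf_abc123",
    "name": "Invoice entry",
    "nodes": [{"id": "n0"}, {"id": "n1"}],
    "edges": [{"source": "n0", "target": "n1"}],
}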


# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------

def _load_core_workflow(
    workflow_id: str, machine_id: Optional[str] = None
) -> Optional[Dict[str, Any]]:
    """
    Load a core workflow from disk or from the streaming server.

    Priority:
        1. Files on disk (data/training/workflows/)
        2. Streaming server (GET workflow → to_dict)
    """
    from services.learned_workflow_bridge import load_learned_workflow

    # 1. Try the disk
    disk_wf = load_learned_workflow(workflow_id)
    if disk_wf:
        return disk_wf

    # 2. Try the streaming server and fetch the full workflow.
    # There is no guaranteed GET /workflow/<id> endpoint there,
    # so list the workflows and match by ID.
    try:
        params = {}
        if machine_id:
            params["machine_id"] = machine_id

        resp = http_requests.get(
            f"{STREAMING_SERVER_URL}/api/v1/traces/stream/workflows",
            params=params,
            timeout=3,
        )
        if resp.ok:
            workflows = resp.json().get("workflows", [])
            for wf in workflows:
                if wf.get("workflow_id") == workflow_id:
                    # The listing only carries metadata; we need the full
                    # workflow. Try a detail endpoint opportunistically,
                    # in case one is available.
                    try:
                        detail_resp = http_requests.get(
                            f"{STREAMING_SERVER_URL}/api/v1/traces/stream/workflow/{workflow_id}",
                            timeout=5,
                        )
                        if detail_resp.ok:
                            return detail_resp.json()
                    except Exception:
                        pass

                    # Fallback: load from disk using the machine_id we just found
                    wf_machine = wf.get("machine_id", "default")
                    wf_path = (
                        Path(_ROOT) / "data" / "training" / "workflows"
                        / wf_machine / f"{workflow_id}.json"
                    )
                    if wf_path.exists():
                        with open(wf_path, "r", encoding="utf-8") as f:
                            return json.load(f)

    except http_requests.ConnectionError:
        pass
    except Exception as e:
        logger.warning("Error loading workflow from streaming server: %s", e)

    return None


def _notify_streaming_reload():
    """
    Ask the streaming server to reload its workflows.

    Best-effort call: if the server is unavailable, the failure is ignored.
    """
    try:
        http_requests.post(
            f"{STREAMING_SERVER_URL}/api/v1/traces/stream/reload-workflows",
            timeout=2,
        )
        logger.debug("Streaming server notified to reload its workflows")
    except Exception:
        # Non-critical: the streaming server will reload at its next startup
        pass
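

# Context, hedged: the receiving side is not part of this diff. Assuming the
# streaming server (port 5005) is Flask-based and exposes the reload_workflows()
# loader mentioned in the commit message, the matching handler would look
# roughly like this (all names below are assumptions):
#
#     @app.route("/api/v1/traces/stream/reload-workflows", methods=["POST"])
#     def handle_reload_workflows():
#         count = reload_workflows()  # re-scan data/training/workflows/
#         return jsonify({"success": True, "reloaded": count})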