feat: WorkflowRunner, matching sémantique et replay distant (P0-4, P0-6, P0-7)
P0-4: WorkflowRunner — orchestrateur de replay intelligent - Boucle capture → match FAISS → résolution sémantique → exécution - Mode dry_run, substitution de variables, anti-boucle (max 200 steps) - Découplé de pyautogui via executor_callback P0-6: Unification des répertoires workflows - SemanticMatcher scanne data/workflows/ + data/training/workflows/ - Auto-reload sur changement de répertoire (60s) P0-7: Matching sémantique via Ollama - Pré-filtrage Jaccard + re-ranking LLM (qwen2.5:7b) - Score final : 40% Jaccard + 60% LLM, fallback si Ollama indisponible Agent Chat: exécution distante via streaming server - POST http://localhost:5005/api/v1/traces/stream/replay - Fallback sur exécution locale si serveur indisponible Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -14,7 +14,7 @@ Composants intégrés:
|
||||
Usage:
|
||||
python agent_chat/app.py
|
||||
|
||||
Puis ouvrir: http://localhost:5002
|
||||
Puis ouvrir: http://localhost:5004
|
||||
|
||||
Auteur: Dom - Janvier 2026
|
||||
"""
|
||||
@@ -28,6 +28,8 @@ from pathlib import Path
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any, List, Optional
|
||||
|
||||
import requests as http_requests # Pour les appels au streaming server
|
||||
|
||||
from flask import Flask, render_template, request, jsonify
|
||||
from flask_socketio import SocketIO, emit
|
||||
|
||||
@@ -83,6 +85,11 @@ action_executor = None
|
||||
execution_loop = None
|
||||
screen_capturer = None
|
||||
|
||||
# URL du streaming server (Agent V1) pour l'exécution distante
|
||||
STREAMING_SERVER_URL = os.environ.get(
|
||||
"RPA_STREAMING_URL", "http://localhost:5005"
|
||||
)
|
||||
|
||||
execution_status = {
|
||||
"running": False,
|
||||
"workflow": None,
|
||||
@@ -99,10 +106,22 @@ def init_system():
|
||||
global intent_parser, confirmation_loop, response_generator, conversation_manager
|
||||
global autonomous_planner
|
||||
|
||||
# 1. SemanticMatcher
|
||||
# 1. SemanticMatcher — multi-répertoires (P0-6) + matching LLM (P0-7)
|
||||
# Scan data/workflows/ + data/training/workflows/ + data/training/live_sessions/workflows/
|
||||
try:
|
||||
matcher = SemanticMatcher("data/workflows")
|
||||
logger.info(f"✓ SemanticMatcher: {len(matcher.get_all_workflows())} workflows")
|
||||
matcher = SemanticMatcher(
|
||||
workflows_dir=None, # None = scan tous les répertoires par défaut
|
||||
use_llm=True, # Matching sémantique via Ollama (P0-7)
|
||||
llm_model="qwen2.5:7b",
|
||||
)
|
||||
dirs_info = matcher.get_directories()
|
||||
dirs_summary = ", ".join(
|
||||
f"{d['path']}({d['workflow_count']})" for d in dirs_info if d['exists']
|
||||
)
|
||||
logger.info(
|
||||
f"✓ SemanticMatcher: {len(matcher.get_all_workflows())} workflows "
|
||||
f"[{dirs_summary}]"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"✗ SemanticMatcher: {e}")
|
||||
matcher = None
|
||||
@@ -267,20 +286,55 @@ def api_status():
|
||||
|
||||
@app.route('/api/workflows')
def api_workflows():
    """List all known workflows (every scanned directory combined).

    Returns a JSON payload with one entry per workflow plus the
    directory metadata reported by the SemanticMatcher.  When the
    matcher failed to initialise, empty lists are returned so the
    front-end can still render.
    """
    # SemanticMatcher may be None if init_system() failed — degrade to
    # an empty listing instead of raising.
    if not matcher:
        return jsonify({"workflows": [], "directories": []})

    workflows = [
        {
            "id": wf.workflow_id,
            "name": wf.name,
            "description": wf.description,
            "tags": wf.tags,
            # Directory the workflow was loaded from (multi-dir scan).
            "source": wf.source_dir,
        }
        for wf in matcher.get_all_workflows()
    ]

    return jsonify({
        "workflows": workflows,
        "directories": matcher.get_directories(),
    })
|
||||
|
||||
|
||||
@app.route('/api/workflows/refresh', methods=['POST'])
def api_workflows_refresh():
    """
    Force a reload of the workflows from every configured directory.

    Handy right after a new workflow has been learned by the
    StreamProcessor, so the chat agent picks it up without a restart.
    """
    # Without a matcher there is nothing to reload.
    if not matcher:
        return jsonify({"success": False, "error": "SemanticMatcher non initialisé"})

    try:
        count = matcher.reload_workflows()

        # Push the refreshed catalogue back into the intent parser so the
        # LLM prompt context stays in sync with what is on disk.
        if intent_parser:
            llm_context = []
            for wf in matcher.get_all_workflows():
                llm_context.append({
                    "name": wf.name,
                    "description": wf.description,
                    "tags": wf.tags,
                })
            intent_parser.set_workflows(llm_context)

        return jsonify({
            "success": True,
            "workflows_count": count,
            "directories": matcher.get_directories(),
        })
    except Exception as e:
        logger.error(f"Erreur rechargement workflows: {e}")
        return jsonify({"success": False, "error": str(e)})
|
||||
|
||||
|
||||
@app.route('/api/search', methods=['POST'])
|
||||
@@ -893,12 +947,79 @@ def handle_cancel():
|
||||
# Exécution de workflow
|
||||
# =============================================================================
|
||||
|
||||
def _try_streaming_server_replay(workflow_id: str, params: Dict[str, Any]) -> Optional[Dict[str, Any]]:
    """
    Attempt to run a workflow through the streaming server (Agent V1).

    Issues POST {STREAMING_SERVER_URL}/api/v1/traces/stream/replay with
    the workflow id and its parameters.

    Returns:
        The server's JSON response on success, or None when the server
        is unreachable, times out, or rejects the request.
    """
    # Build the request body up front; a fresh session id is derived
    # from the current time so replays are distinguishable server-side.
    payload = {
        "workflow_id": workflow_id,
        "session_id": f"chat_{datetime.now().strftime('%H%M%S')}",
        "params": params or {},
    }

    try:
        resp = http_requests.post(
            f"{STREAMING_SERVER_URL}/api/v1/traces/stream/replay",
            json=payload,
            timeout=5,
        )
        if resp.status_code != 200:
            # Server reachable but refused the replay — log and fall back.
            logger.debug(
                f"Streaming server refus (HTTP {resp.status_code}): "
                f"{resp.text[:200]}"
            )
            return None

        data = resp.json()
        logger.info(f"Workflow {workflow_id} envoyé au streaming server: {data}")
        return data
    except http_requests.ConnectionError:
        logger.debug("Streaming server non disponible (connexion refusée)")
    except http_requests.Timeout:
        logger.debug("Streaming server timeout")
    except Exception as e:
        # Best-effort: any other failure (bad JSON, DNS, …) means "use
        # the local fallback", never a crash of the chat agent.
        logger.debug(f"Erreur streaming server: {e}")

    return None
|
||||
|
||||
|
||||
def execute_workflow(match, params):
|
||||
"""Exécuter un workflow avec le vrai système d'exécution."""
|
||||
"""
|
||||
Exécuter un workflow — tente d'abord le streaming server,
|
||||
puis fallback sur l'exécution locale.
|
||||
"""
|
||||
global execution_status
|
||||
|
||||
import time
|
||||
|
||||
# Tenter l'exécution via le streaming server (Agent V1 distant)
|
||||
replay_result = _try_streaming_server_replay(match.workflow_id, params)
|
||||
if replay_result:
|
||||
# Le streaming server a accepté le replay
|
||||
execution_status["running"] = True
|
||||
execution_status["workflow"] = match.workflow_name
|
||||
execution_status["progress"] = 50
|
||||
execution_status["message"] = "Envoyé au streaming server (Agent V1)"
|
||||
|
||||
socketio.emit('execution_progress', {
|
||||
"progress": 50,
|
||||
"step": "Exécution via streaming server...",
|
||||
"current": 1,
|
||||
"total": 1,
|
||||
})
|
||||
|
||||
finish_execution(
|
||||
match.workflow_name, True,
|
||||
f"Workflow envoyé au streaming server ({replay_result.get('status', 'ok')})"
|
||||
)
|
||||
return
|
||||
|
||||
# Fallback : exécution locale
|
||||
logger.info("Streaming server indisponible, exécution locale")
|
||||
|
||||
try:
|
||||
# Charger le workflow
|
||||
with open(match.workflow_path, 'r') as f:
|
||||
@@ -1257,10 +1378,10 @@ if __name__ == '__main__':
|
||||
╔════════════════════════════════════════════════════════════╗
|
||||
║ RPA Vision V3 - Interface de Commande ║
|
||||
║ ║
|
||||
║ 🌐 http://localhost:5002 ║
|
||||
║ 🌐 http://localhost:5004 ║
|
||||
║ ║
|
||||
║ Ctrl+C pour arrêter ║
|
||||
╚════════════════════════════════════════════════════════════╝
|
||||
""")
|
||||
|
||||
socketio.run(app, host='127.0.0.1', port=5002, debug=False, allow_unsafe_werkzeug=True)
|
||||
socketio.run(app, host='127.0.0.1', port=5004, debug=False, allow_unsafe_werkzeug=True)
|
||||
|
||||
Reference in New Issue
Block a user