Files
rpa_vision_v3/agent_chat/app.py
Dom c6a857b96b Refactor: Renommer command_interface en agent_chat
- command_interface/ → agent_chat/
- Mise à jour run.sh (--chat au lieu de --command)
- Mise à jour documentation

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-15 15:13:26 +01:00

479 lines
14 KiB
Python

#!/usr/bin/env python3
"""
RPA Vision V3 - Interface de Commande Web
Interface web légère pour communiquer avec le système RPA.
Style "Spotlight/Alfred" - minimaliste et efficace.
Usage:
python agent_chat/app.py
Puis ouvrir: http://localhost:5002
"""
import asyncio
import json
import logging
import sys
from pathlib import Path
from datetime import datetime
from typing import Dict, Any, List, Optional
from flask import Flask, render_template, request, jsonify
from flask_socketio import SocketIO, emit
# Add project root to path
sys.path.insert(0, str(Path(__file__).parent.parent))
from core.workflow import SemanticMatcher, VariableManager
# GPU Resource Manager (optional — core.gpu may not be installed/available).
try:
    from core.gpu import get_gpu_resource_manager, ExecutionMode
    GPU_AVAILABLE = True
except ImportError:
    # Degrade gracefully: all GPU endpoints become no-ops.
    GPU_AVAILABLE = False

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = Flask(__name__)
# NOTE(review): hard-coded secret key — acceptable for a localhost-only dev
# tool, must not be used as-is in any deployed environment.
app.config['SECRET_KEY'] = 'rpa-vision-v3-secret'
socketio = SocketIO(app, cors_allowed_origins="*")

# Global state
matcher: Optional[SemanticMatcher] = None  # semantic workflow matcher, set by init_system()
gpu_manager = None  # GPU resource manager instance, or None when unavailable
# Live execution status; mirrored to clients over WebSocket and /api/status.
execution_status = {
    "running": False,
    "workflow": None,
    "progress": 0,
    "message": "",
    "can_minimize": True
}
# Chronological record of issued commands; /api/history serves the last 20.
command_history: List[Dict[str, Any]] = []
def init_system():
    """Initialize the system components.

    Sets the module-level ``matcher`` and ``gpu_manager`` globals. Each
    component is initialized independently: a failure in one leaves the
    other usable and is only logged, never raised.
    """
    global matcher, gpu_manager

    # 1. Semantic workflow matcher, backed by the on-disk workflow store.
    try:
        matcher = SemanticMatcher("data/workflows")
        logger.info("✓ SemanticMatcher: %s workflows", len(matcher.get_all_workflows()))
    except Exception as e:
        logger.error("✗ SemanticMatcher: %s", e)
        matcher = None

    # 2. GPU resource manager — only attempted when the optional import succeeded.
    if GPU_AVAILABLE:
        try:
            gpu_manager = get_gpu_resource_manager()
            logger.info("✓ GPU Resource Manager connected")
        except Exception as e:
            logger.warning("⚠ GPU Resource Manager: %s", e)
            gpu_manager = None
# =============================================================================
# Routes Web
# =============================================================================
@app.route('/')
def index():
    """Serve the single-page command UI."""
    return render_template('command.html')
@app.route('/api/status')
def api_status():
    """Return the full system status: workflows, execution, GPU and Ollama.

    Every sub-status is best-effort: a failing component yields ``None``
    (or ``{"available": False}`` for Ollama) instead of an error response.
    """
    workflows_count = len(matcher.get_all_workflows()) if matcher else 0

    # GPU status — optional component, may legitimately be absent.
    gpu_status = None
    if gpu_manager:
        try:
            status = gpu_manager.get_status()
            gpu_status = {
                "mode": status.execution_mode.value,
                "vlm_state": status.vlm_state.value,
                "vlm_model": status.vlm_model,
                "clip_device": status.clip_device,
                "degraded": status.degraded_mode
            }
            if status.vram:
                gpu_status["vram"] = {
                    "used_mb": status.vram.used_mb,
                    "total_mb": status.vram.total_mb,
                    # Guard against a zero total to avoid ZeroDivisionError.
                    "percent": round(status.vram.used_mb / status.vram.total_mb * 100, 1) if status.vram.total_mb > 0 else 0
                }
        except Exception as e:
            logger.warning(f"GPU status error: {e}")

    # Ollama status — probe the local API with a short timeout.
    # NOTE(review): a reachable server answering non-200 leaves this None
    # ("unknown") rather than {"available": False}; kept as-is on purpose.
    ollama_status = None
    try:
        import requests
        response = requests.get("http://localhost:11434/api/tags", timeout=2)
        if response.status_code == 200:
            models = response.json().get('models', [])
            ollama_status = {
                "available": True,
                "models_count": len(models)
            }
    except Exception:
        # Was a bare `except:` — that also swallowed KeyboardInterrupt and
        # SystemExit; Exception still covers all request/parse failures.
        ollama_status = {"available": False}

    return jsonify({
        "status": "online",
        "workflows_count": workflows_count,
        "execution": execution_status,
        "gpu": gpu_status,
        "ollama": ollama_status
    })
@app.route('/api/workflows')
def api_workflows():
    """List every known workflow as a JSON summary."""
    if not matcher:
        return jsonify({"workflows": []})
    workflows = [
        {
            "id": wf.workflow_id,
            "name": wf.name,
            "description": wf.description,
            "tags": wf.tags,
        }
        for wf in matcher.get_all_workflows()
    ]
    return jsonify({"workflows": workflows})
@app.route('/api/search', methods=['POST'])
def api_search():
    """Search workflows matching a natural-language query.

    Body: JSON ``{"query": str}``. Returns up to 5 matches above a 0.2
    confidence floor; an empty/invalid body yields an empty match list.
    """
    # silent=True: a missing or non-JSON body yields None instead of a 400
    # abort (request.json would raise), so we degrade to an empty result.
    data = request.get_json(silent=True) or {}
    query = data.get('query', '')
    if not matcher or not query:
        return jsonify({"matches": []})
    matches = matcher.find_workflows(query, limit=5, min_confidence=0.2)
    results = [
        {
            "workflow_id": m.workflow_id,
            "workflow_name": m.workflow_name,
            "confidence": m.confidence,
            "extracted_params": m.extracted_params,
            "match_reason": m.match_reason,
        }
        for m in matches
    ]
    return jsonify({"matches": results})
@app.route('/api/execute', methods=['POST'])
def api_execute():
    """Match a natural-language command to a workflow and launch it.

    Body: JSON ``{"command": str, "params": dict}``. On a match, records
    the command in history, resets the execution status, notifies clients
    over WebSocket and starts the workflow as a background task.
    """
    global execution_status
    # Tolerate a missing or non-JSON body instead of aborting with a 400.
    data = request.get_json(silent=True) or {}
    command = data.get('command', '')
    params = data.get('params', {})
    if not matcher or not command:
        return jsonify({"success": False, "error": "Invalid command"})

    # Resolve the best-matching workflow.
    match = matcher.find_workflow(command, min_confidence=0.2)
    if not match:
        return jsonify({
            "success": False,
            "error": "Aucun workflow correspondant trouvé"
        })

    # Explicit params override the ones extracted from the command text.
    all_params = {**match.extracted_params, **params}

    # Record in history (finish_execution later flips "status").
    command_history.append({
        "timestamp": datetime.now().isoformat(),
        "command": command,
        "workflow": match.workflow_name,
        "params": all_params,
        "status": "started"
    })

    # Reset the status. "can_minimize" is part of the initial status schema
    # (module top) — the old reset silently dropped that key.
    execution_status = {
        "running": True,
        "workflow": match.workflow_name,
        "progress": 0,
        "message": "Démarrage...",
        "can_minimize": True
    }

    # Notify connected clients.
    socketio.emit('execution_started', {
        "workflow": match.workflow_name,
        "params": all_params
    })

    # Run the workflow in the background.
    socketio.start_background_task(execute_workflow, match, all_params)

    return jsonify({
        "success": True,
        "workflow": match.workflow_name,
        "params": all_params,
        "confidence": match.confidence
    })
@app.route('/api/history')
def api_history():
    """Return the 20 most recent commands."""
    recent = command_history[-20:]
    return jsonify({"history": recent})
@app.route('/api/gpu/<action>', methods=['POST'])
def api_gpu_action(action):
    """Drive the GPU Resource Manager from the UI.

    Supported actions: ``load-vlm``, ``unload-vlm``, ``recording``,
    ``autopilot``, ``idle``. Unknown actions return ``success: False``.
    """
    if not gpu_manager:
        return jsonify({"success": False, "error": "GPU Manager non disponible"})

    async def do_action():
        # The manager API is async; map each route action to a coroutine.
        if action == "load-vlm":
            return await gpu_manager.ensure_vlm_loaded()
        if action == "unload-vlm":
            return await gpu_manager.ensure_vlm_unloaded()
        if action == "recording":
            await gpu_manager.set_execution_mode(ExecutionMode.RECORDING)
            return True
        if action == "autopilot":
            await gpu_manager.set_execution_mode(ExecutionMode.AUTOPILOT)
            return True
        if action == "idle":
            await gpu_manager.set_execution_mode(ExecutionMode.IDLE)
            return True
        return False

    try:
        # asyncio.run creates, runs and reliably closes a fresh event loop.
        # The previous manual new_event_loop()/close() pattern leaked the
        # loop whenever run_until_complete raised (close() was skipped).
        result = asyncio.run(do_action())
        return jsonify({"success": result, "action": action})
    except Exception as e:
        return jsonify({"success": False, "error": str(e)})
@app.route('/api/help')
def api_help():
    """Return the help / user-manual content as JSON (French UI copy).

    The multi-line "content" strings are user-facing markdown rendered by
    the frontend — they are runtime data, not comments; keep them verbatim.
    """
    help_content = {
        "title": "RPA Vision V3 - Mode d'emploi",
        "sections": [
            {
                "title": "🎯 Commandes en langage naturel",
                "content": """
Tapez simplement ce que vous voulez faire en français ou anglais.
Le système trouvera automatiquement le workflow correspondant.
**Exemples :**
- "facturer le client Acme"
- "exporter le rapport en PDF"
- "créer une facture pour Client ABC"
- "facturer les clients de A à Z"
"""
            },
            {
                "title": "📋 Paramètres",
                "content": """
Les paramètres sont extraits automatiquement de votre commande.
Vous pouvez aussi les spécifier manuellement dans le formulaire.
**Paramètres courants :**
- `client` : Nom du client
- `format` : Format d'export (pdf, excel)
- `start`, `end` : Plage de valeurs
"""
            },
            {
                "title": "⌨️ Raccourcis clavier",
                "content": """
- `Entrée` : Exécuter la commande
- `Échap` : Annuler / Fermer
- `↑` / `↓` : Naviguer dans l'historique
- `Ctrl+M` : Minimiser l'interface
"""
            },
            {
                "title": "🔄 Pendant l'exécution",
                "content": """
L'interface peut être minimisée pendant l'exécution.
Le workflow s'exécute en arrière-plan.
Vous serez notifié à la fin de l'exécution.
"""
            }
        ]
    }
    return jsonify(help_content)
# =============================================================================
# WebSocket Events
# =============================================================================
@socketio.on('connect')
def handle_connect():
    """On client connect: log it and push the current execution status."""
    logger.info("Client connected")
    # Sync the newcomer immediately so its UI reflects any running workflow.
    emit('status', execution_status)
@socketio.on('disconnect')
def handle_disconnect():
    """On client disconnect: log only — no per-client state to clean up."""
    logger.info("Client disconnected")
@socketio.on('cancel_execution')
def handle_cancel():
    """Request cancellation of the running workflow.

    Only flips the shared ``running`` flag; execute_workflow polls it
    between steps and stops at the next opportunity.
    """
    global execution_status
    execution_status.update(running=False, message="Annulé")
    emit('execution_cancelled', {}, broadcast=True)
# =============================================================================
# Exécution de workflow
# =============================================================================
def execute_workflow(match, params):
    """Run a matched workflow in the background.

    Started via ``socketio.start_background_task``. Loads the workflow
    JSON, substitutes parameters, optionally switches the GPU manager to
    autopilot, then walks the workflow edges while streaming progress.
    Any failure is reported through ``finish_execution`` — never raised.

    NOTE: step execution is still simulated (time.sleep) pending the real
    ActionExecutor integration.
    """
    global execution_status
    import time

    try:
        # Load the workflow definition (JSON, explicit encoding).
        with open(match.workflow_path, 'r', encoding='utf-8') as f:
            workflow_data = json.load(f)

        # Inject caller parameters via the VariableManager.
        var_manager = VariableManager()
        var_manager.set_variables(params)
        workflow_data = var_manager.substitute_dict(workflow_data)

        # Steps are the workflow edges; fall back to 5 pseudo-steps so the
        # progress bar still moves for an edge-less workflow.
        edges = workflow_data.get("edges", [])
        total_steps = len(edges) if edges else 5

        # Step 1: initialisation.
        update_progress(10, "Initialisation", 1, total_steps + 2)
        time.sleep(0.5)

        # Step 2: GPU preparation (optional, best-effort).
        if gpu_manager and GPU_AVAILABLE:
            update_progress(20, "Préparation GPU...", 2, total_steps + 2)
            try:
                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(loop)
                try:
                    loop.run_until_complete(gpu_manager.set_execution_mode(ExecutionMode.AUTOPILOT))
                finally:
                    # Always release the loop, even when the mode change fails
                    # (the old code skipped close() on error).
                    loop.close()
            except Exception as e:
                logger.warning(f"GPU mode change failed: {e}")

        # Execute each workflow step, checking the cancel flag between steps.
        for i, edge in enumerate(edges):
            if not execution_status["running"]:
                break  # cancelled from the UI
            action = edge.get("action", {})
            action_type = action.get("type", "unknown")
            progress = int(20 + (i + 1) / total_steps * 70)
            step_name = f"Étape {i+1}: {action_type}"
            update_progress(progress, step_name, i + 3, total_steps + 2)
            # Simulate the action execution.
            # TODO: Connecter au vrai ActionExecutor
            time.sleep(0.8)

        # Bug fix: a cancelled run used to fall through and be reported as a
        # success ("Workflow terminé avec succès") with history "completed".
        if not execution_status["running"]:
            finish_execution(match.workflow_name, False, "Exécution annulée")
            return

        # Finalisation.
        update_progress(95, "Finalisation...", total_steps + 1, total_steps + 2)
        time.sleep(0.3)

        finish_execution(match.workflow_name, True, "Workflow terminé avec succès")
    except Exception as e:
        logger.error(f"Execution error: {e}")
        finish_execution(match.workflow_name, False, f"Erreur: {str(e)}")
def update_progress(progress: int, message: str, current: int, total: int):
    """Record progress in the shared status and broadcast it to clients.

    Args:
        progress: overall percentage (0-100).
        message: human-readable step label.
        current: 1-based index of the current step.
        total: total number of steps.
    """
    global execution_status
    execution_status["progress"] = progress
    execution_status["message"] = message
    payload = {
        "progress": progress,
        "step": message,
        "current": current,
        "total": total,
    }
    socketio.emit('execution_progress', payload)
def finish_execution(workflow_name: str, success: bool, message: str):
    """Mark the execution finished, tag history, notify clients.

    Args:
        workflow_name: display name of the workflow that just ended.
        success: whether it completed without error.
        message: final status message shown to the user.
    """
    global execution_status
    outcome = "completed" if success else "failed"
    execution_status.update(
        running=False,
        progress=100 if success else 0,
        message=message,
    )
    # Tag the most recent history entry with the outcome, if any exists.
    if command_history:
        command_history[-1]["status"] = outcome
    socketio.emit('execution_completed', {
        "workflow": workflow_name,
        "success": success,
        "message": message
    })
# =============================================================================
# Main
# =============================================================================
if __name__ == '__main__':
    # Initialise matcher + GPU manager before serving any request.
    init_system()
    # Startup banner (runtime string — kept verbatim).
    print("""
╔════════════════════════════════════════════════════════════╗
║ RPA Vision V3 - Interface de Commande ║
║ ║
║ 🌐 http://localhost:5002 ║
║ ║
║ Ctrl+C pour arrêter ║
╚════════════════════════════════════════════════════════════╝
""")
    # allow_unsafe_werkzeug: dev server only; deliberately bound to localhost.
    socketio.run(app, host='127.0.0.1', port=5002, debug=False, allow_unsafe_werkzeug=True)