v1.0 - Version stable: multi-PC, détection UI-DETR-1, 3 modes exécution
- Frontend v4 accessible sur réseau local (192.168.1.40) - Ports ouverts: 3002 (frontend), 5001 (backend), 5004 (dashboard) - Ollama GPU fonctionnel - Self-healing interactif - Dashboard confiance Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
43
visual_workflow_builder/backend/api/__init__.py
Normal file
43
visual_workflow_builder/backend/api/__init__.py
Normal file
@@ -0,0 +1,43 @@
|
||||
"""
|
||||
API package for Visual Workflow Builder
|
||||
|
||||
This package contains all REST API endpoints and WebSocket handlers.
|
||||
"""
|
||||
|
||||
from .workflows import workflows_bp
|
||||
from .errors import (
|
||||
APIError,
|
||||
ValidationError,
|
||||
NotFoundError,
|
||||
BadRequestError,
|
||||
ConflictError,
|
||||
InternalServerError,
|
||||
error_response,
|
||||
ErrorCode
|
||||
)
|
||||
from .validation import (
|
||||
validate_workflow_data,
|
||||
validate_update_data,
|
||||
validate_node_data,
|
||||
validate_edge_data,
|
||||
validate_variable_data,
|
||||
validate_settings_data
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
'workflows_bp',
|
||||
'APIError',
|
||||
'ValidationError',
|
||||
'NotFoundError',
|
||||
'BadRequestError',
|
||||
'ConflictError',
|
||||
'InternalServerError',
|
||||
'error_response',
|
||||
'ErrorCode',
|
||||
'validate_workflow_data',
|
||||
'validate_update_data',
|
||||
'validate_node_data',
|
||||
'validate_edge_data',
|
||||
'validate_variable_data',
|
||||
'validate_settings_data'
|
||||
]
|
||||
426
visual_workflow_builder/backend/api/analytics.py
Normal file
426
visual_workflow_builder/backend/api/analytics.py
Normal file
@@ -0,0 +1,426 @@
|
||||
"""
|
||||
Analytics API endpoints for Visual Workflow Builder.
|
||||
|
||||
Provides analytics data and metrics for workflows executed through the visual builder.
|
||||
|
||||
Exigence: 18.3
|
||||
"""
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from flask import Blueprint, request, jsonify
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Dict, List, Optional, Any
|
||||
|
||||
# Ajouter le chemin racine pour importer les modules core
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent))
|
||||
|
||||
try:
|
||||
from core.analytics.analytics_system import get_analytics_system
|
||||
from core.analytics.integration.execution_integration import get_analytics_integration
|
||||
ANALYTICS_AVAILABLE = True
|
||||
except ImportError:
|
||||
ANALYTICS_AVAILABLE = False
|
||||
|
||||
from services.execution_integration import get_executor
|
||||
from services.serialization import WorkflowDatabase
|
||||
|
||||
# Blueprint pour les endpoints Analytics
|
||||
analytics_bp = Blueprint('analytics', __name__)
|
||||
|
||||
|
||||
@analytics_bp.route('/workflow/<workflow_id>/metrics', methods=['GET'])
def get_workflow_metrics(workflow_id: str):
    """
    Return the metrics of a workflow.

    Requirement: 18.3

    Query Parameters:
        - hours: time window in hours (default: 24)
        - metric_type: metric type (execution, step, performance)

    Responses:
        200 with the analytics payload; 400 for a malformed "hours" value;
        404 when no analytics data exists; 503 when analytics is unavailable.
    """
    try:
        if not ANALYTICS_AVAILABLE:
            return jsonify({
                'success': False,
                'error': 'Analytics system not available'
            }), 503

        # Fix: a non-integer "hours" used to raise ValueError and surface as a
        # generic 500; it is a client error and must answer 400.
        try:
            hours = int(request.args.get('hours', 24))
        except (TypeError, ValueError):
            return jsonify({
                'success': False,
                'error': 'Invalid "hours" parameter: must be an integer'
            }), 400
        metric_type = request.args.get('metric_type', 'execution')

        # Fetch the metrics through the executor service
        executor = get_executor()
        analytics_data = executor.get_workflow_analytics(workflow_id, hours)

        if analytics_data is None:
            return jsonify({
                'success': False,
                'error': 'No analytics data available'
            }), 404

        return jsonify({
            'success': True,
            'workflow_id': workflow_id,
            'time_window_hours': hours,
            'metric_type': metric_type,
            'data': analytics_data
        })

    except Exception as e:
        # Last-resort guard: the endpoint always answers JSON.
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500
|
||||
|
||||
|
||||
@analytics_bp.route('/workflow/<workflow_id>/performance', methods=['GET'])
def get_workflow_performance(workflow_id: str):
    """
    Return performance and success-rate statistics for one workflow.

    Requirement: 18.3
    """
    try:
        if not ANALYTICS_AVAILABLE:
            return jsonify({
                'success': False,
                'error': 'Analytics system not available'
            }), 503

        window_hours = int(request.args.get('hours', 24))
        system = get_analytics_system()

        # Analysis window: [now - window_hours, now]
        window_end = datetime.now()
        window_start = window_end - timedelta(hours=window_hours)

        perf = system.performance_analyzer.analyze_performance(
            workflow_id=workflow_id,
            start_time=window_start,
            end_time=window_end,
        )
        rates = system.success_rate_calculator.calculate_success_rate(
            workflow_id=workflow_id,
            time_window_hours=window_hours,
        )

        return jsonify({
            'success': True,
            'workflow_id': workflow_id,
            'time_window_hours': window_hours,
            'performance': perf.to_dict() if perf else None,
            'success_rate': rates.to_dict() if rates else None,
        })

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500
|
||||
|
||||
|
||||
@analytics_bp.route('/workflow/<workflow_id>/executions', methods=['GET'])
def get_workflow_executions(workflow_id: str):
    """
    Return the execution history of a workflow, enriched with derived metrics.

    Requirement: 18.3
    """
    try:
        runs = get_executor().list_executions(workflow_id=workflow_id)

        # Attach derived metrics to every run that carries analytics data.
        for run in runs:
            if run.get('analytics_data'):
                run['calculated_metrics'] = _calculate_execution_metrics(run)

        return jsonify({
            'success': True,
            'workflow_id': workflow_id,
            'executions': runs,
            'total_count': len(runs),
        })

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500
|
||||
|
||||
|
||||
@analytics_bp.route('/dashboard/workflows', methods=['GET'])
def get_workflows_dashboard():
    """
    Return dashboard data covering every stored workflow.

    Requirement: 18.3

    Responses:
        200 with a {'summary': ..., 'workflows': [...]} payload; individual
        workflow failures are embedded as {'error': ...} entries so one bad
        workflow never breaks the whole dashboard; 500/503 on infra errors.
    """
    try:
        if not ANALYTICS_AVAILABLE:
            return jsonify({
                'success': False,
                'error': 'Analytics system not available'
            }), 503

        hours = int(request.args.get('hours', 24))

        # Load the full workflow list
        try:
            db = WorkflowDatabase()
            all_workflows = db.list_workflows()
        except Exception as e:
            return jsonify({
                'success': False,
                'error': f'Database error: {str(e)}'
            }), 500

        dashboard_data = {
            'summary': {
                'total_workflows': len(all_workflows),
                'time_window_hours': hours,
                'generated_at': datetime.now().isoformat()
            },
            'workflows': []
        }

        try:
            analytics_system = get_analytics_system()
        except Exception as e:
            return jsonify({
                'success': False,
                'error': f'Analytics system error: {str(e)}'
            }), 503

        # Fix: the executor is loop-invariant — fetch it once instead of once
        # per workflow inside the loop below.
        executor = get_executor()

        # Collect metrics for each workflow
        for workflow_info in all_workflows:
            workflow_id = workflow_info['workflow_id']

            try:
                # Performance metrics over [now - hours, now]
                end_time = datetime.now()
                start_time = end_time - timedelta(hours=hours)

                performance_stats = analytics_system.performance_analyzer.analyze_performance(
                    workflow_id=workflow_id,
                    start_time=start_time,
                    end_time=end_time
                )

                success_stats = analytics_system.success_rate_calculator.calculate_success_rate(
                    workflow_id=workflow_id,
                    time_window_hours=hours
                )

                # Five most recent executions
                recent_executions = executor.list_executions(workflow_id=workflow_id)[:5]

                workflow_metrics = {
                    'workflow_id': workflow_id,
                    'name': workflow_info.get('name', 'Unnamed Workflow'),
                    'performance': performance_stats.to_dict() if performance_stats else None,
                    'success_rate': success_stats.to_dict() if success_stats else None,
                    'recent_executions': recent_executions,
                    'last_execution': recent_executions[0] if recent_executions else None
                }

                dashboard_data['workflows'].append(workflow_metrics)

            except Exception as e:
                # Keep going even when a single workflow fails
                dashboard_data['workflows'].append({
                    'workflow_id': workflow_id,
                    'name': workflow_info.get('name', 'Unnamed Workflow'),
                    'error': str(e)
                })

        return jsonify({
            'success': True,
            'dashboard': dashboard_data
        })

    except Exception as e:
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500
|
||||
|
||||
|
||||
@analytics_bp.route('/dashboard/summary', methods=['GET'])
def get_dashboard_summary():
    """
    Return a global summary of execution metrics over a time window.

    Requirement: 18.3

    Query Parameters:
        - hours: time window in hours (default: 24)
    """
    try:
        if not ANALYTICS_AVAILABLE:
            return jsonify({
                'success': False,
                'error': 'Analytics system not available'
            }), 503

        hours = int(request.args.get('hours', 24))

        analytics_system = get_analytics_system()
        executor = get_executor()

        # Window boundaries for filtering executions
        end_time = datetime.now()
        start_time = end_time - timedelta(hours=hours)

        # Fix: the comprehensions used `exec` as the loop variable, shadowing
        # the `exec` builtin — renamed to `run` throughout.
        all_executions = executor.list_executions()
        recent_executions = [
            run for run in all_executions
            if run.get('start_time') and
            datetime.fromisoformat(run['start_time']) >= start_time
        ]

        successful_executions = [
            run for run in recent_executions
            if run.get('status') == 'completed'
        ]

        failed_executions = [
            run for run in recent_executions
            if run.get('status') == 'failed'
        ]

        # Summary metrics
        total_executions = len(recent_executions)
        success_rate = (len(successful_executions) / total_executions * 100) if total_executions > 0 else 0

        # Average duration of successful runs (only those reporting a duration)
        durations = [
            run.get('duration_ms', 0) for run in successful_executions
            if run.get('duration_ms')
        ]
        avg_duration = sum(durations) / len(durations) if durations else 0

        summary = {
            'time_window_hours': hours,
            'total_executions': total_executions,
            'successful_executions': len(successful_executions),
            'failed_executions': len(failed_executions),
            'success_rate_percent': round(success_rate, 2),
            'average_duration_ms': round(avg_duration, 2),
            'generated_at': datetime.now().isoformat()
        }

        return jsonify({
            'success': True,
            'summary': summary
        })

    except Exception as e:
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500
|
||||
|
||||
|
||||
@analytics_bp.route('/insights', methods=['GET'])
def get_analytics_insights():
    """
    Return automatically generated analytics insights.

    Requirement: 18.3
    """
    try:
        if not ANALYTICS_AVAILABLE:
            return jsonify({
                'success': False,
                'error': 'Analytics system not available'
            }), 503

        # One week by default
        window_hours = int(request.args.get('hours', 168))

        try:
            system = get_analytics_system()
        except Exception as e:
            return jsonify({
                'success': False,
                'error': f'Analytics system error: {str(e)}'
            }), 503

        window_end = datetime.now()
        window_start = window_end - timedelta(hours=window_hours)

        # Generate the insights over the requested window
        try:
            insights = system.insight_generator.generate_insights(
                start_time=window_start,
                end_time=window_end,
            )
        except Exception as e:
            return jsonify({
                'success': False,
                'error': f'Insights generation error: {str(e)}'
            }), 500

        serialized = [insight.to_dict() for insight in insights]
        return jsonify({
            'success': True,
            'time_window_hours': window_hours,
            'insights': serialized,
            'generated_at': datetime.now().isoformat(),
        })

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500
|
||||
|
||||
|
||||
def _calculate_execution_metrics(execution: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""
|
||||
Calcule des métriques supplémentaires pour une exécution.
|
||||
|
||||
Args:
|
||||
execution: Données d'exécution
|
||||
|
||||
Returns:
|
||||
Métriques calculées
|
||||
"""
|
||||
metrics = {}
|
||||
|
||||
try:
|
||||
# Efficacité (steps completed / steps total)
|
||||
steps_completed = execution.get('steps_completed', 0)
|
||||
steps_total = execution.get('steps_total', 0)
|
||||
if steps_total > 0:
|
||||
metrics['efficiency_percent'] = round((steps_completed / steps_total) * 100, 2)
|
||||
|
||||
# Vitesse (steps par seconde)
|
||||
duration_ms = execution.get('duration_ms', 0)
|
||||
if duration_ms > 0 and steps_completed > 0:
|
||||
duration_sec = duration_ms / 1000
|
||||
metrics['steps_per_second'] = round(steps_completed / duration_sec, 2)
|
||||
|
||||
# Statut de santé
|
||||
if execution.get('status') == 'completed':
|
||||
metrics['health_status'] = 'healthy'
|
||||
elif execution.get('status') == 'failed':
|
||||
metrics['health_status'] = 'unhealthy'
|
||||
else:
|
||||
metrics['health_status'] = 'unknown'
|
||||
|
||||
except Exception as e:
|
||||
metrics['calculation_error'] = str(e)
|
||||
|
||||
return metrics
|
||||
|
||||
|
||||
# Registration helper so app factories don't have to import analytics_bp directly.
def register_analytics_blueprint(app):
    """Register the Analytics blueprint on the given Flask application."""
    app.register_blueprint(analytics_bp)
|
||||
243
visual_workflow_builder/backend/api/anchor_images.py
Normal file
243
visual_workflow_builder/backend/api/anchor_images.py
Normal file
@@ -0,0 +1,243 @@
|
||||
"""
|
||||
API REST pour la gestion des images d'ancres visuelles.
|
||||
|
||||
Auteur : Dom, Alice, Kiro - 21 janvier 2026
|
||||
|
||||
Endpoints :
|
||||
POST /api/anchor-images - Upload d'une image d'ancre
|
||||
GET /api/anchor-images/{id}/thumbnail - Récupérer la miniature
|
||||
GET /api/anchor-images/{id}/original - Récupérer l'image originale
|
||||
GET /api/anchor-images/{id}/metadata - Récupérer les métadonnées
|
||||
DELETE /api/anchor-images/{id} - Supprimer une ancre
|
||||
GET /api/anchor-images - Lister les ancres
|
||||
GET /api/anchor-images/stats - Statistiques de stockage
|
||||
"""
|
||||
|
||||
from flask import Blueprint, request, jsonify, send_file, abort
|
||||
from services.anchor_image_service import (
|
||||
save_anchor_image,
|
||||
get_thumbnail_path,
|
||||
get_original_path,
|
||||
get_anchor_metadata,
|
||||
delete_anchor_image,
|
||||
list_anchor_images,
|
||||
get_storage_stats,
|
||||
)
|
||||
|
||||
anchor_images_bp = Blueprint('anchor_images', __name__)
|
||||
|
||||
|
||||
@anchor_images_bp.route('/api/anchor-images', methods=['POST'])
def upload_anchor_image():
    """
    Upload a visual anchor image.

    Expected JSON body:
        {
            "image_base64": "data:image/png;base64,...",
            "bounding_box": {"x": 100, "y": 200, "width": 150, "height": 50},
            "anchor_id": "anchor_xxx" (optional),
            "metadata": {...} (optional)
        }

    Returns:
        201 with {"success": true, "anchor_id": ..., "thumbnail_url": ...,
        "original_url": ..., "metadata": {...}} on success;
        400 on validation errors; 500 on unexpected failures.
    """
    try:
        data = request.get_json()

        if not data:
            return jsonify({
                'success': False,
                'error': 'Données JSON requises'
            }), 400

        image_base64 = data.get('image_base64')
        bounding_box = data.get('bounding_box')

        if not image_base64:
            return jsonify({
                'success': False,
                'error': 'image_base64 est requis'
            }), 400

        if not bounding_box:
            return jsonify({
                'success': False,
                'error': 'bounding_box est requis'
            }), 400

        # Validate the bounding box shape before handing it to the service
        required_keys = ['x', 'y', 'width', 'height']
        for key in required_keys:
            if key not in bounding_box:
                return jsonify({
                    'success': False,
                    'error': f'bounding_box.{key} est requis'
                }), 400

        anchor_id = data.get('anchor_id')
        metadata = data.get('metadata')

        result = save_anchor_image(anchor_id, image_base64, bounding_box, metadata)

        return jsonify(result), 201

    except ValueError as e:
        # Service-level validation failures map to client errors
        return jsonify({
            'success': False,
            'error': str(e)
        }), 400
    except Exception as e:
        # Fix: report through logging (with traceback) instead of print(),
        # so the error reaches the configured handlers rather than stdout.
        import logging
        logging.getLogger(__name__).exception("Erreur upload anchor image: %s", e)
        return jsonify({
            'success': False,
            'error': f'Erreur serveur: {str(e)}'
        }), 500
|
||||
|
||||
|
||||
@anchor_images_bp.route('/api/anchor-images/<anchor_id>/thumbnail', methods=['GET'])
def get_thumbnail(anchor_id: str):
    """
    Serve the JPEG thumbnail of an anchor.

    Args:
        anchor_id: Anchor identifier.

    Returns:
        Binary JPEG response, or 404 when the anchor is unknown.
    """
    thumb = get_thumbnail_path(anchor_id)
    if not thumb:
        abort(404, description=f"Ancre '{anchor_id}' non trouvée")

    return send_file(
        thumb,
        mimetype='image/jpeg',
        as_attachment=False,
        download_name=f'{anchor_id}_thumbnail.jpg',
    )
|
||||
|
||||
|
||||
@anchor_images_bp.route('/api/anchor-images/<anchor_id>/original', methods=['GET'])
def get_original(anchor_id: str):
    """
    Serve the original PNG image of an anchor.

    Args:
        anchor_id: Anchor identifier.

    Returns:
        Binary PNG response, or 404 when the anchor is unknown.
    """
    original = get_original_path(anchor_id)
    if not original:
        abort(404, description=f"Ancre '{anchor_id}' non trouvée")

    return send_file(
        original,
        mimetype='image/png',
        as_attachment=False,
        download_name=f'{anchor_id}_original.png',
    )
|
||||
|
||||
|
||||
@anchor_images_bp.route('/api/anchor-images/<anchor_id>/metadata', methods=['GET'])
def get_metadata(anchor_id: str):
    """
    Return the metadata of an anchor as JSON.

    Args:
        anchor_id: Anchor identifier.
    """
    info = get_anchor_metadata(anchor_id)
    if not info:
        return jsonify({
            'success': False,
            'error': f"Ancre '{anchor_id}' non trouvée",
        }), 404

    return jsonify({'success': True, 'metadata': info})
|
||||
|
||||
|
||||
@anchor_images_bp.route('/api/anchor-images/<anchor_id>', methods=['DELETE'])
def delete_anchor(anchor_id: str):
    """
    Delete an anchor and every file attached to it.

    Args:
        anchor_id: Anchor identifier.

    Returns:
        Confirmation JSON, or 404 when the anchor is unknown.
    """
    if not delete_anchor_image(anchor_id):
        return jsonify({
            'success': False,
            'error': f"Ancre '{anchor_id}' non trouvée",
        }), 404

    return jsonify({
        'success': True,
        'message': f"Ancre '{anchor_id}' supprimée",
    })
|
||||
|
||||
|
||||
@anchor_images_bp.route('/api/anchor-images', methods=['GET'])
def list_anchors():
    """
    List every stored anchor image, with limit/offset pagination.

    Query params:
        limit: maximum number of anchors to return (default: 100)
        offset: pagination offset (default: 0)
    """
    page_size = request.args.get('limit', 100, type=int)
    start = request.args.get('offset', 0, type=int)

    anchors = list_anchor_images()
    page = anchors[start:start + page_size]

    return jsonify({
        'success': True,
        'anchors': page,
        'total': len(anchors),
        'limit': page_size,
        'offset': start,
    })
|
||||
|
||||
|
||||
@anchor_images_bp.route('/api/anchor-images/stats', methods=['GET'])
def storage_stats():
    """Return storage statistics for the anchor-image store as JSON."""
    return jsonify({
        'success': True,
        'stats': get_storage_stats(),
    })
|
||||
444
visual_workflow_builder/backend/api/element_detection.py
Normal file
444
visual_workflow_builder/backend/api/element_detection.py
Normal file
@@ -0,0 +1,444 @@
|
||||
"""
|
||||
API endpoints pour la détection d'éléments UI
|
||||
Intégration avec le UIDetector du core RPA Vision V3
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from flask import Blueprint, request, jsonify
|
||||
from typing import Dict, Any, List
|
||||
import asyncio
|
||||
import logging
|
||||
import base64
|
||||
from io import BytesIO
|
||||
from PIL import Image
|
||||
import numpy as np
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
# Add project root to path to import core modules (best-effort)
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../')))
|
||||
|
||||
try:
|
||||
from core.detection.ui_detector import UIDetector
|
||||
from core.models import Point, BBox, UIElement
|
||||
CORE_AVAILABLE = True
|
||||
except ImportError as e:
|
||||
print(f"Warning: Core modules not available (element_detection): {e}")
|
||||
CORE_AVAILABLE = False
|
||||
UIDetector = None # type: ignore
|
||||
Point = None # type: ignore
|
||||
BBox = None # type: ignore
|
||||
UIElement = None # type: ignore
|
||||
from core.capture.screen_capturer import ScreenCapturer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Créer le blueprint pour les endpoints de détection
|
||||
element_detection_bp = Blueprint('element_detection', __name__, url_prefix='/api/detection')
|
||||
|
||||
# Instance globale du UIDetector (sera initialisée dans app.py)
|
||||
ui_detector: UIDetector = None
|
||||
screen_capturer: ScreenCapturer = None
|
||||
|
||||
def init_element_detection(detector: UIDetector, capturer: ScreenCapturer):
    """Inject the UIDetector and ScreenCapturer dependencies for this module.

    Called once from app.py during startup; the endpoints below read these
    module-level globals on every request.
    """
    global ui_detector, screen_capturer
    ui_detector = detector
    screen_capturer = capturer
    logger.info("UIDetector initialisé pour l'API")
|
||||
|
||||
@element_detection_bp.route('/elements', methods=['POST'])
def detect_elements():
    """
    Detect UI elements in a screenshot.

    Body:
        {
            "screenshot": "base64_image_data",
            "region": {"x": 0, "y": 0, "width": 1920, "height": 1080},  # optional
            "element_types": ["button", "input", "link"],               # optional
            "confidence_threshold": 0.7                                  # optional
        }

    Returns:
        {
            "elements": [{"id", "type", "bounds", "confidence", "text",
                          "attributes", "visual_features"}, ...],
            "processing_time": 1.23,
            "total_elements": 15,
            "filtered_elements": 5
        }
    """
    import time

    try:
        if not ui_detector:
            return jsonify({'error': 'UIDetector non initialisé'}), 500

        data = request.get_json()
        if not data or 'screenshot' not in data:
            return jsonify({'error': 'Screenshot requis'}), 400

        # Decode the base64 image (with or without a data-URI prefix)
        try:
            screenshot_data = data['screenshot']
            if screenshot_data.startswith('data:image'):
                screenshot_data = screenshot_data.split(',')[1]

            image_bytes = base64.b64decode(screenshot_data)
            image = Image.open(BytesIO(image_bytes))
            image_array = np.array(image)

        except Exception as e:
            return jsonify({'error': f'Erreur de décodage image: {str(e)}'}), 400

        # Optional parameters
        region = data.get('region')
        element_types = data.get('element_types', [])
        confidence_threshold = data.get('confidence_threshold', 0.7)

        # Convert the region if provided
        detection_region = None
        if region:
            # Fix: BBox is None when the core modules failed to import; calling
            # it used to raise an opaque TypeError swallowed into a generic 500.
            if BBox is None:
                return jsonify({'error': 'Core modules non disponibles'}), 500
            detection_region = BBox(
                x=region['x'],
                y=region['y'],
                width=region['width'],
                height=region['height']
            )

        start_time = time.time()

        # Fix: asyncio.run creates, runs and disposes of the loop properly.
        # The previous new_event_loop/set_event_loop pattern left a *closed*
        # loop installed for this thread after every request.
        detected_elements = asyncio.run(
            ui_detector.detect_elements(
                image_array,
                region=detection_region,
                element_types=element_types if element_types else None
            )
        )

        processing_time = time.time() - start_time

        # Keep only elements above the confidence threshold
        filtered_elements = [
            elem for elem in detected_elements
            if elem.confidence >= confidence_threshold
        ]

        # Serialise the elements for JSON
        elements_data = []
        for elem in filtered_elements:
            elements_data.append({
                'id': f"element_{elem.bounds.x}_{elem.bounds.y}",
                'type': elem.element_type,
                'bounds': {
                    'x': elem.bounds.x,
                    'y': elem.bounds.y,
                    'width': elem.bounds.width,
                    'height': elem.bounds.height
                },
                'confidence': round(elem.confidence, 3),
                'text': elem.text_content or '',
                'attributes': elem.attributes or {},
                'visual_features': {
                    # Optional attributes: default when the detector omits them
                    'color_dominant': getattr(elem, 'dominant_color', '#000000'),
                    'has_border': getattr(elem, 'has_border', False),
                    'has_shadow': getattr(elem, 'has_shadow', False)
                }
            })

        result = {
            'elements': elements_data,
            'processing_time': round(processing_time, 3),
            'total_elements': len(detected_elements),
            'filtered_elements': len(filtered_elements)
        }

        logger.info(f"Détection terminée: {len(filtered_elements)} éléments trouvés en {processing_time:.2f}s")
        return jsonify(result), 200

    except ValueError as e:
        logger.warning(f"Erreur de validation: {e}")
        return jsonify({'error': str(e)}), 400

    except Exception as e:
        logger.error(f"Erreur lors de la détection: {e}")
        return jsonify({'error': 'Erreur interne du serveur'}), 500
|
||||
|
||||
@element_detection_bp.route('/element-at-position', methods=['POST'])
def detect_element_at_position():
    """
    Detect the UI element at a specific position.

    Body:
        {
            "position": {"x": 100, "y": 200},
            "screenshot": "base64_image_data",  # optional, auto-capture if absent
            "tolerance": 5                       # optional, tolerance in pixels
        }

    Returns:
        {"element": {...} or null, "processing_time": 0.45}
    """
    import time

    try:
        if not ui_detector:
            return jsonify({'error': 'UIDetector non initialisé'}), 500

        data = request.get_json()
        if not data or 'position' not in data:
            return jsonify({'error': 'Position requise'}), 400

        position_data = data['position']
        if 'x' not in position_data or 'y' not in position_data:
            return jsonify({'error': 'Coordonnées x et y requises'}), 400

        position = Point(x=position_data['x'], y=position_data['y'])
        tolerance = data.get('tolerance', 5)

        # Obtain the image: use the provided screenshot, or capture the screen
        if 'screenshot' in data:
            try:
                screenshot_data = data['screenshot']
                if screenshot_data.startswith('data:image'):
                    screenshot_data = screenshot_data.split(',')[1]

                image_bytes = base64.b64decode(screenshot_data)
                image = Image.open(BytesIO(image_bytes))
                image_array = np.array(image)

            except Exception as e:
                return jsonify({'error': f'Erreur de décodage image: {str(e)}'}), 400
        else:
            if not screen_capturer:
                return jsonify({'error': 'ScreenCapturer non disponible'}), 500

            # Fix: asyncio.run replaces the manual new_event_loop/set_event_loop
            # pattern, which left a *closed* loop installed for this thread.
            screenshot_result = asyncio.run(screen_capturer.capture_screen())
            image_array = screenshot_result.image_array

        start_time = time.time()

        # Same fix as above: one properly managed loop per await
        element = asyncio.run(
            ui_detector.detect_element_at_position(
                image_array,
                position,
                tolerance=tolerance
            )
        )

        processing_time = time.time() - start_time

        if not element:
            return jsonify({
                'element': None,
                'processing_time': round(processing_time, 3),
                'message': 'Aucun élément trouvé à cette position'
            }), 200

        # Serialise the element for JSON
        element_dict = {
            'id': f"element_{element.bounds.x}_{element.bounds.y}",
            'type': element.element_type,
            'bounds': {
                'x': element.bounds.x,
                'y': element.bounds.y,
                'width': element.bounds.width,
                'height': element.bounds.height
            },
            'confidence': round(element.confidence, 3),
            'text': element.text_content or '',
            'attributes': element.attributes or {},
            'visual_features': {
                # Optional attributes: default when the detector omits them
                'color_dominant': getattr(element, 'dominant_color', '#000000'),
                'has_border': getattr(element, 'has_border', False),
                'has_shadow': getattr(element, 'has_shadow', False)
            }
        }

        result = {
            'element': element_dict,
            'processing_time': round(processing_time, 3)
        }

        logger.info(f"Élément détecté à ({position.x}, {position.y}): {element.element_type}")
        return jsonify(result), 200

    except ValueError as e:
        logger.warning(f"Erreur de validation: {e}")
        return jsonify({'error': str(e)}), 400

    except Exception as e:
        logger.error(f"Erreur lors de la détection: {e}")
        return jsonify({'error': 'Erreur interne du serveur'}), 500
|
||||
|
||||
@element_detection_bp.route('/element-types', methods=['GET'])
def get_supported_element_types():
    """Return the catalogue of UI element types the detector can recognise.

    Response (200)::

        {"element_types": [{"type": ..., "description": ...,
                            "confidence_threshold": ...}, ...]}

    Responds 500 when the UIDetector is not initialised or on any
    unexpected failure.
    """
    try:
        if not ui_detector:
            return jsonify({'error': 'UIDetector non initialisé'}), 500

        # Static catalogue of supported element kinds (to be adapted to
        # the actual UIDetector implementation).
        catalogue = (
            ('button', 'Boutons cliquables', 0.8),
            ('input', 'Champs de saisie de texte', 0.7),
            ('link', 'Liens hypertexte', 0.75),
            ('image', 'Images et icônes', 0.6),
            ('text', 'Texte statique', 0.5),
            ('dropdown', 'Listes déroulantes', 0.8),
            ('checkbox', 'Cases à cocher', 0.85),
            ('radio', 'Boutons radio', 0.85),
        )
        element_types = [
            {
                'type': kind,
                'description': label,
                'confidence_threshold': threshold,
            }
            for kind, label, threshold in catalogue
        ]

        return jsonify({'element_types': element_types}), 200

    except Exception as e:
        logger.error(f"Erreur lors de la récupération des types: {e}")
        return jsonify({'error': 'Erreur interne du serveur'}), 500
|
||||
|
||||
@element_detection_bp.route('/health', methods=['GET'])
def health_check():
    """Report the health of the element-detection service.

    Response (200)::

        {"status": "healthy"|"unhealthy",
         "detector_initialized": bool,
         "capturer_available": bool}

    Responds 500 with ``{"status": "error", "error": ...}`` on failure.
    """
    try:
        payload = {
            'status': 'healthy' if ui_detector else 'unhealthy',
            'detector_initialized': ui_detector is not None,
            'capturer_available': screen_capturer is not None,
        }
        return jsonify(payload), 200
    except Exception as e:
        logger.error(f"Erreur lors du health check: {e}")
        return jsonify({'status': 'error', 'error': str(e)}), 500
|
||||
|
||||
# Error handlers for the blueprint
|
||||
@element_detection_bp.errorhandler(404)
def not_found(error):
    """Blueprint-level 404 handler: return the error as a JSON body."""
    return jsonify({'error': 'Endpoint non trouvé'}), 404
|
||||
|
||||
@element_detection_bp.errorhandler(405)
def method_not_allowed(error):
    """Blueprint-level 405 handler: return the error as a JSON body."""
    return jsonify({'error': 'Méthode non autorisée'}), 405
|
||||
|
||||
@element_detection_bp.errorhandler(500)
def internal_error(error):
    """Blueprint-level 500 handler: return a generic JSON error body."""
    return jsonify({'error': 'Erreur interne du serveur'}), 500
|
||||
161
visual_workflow_builder/backend/api/errors.py
Normal file
161
visual_workflow_builder/backend/api/errors.py
Normal file
@@ -0,0 +1,161 @@
|
||||
"""
|
||||
Error handling for API
|
||||
|
||||
Defines custom exceptions and error response formatting.
|
||||
"""
|
||||
|
||||
from flask import jsonify
|
||||
from typing import Dict, Any
|
||||
import logging
|
||||
|
||||
|
||||
class APIError(Exception):
    """Common base for all API-level exceptions.

    Carries a human-readable ``message`` and the HTTP ``status_code``
    that should be sent back to the client (defaults to 500).
    """

    def __init__(self, message: str, status_code: int = 500) -> None:
        Exception.__init__(self, message)
        self.message = message
        self.status_code = status_code
|
||||
|
||||
|
||||
class ValidationError(APIError):
    """Request payload failed validation (HTTP 400)."""

    def __init__(self, message: str) -> None:
        super().__init__(message, 400)
|
||||
|
||||
|
||||
class NotFoundError(APIError):
    """Requested resource does not exist (HTTP 404)."""

    def __init__(self, message: str) -> None:
        super().__init__(message, 404)
|
||||
|
||||
|
||||
class BadRequestError(APIError):
    """Request is malformed (HTTP 400)."""

    def __init__(self, message: str) -> None:
        super().__init__(message, 400)
|
||||
|
||||
|
||||
class ConflictError(APIError):
    """Request conflicts with existing state, e.g. a duplicate resource (HTTP 409)."""

    def __init__(self, message: str) -> None:
        super().__init__(message, 409)
|
||||
|
||||
|
||||
class InternalServerError(APIError):
    """Unexpected server-side failure (HTTP 500)."""

    def __init__(self, message: str) -> None:
        super().__init__(message, 500)
|
||||
|
||||
|
||||
def error_response(status_code: int, message: str, details: Dict[str, Any] = None) -> tuple:
    """Build a standardized JSON error response.

    Args:
        status_code: HTTP status code to return.
        message: Human-readable error message.
        details: Optional mapping with extra error context.

    Returns:
        Tuple of ``(flask response, status_code)`` with body
        ``{"error": {"code": ..., "message": ..., ["details": ...]}}``.
    """
    error_body: Dict[str, Any] = {'code': status_code, 'message': message}
    if details:
        error_body['details'] = details
    return jsonify({'error': error_body}), status_code
|
||||
|
||||
|
||||
# Error code constants
|
||||
class ErrorCode:
    """Standard numeric error codes, grouped by category in bands of 1000.

    Pair each code with :func:`get_error_message` for a human-readable
    description.
    """

    # Validation errors (1000-1999)
    MISSING_REQUIRED_PARAMETER = 1001
    INVALID_PARAMETER_TYPE = 1002
    INVALID_VARIABLE_REFERENCE = 1003
    CIRCULAR_DEPENDENCY = 1004
    DISCONNECTED_NODE = 1005
    INVALID_EDGE_CONNECTION = 1006
    DUPLICATE_VARIABLE_NAME = 1007

    # Serialization errors (2000-2999)
    INVALID_JSON_FORMAT = 2001
    MISSING_REQUIRED_FIELD = 2002
    VERSION_INCOMPATIBLE = 2003
    DESERIALIZATION_FAILED = 2004

    # Execution errors (3000-3999)
    CONVERSION_FAILED = 3001
    EXECUTION_FAILED = 3002
    TARGET_NOT_FOUND = 3003
    TIMEOUT_EXCEEDED = 3004

    # Network errors (4000-4999)
    CONNECTION_FAILED = 4001
    TIMEOUT = 4002
    SERVER_ERROR = 4003

    # Resource errors (5000-5999)
    RESOURCE_NOT_FOUND = 5001
    RESOURCE_ALREADY_EXISTS = 5002
    RESOURCE_LOCKED = 5003
|
||||
|
||||
|
||||
def get_error_message(error_code: int) -> str:
    """Translate a numeric :class:`ErrorCode` value into readable text.

    Unknown codes map to ``"Unknown error"``.
    """
    catalogue = {
        ErrorCode.MISSING_REQUIRED_PARAMETER: "A required parameter is missing",
        ErrorCode.INVALID_PARAMETER_TYPE: "Parameter has invalid type",
        ErrorCode.INVALID_VARIABLE_REFERENCE: "Variable reference is invalid",
        ErrorCode.CIRCULAR_DEPENDENCY: "Circular dependency detected",
        ErrorCode.DISCONNECTED_NODE: "Node is not connected to the workflow",
        ErrorCode.INVALID_EDGE_CONNECTION: "Edge connection is invalid",
        ErrorCode.DUPLICATE_VARIABLE_NAME: "Variable name already exists",
        ErrorCode.INVALID_JSON_FORMAT: "JSON format is invalid",
        ErrorCode.MISSING_REQUIRED_FIELD: "Required field is missing",
        ErrorCode.VERSION_INCOMPATIBLE: "Version is incompatible",
        ErrorCode.DESERIALIZATION_FAILED: "Failed to deserialize data",
        ErrorCode.CONVERSION_FAILED: "Failed to convert workflow",
        ErrorCode.EXECUTION_FAILED: "Workflow execution failed",
        ErrorCode.TARGET_NOT_FOUND: "Target element not found",
        ErrorCode.TIMEOUT_EXCEEDED: "Operation timeout exceeded",
        ErrorCode.CONNECTION_FAILED: "Connection failed",
        ErrorCode.TIMEOUT: "Request timeout",
        ErrorCode.SERVER_ERROR: "Internal server error",
        ErrorCode.RESOURCE_NOT_FOUND: "Resource not found",
        ErrorCode.RESOURCE_ALREADY_EXISTS: "Resource already exists",
        ErrorCode.RESOURCE_LOCKED: "Resource is locked",
    }
    return catalogue.get(error_code, "Unknown error")
|
||||
|
||||
|
||||
def handle_api_error(func):
    """
    Decorator to handle API errors consistently.

    Catches :class:`APIError` subclasses raised by the wrapped view and
    converts them into standardized JSON responses via
    :func:`error_response`; any other exception becomes a generic 500.
    The wrapped function's signature and success-path return value are
    preserved.
    """
    from functools import wraps

    # Resolve the logger once at decoration time instead of on every
    # failure, as the original did inside each except branch.
    logger = logging.getLogger(__name__)

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except APIError as e:
            logger.error(f"API Error in {func.__name__}: {e.message}")
            return error_response(e.status_code, e.message)
        except Exception as e:
            # logger.exception records the traceback, which the plain
            # logger.error call in the original version dropped.
            logger.exception(f"Unexpected error in {func.__name__}: {str(e)}")
            return error_response(500, "Internal server error")

    return wrapper
|
||||
439
visual_workflow_builder/backend/api/import_export.py
Normal file
439
visual_workflow_builder/backend/api/import_export.py
Normal file
@@ -0,0 +1,439 @@
|
||||
"""
|
||||
API endpoints pour l'import/export de workflows
|
||||
"""
|
||||
|
||||
import io
import json
import os
import tempfile
from datetime import datetime
from typing import Dict, Any, List, Optional

import yaml
from flask import Blueprint, request, jsonify, send_file
from werkzeug.utils import secure_filename

from models.visual_workflow import VisualWorkflow
from services.workflow_service import WorkflowService
|
||||
|
||||
import_export_bp = Blueprint('import_export', __name__)
|
||||
|
||||
class ImportExportService:
    """Service handling import and export of visual workflows.

    Export produces a plain ``dict`` ready for JSON/YAML serialization;
    import parses JSON or YAML text, validates and migrates the payload,
    then rebuilds a :class:`VisualWorkflow` instance from it.
    """

    @staticmethod
    def export_workflow(workflow: VisualWorkflow, format_type: str = 'json',
                        include_metadata: bool = True,
                        include_template_info: bool = True,
                        minify: bool = False) -> Dict[str, Any]:
        """
        Export a workflow to a serializable dictionary.

        Args:
            workflow: The workflow to export.
            format_type: Export format ('json' or 'yaml').
                NOTE(review): currently unused here — serialization happens
                in the route; confirm whether it should affect the payload.
            include_metadata: Include export metadata (counts, timestamp).
            include_template_info: Include template provenance info.
            minify: Minify the JSON.
                NOTE(review): also unused at this level; the route applies it.

        Returns:
            Dict containing the export payload.
        """
        # Base payload: identity plus the serialized graph content.
        export_data = {
            'version': '1.0.0',
            'name': workflow.name,
            'description': workflow.description,
            'nodes': [node.to_dict() for node in workflow.nodes],
            'edges': [edge.to_dict() for edge in workflow.edges],
            'variables': [var.to_dict() for var in workflow.variables]
        }

        # Optionally attach export metadata (provenance and counts).
        if include_metadata:
            export_data['metadata'] = {
                'exported_at': datetime.now().isoformat(),
                'exported_by': 'Visual Workflow Builder',
                'node_count': len(workflow.nodes),
                'edge_count': len(workflow.edges),
                'variable_count': len(workflow.variables)
            }

        # Optionally record which template the workflow was created from.
        if include_template_info and hasattr(workflow, 'template_id') and workflow.template_id:
            export_data['template'] = {
                'id': workflow.template_id,
                'name': getattr(workflow, 'template_name', None)
            }

        return export_data

    @staticmethod
    def import_workflow(data: str, filename: Optional[str] = None) -> Dict[str, Any]:
        """
        Import a workflow from raw text data.

        Args:
            data: Workflow payload (JSON or YAML text).
            filename: Original file name, used as a format-detection hint.

        Returns:
            Dict with the import result: ``success``, plus either the
            serialized ``workflow`` or the accumulated ``errors``; both
            branches carry any ``warnings``.
        """
        errors = []
        warnings = []

        try:
            # Decide whether the payload is JSON or YAML.
            format_type = ImportExportService._detect_format(data, filename)

            # Parse the raw text.
            if format_type == 'yaml':
                parsed_data = yaml.safe_load(data)
            else:
                parsed_data = json.loads(data)

            # Validate the overall structure before building any objects.
            validation_result = ImportExportService._validate_structure(parsed_data)
            errors.extend(validation_result['errors'])
            warnings.extend(validation_result['warnings'])

            if errors:
                return {
                    'success': False,
                    'errors': errors,
                    'warnings': warnings
                }

            # Upgrade older payloads to the current schema if needed.
            migrated_data = ImportExportService._migrate_workflow(parsed_data, warnings)

            # Build the in-memory workflow object.
            workflow = ImportExportService._create_workflow_from_data(migrated_data)

            return {
                'success': True,
                'workflow': workflow.to_dict(),
                'warnings': warnings
            }

        except Exception as e:
            errors.append(f"Erreur de parsing: {str(e)}")
            return {
                'success': False,
                'errors': errors,
                'warnings': warnings
            }

    @staticmethod
    def _detect_format(data: str, filename: Optional[str] = None) -> str:
        """Detect the payload format, preferring the file extension."""
        if filename:
            ext = filename.lower().split('.')[-1]
            if ext in ['yaml', 'yml']:
                return 'yaml'

        # Fall back to sniffing the content: JSON starts with '{' or '['.
        stripped = data.strip()
        if stripped.startswith('{') or stripped.startswith('['):
            return 'json'

        return 'yaml'

    @staticmethod
    def _validate_structure(data: Dict[str, Any]) -> Dict[str, List[str]]:
        """Validate the workflow structure; returns errors and warnings lists."""
        errors = []
        warnings = []

        # Mandatory checks: top-level object with nodes/edges arrays.
        if not isinstance(data, dict):
            errors.append('Le fichier doit contenir un objet JSON valide')
            return {'errors': errors, 'warnings': warnings}

        if 'nodes' not in data or not isinstance(data['nodes'], list):
            errors.append('Le workflow doit contenir un tableau de nodes')

        if 'edges' not in data or not isinstance(data['edges'], list):
            errors.append('Le workflow doit contenir un tableau d\'edges')

        # Validate each node: id, type, and an {x, y} position.
        if 'nodes' in data:
            for i, node in enumerate(data['nodes']):
                if not isinstance(node, dict):
                    errors.append(f'Node {i}: Doit être un objet')
                    continue

                if 'id' not in node:
                    errors.append(f'Node {i}: ID manquant')

                if 'type' not in node:
                    errors.append(f'Node {i}: Type manquant')

                if 'position' not in node or not isinstance(node['position'], dict):
                    errors.append(f'Node {i}: Position manquante ou invalide')
                elif 'x' not in node['position'] or 'y' not in node['position']:
                    errors.append(f'Node {i}: Position doit contenir x et y')

        # Validate each edge: id, source and target endpoints.
        if 'edges' in data:
            for i, edge in enumerate(data['edges']):
                if not isinstance(edge, dict):
                    errors.append(f'Edge {i}: Doit être un objet')
                    continue

                if 'id' not in edge:
                    errors.append(f'Edge {i}: ID manquant')

                if 'source' not in edge:
                    errors.append(f'Edge {i}: Source manquante')

                if 'target' not in edge:
                    errors.append(f'Edge {i}: Target manquante')

        # Non-fatal issues are reported as warnings.
        if 'name' not in data or not data['name']:
            warnings.append('Le workflow n\'a pas de nom')

        if 'version' not in data:
            warnings.append('Version du workflow non spécifiée')

        return {'errors': errors, 'warnings': warnings}

    @staticmethod
    def _migrate_workflow(data: Dict[str, Any], warnings: List[str]) -> Dict[str, Any]:
        """Migrate a workflow payload to the current version, mutating *data*."""
        # Version migration when the payload predates 1.0.0.
        # NOTE(review): this is a lexicographic string comparison, which
        # misorders multi-digit components (e.g. '0.10.0' < '0.9.0') —
        # confirm the versioning scheme or switch to a tuple comparison.
        if 'version' not in data or data['version'] < '1.0.0':
            warnings.append('Workflow migré depuis une version antérieure')

        # Per-node migrations.
        for node in data.get('nodes', []):
            # Ensure every node carries a 'data' structure with a label
            # and parameters, synthesizing it from legacy fields if absent.
            if 'data' not in node:
                node['data'] = {
                    'label': node.get('label', node.get('type', 'Node')),
                    'parameters': node.get('parameters', {})
                }

        return data

    @staticmethod
    def _create_workflow_from_data(data: Dict[str, Any]) -> VisualWorkflow:
        """Build a :class:`VisualWorkflow` from validated, migrated data."""
        from models.visual_workflow import VisualNode, VisualEdge, WorkflowVariable

        # Start from an empty workflow and fill it in below.
        workflow = VisualWorkflow(
            name=data.get('name', 'Workflow Importé'),
            description=data.get('description', ''),
            nodes=[],
            edges=[],
            variables=[]
        )

        # Rebuild the nodes.
        for node_data in data.get('nodes', []):
            node = VisualNode(
                id=node_data['id'],
                type=node_data['type'],
                position=node_data['position'],
                data=node_data.get('data', {})
            )
            workflow.nodes.append(node)

        # Rebuild the edges (handles are optional).
        for edge_data in data.get('edges', []):
            edge = VisualEdge(
                id=edge_data['id'],
                source=edge_data['source'],
                target=edge_data['target'],
                source_handle=edge_data.get('sourceHandle'),
                target_handle=edge_data.get('targetHandle'),
                data=edge_data.get('data', {})
            )
            workflow.edges.append(edge)

        # Rebuild the variables, generating ids for those that lack one.
        for var_data in data.get('variables', []):
            variable = WorkflowVariable(
                id=var_data.get('id', f"var-{len(workflow.variables)}"),
                name=var_data['name'],
                value=var_data.get('value', ''),
                type=var_data.get('type', 'string'),
                description=var_data.get('description', '')
            )
            workflow.variables.append(variable)

        return workflow
|
||||
|
||||
|
||||
@import_export_bp.route('/workflows/<workflow_id>/export', methods=['GET'])
def export_workflow(workflow_id: str):
    """
    Export a workflow as JSON or YAML.

    Query parameters:
        - format: json or yaml (default: json)
        - include_metadata: true/false (default: true)
        - include_template_info: true/false (default: true)
        - minify: true/false (default: false)
        - download: true/false (default: false) - forces a file download

    Returns 404 when the workflow does not exist, 400 for an unsupported
    format, 500 on unexpected failure.
    """
    try:
        # Fetch the workflow or fail with 404.
        workflow = WorkflowService.get_workflow(workflow_id)
        if not workflow:
            return jsonify({'error': 'Workflow non trouvé'}), 404

        # Export options, all passed as query-string flags.
        format_type = request.args.get('format', 'json').lower()
        include_metadata = request.args.get('include_metadata', 'true').lower() == 'true'
        include_template_info = request.args.get('include_template_info', 'true').lower() == 'true'
        minify = request.args.get('minify', 'false').lower() == 'true'
        download = request.args.get('download', 'false').lower() == 'true'

        if format_type not in ['json', 'yaml']:
            return jsonify({'error': 'Format non supporté'}), 400

        # Build the exportable payload.
        export_data = ImportExportService.export_workflow(
            workflow, format_type, include_metadata, include_template_info, minify
        )

        # Serialize to the requested textual format.
        if format_type == 'yaml':
            content = yaml.dump(export_data, default_flow_style=False, allow_unicode=True)
            mimetype = 'application/x-yaml'
            extension = 'yaml'
        else:
            if minify:
                content = json.dumps(export_data, ensure_ascii=False)
            else:
                content = json.dumps(export_data, indent=2, ensure_ascii=False)
            mimetype = 'application/json'
            extension = 'json'

        # Timestamped, sanitized file name.
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        filename = f"workflow_{secure_filename(workflow.name or 'untitled')}_{timestamp}.{extension}"

        if download:
            # Stream from memory instead of writing a NamedTemporaryFile
            # with delete=False: the previous approach leaked one temp
            # file per download because nothing ever removed it.
            buffer = io.BytesIO(content.encode('utf-8'))
            return send_file(
                buffer,
                as_attachment=True,
                download_name=filename,
                mimetype=mimetype
            )
        else:
            # Return the serialized document inline.
            return jsonify({
                'success': True,
                'data': content,
                'filename': filename,
                'format': format_type
            })

    except Exception as e:
        return jsonify({'error': str(e)}), 500
|
||||
|
||||
|
||||
@import_export_bp.route('/workflows/import', methods=['POST'])
def import_workflow():
    """
    Import a workflow.

    Accepts either:
        - An uploaded file (multipart/form-data, field name 'file')
        - JSON/YAML data in the request body

    Returns the created workflow on success (with any warnings),
    400 with the validation errors otherwise.
    """
    try:
        data = None
        filename = None

        # Prefer an uploaded file when one is present.
        if 'file' in request.files:
            file = request.files['file']
            if file.filename == '':
                return jsonify({'error': 'Aucun fichier sélectionné'}), 400

            filename = secure_filename(file.filename)
            data = file.read().decode('utf-8')

        # Otherwise, read the request body.
        elif request.is_json:
            # Direct JSON payload; re-serialized so the import service
            # receives text in all cases.
            data = json.dumps(request.get_json())
        elif request.content_type and 'yaml' in request.content_type:
            # Direct YAML payload.
            data = request.get_data(as_text=True)
        else:
            # Last resort: treat the body as raw text.
            data = request.get_data(as_text=True)
            if not data:
                return jsonify({'error': 'Aucune donnée fournie'}), 400

        # Parse, validate, migrate and rebuild the workflow.
        result = ImportExportService.import_workflow(data, filename)

        if result['success']:
            # Persist the imported workflow.
            # NOTE(review): only name and description are passed to
            # create_workflow — the imported nodes/edges/variables appear
            # to be discarded; confirm against WorkflowService's API.
            workflow_data = result['workflow']
            workflow = WorkflowService.create_workflow(
                workflow_data['name'],
                workflow_data['description']
            )

            return jsonify({
                'success': True,
                'workflow': workflow.to_dict(),
                'warnings': result.get('warnings', [])
            })
        else:
            return jsonify(result), 400

    except Exception as e:
        return jsonify({
            'success': False,
            'errors': [str(e)]
        }), 500
|
||||
|
||||
|
||||
@import_export_bp.route('/workflows/validate', methods=['POST'])
def validate_workflow_data():
    """
    Validate workflow data without importing it.

    Accepts an uploaded file or raw JSON/YAML in the request body and
    reports validity, errors and warnings without persisting anything.
    """
    try:
        payload = None
        filename = None

        # Prefer an uploaded file when one is present.
        if 'file' in request.files:
            uploaded = request.files['file']
            filename = secure_filename(uploaded.filename)
            payload = uploaded.read().decode('utf-8')
        else:
            payload = request.get_data(as_text=True)

        if not payload:
            return jsonify({'error': 'Aucune donnée fournie'}), 400

        # Run the full import pipeline for its validation result only.
        result = ImportExportService.import_workflow(payload, filename)

        # Nothing is saved; just surface the validation outcome.
        return jsonify({
            'valid': result['success'],
            'errors': result.get('errors', []),
            'warnings': result.get('warnings', [])
        })

    except Exception as e:
        return jsonify({
            'valid': False,
            'errors': [str(e)]
        }), 500
|
||||
21
visual_workflow_builder/backend/api/node_types.py
Normal file
21
visual_workflow_builder/backend/api/node_types.py
Normal file
@@ -0,0 +1,21 @@
|
||||
"""
|
||||
Node Types API Blueprint
|
||||
|
||||
Provides REST endpoints for node type definitions.
|
||||
"""
|
||||
|
||||
from flask import Blueprint, jsonify
|
||||
|
||||
node_types_bp = Blueprint('node_types', __name__)
|
||||
|
||||
@node_types_bp.route('/', methods=['GET'])
def list_node_types():
    """List all available node types.

    Returns an empty JSON array until the catalogue is implemented.
    """
    # TODO: Implement in Phase 2
    return jsonify([])
|
||||
|
||||
@node_types_bp.route('/<node_type>', methods=['GET'])
def get_node_type(node_type):
    """Get a specific node type definition.

    Currently echoes the requested type back; the full definition is
    not implemented yet.
    """
    # TODO: Implement in Phase 2
    return jsonify({'type': node_type})
|
||||
667
visual_workflow_builder/backend/api/real_demo.py
Normal file
667
visual_workflow_builder/backend/api/real_demo.py
Normal file
@@ -0,0 +1,667 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
API REST pour la Démonstration Réelle - RPA Vision V3
|
||||
Auteur : Dom, Alice, Kiro - 8 janvier 2026
|
||||
|
||||
Endpoints pour la capture d'écran réelle et l'interaction avec le système.
|
||||
"""
|
||||
|
||||
from flask import Blueprint, jsonify, request
|
||||
from typing import Dict, Any, List
|
||||
import logging
|
||||
import time
|
||||
|
||||
# Import the real screen-capture service; on failure the module still
# loads and every handler must cope with real_capture_service being None.
try:
    from services.real_screen_capture import real_capture_service
except ImportError:
    # Fallback when the service is not available.
    real_capture_service = None

# Imports of the RPA Vision V3 core modules used to execute actions.
import sys
import os

# Add the project root directory to sys.path so 'core.*' resolves.
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..'))
if project_root not in sys.path:
    sys.path.insert(0, project_root)

try:
    from core.execution.action_executor import ActionExecutor
    from core.models.workflow_graph import Action, ActionType
    from core.models.screen_state import ScreenState
    RPA_CORE_AVAILABLE = True
except ImportError as e:
    # Degrade gracefully: expose None placeholders so later references
    # fail loudly only when actually used.
    print(f"Warning: Modules RPA Core non disponibles: {e}")
    RPA_CORE_AVAILABLE = False
    ActionExecutor = None
    Action = None
    ActionType = None
    ScreenState = None

# Module-level logger for this blueprint.
logger = logging.getLogger(__name__)

# Blueprint exposing the real-demo endpoints under /api/real-demo.
real_demo_bp = Blueprint('real_demo', __name__, url_prefix='/api/real-demo')

# Global action-executor instance (None when the RPA core is missing).
if RPA_CORE_AVAILABLE:
    action_executor = ActionExecutor()
else:
    action_executor = None
|
||||
|
||||
@real_demo_bp.route('/monitors', methods=['GET'])
def get_monitors():
    """
    List the monitors available for capture.

    Returns:
        JSON: available monitors and the currently selected one, or an
        error payload (503 when the capture service is unavailable).
    """
    try:
        if real_capture_service is None:
            return jsonify({
                "success": False,
                "error": "Service de capture d'écran non disponible"
            }), 503

        payload = {
            "success": True,
            "monitors": real_capture_service.get_monitors(),
            "selected_monitor": real_capture_service.selected_monitor,
        }
        return jsonify(payload)
    except Exception as e:
        logger.error(f"Erreur lors de la récupération des moniteurs: {e}")
        return jsonify({
            "success": False,
            "error": str(e)
        }), 500
|
||||
|
||||
@real_demo_bp.route('/monitors/<int:monitor_id>/select', methods=['POST'])
def select_monitor(monitor_id: int):
    """
    Select a monitor for subsequent captures.

    Args:
        monitor_id: ID of the monitor to select.

    Returns:
        JSON: selection status (503 when the capture service is missing,
        400 for an invalid monitor id, 500 on unexpected failure).
    """
    try:
        # Guard against the fallback import path where the capture
        # service is None (same check as get_monitors); without it this
        # handler crashed with AttributeError -> opaque 500.
        if real_capture_service is None:
            return jsonify({
                "success": False,
                "error": "Service de capture d'écran non disponible"
            }), 503

        success = real_capture_service.select_monitor(monitor_id)
        if success:
            return jsonify({
                "success": True,
                "message": f"Moniteur {monitor_id} sélectionné",
                "selected_monitor": monitor_id
            })
        else:
            return jsonify({
                "success": False,
                "error": f"Moniteur {monitor_id} invalide"
            }), 400
    except Exception as e:
        logger.error(f"Erreur lors de la sélection du moniteur: {e}")
        return jsonify({
            "success": False,
            "error": str(e)
        }), 500
|
||||
|
||||
@real_demo_bp.route('/capture', methods=['POST'])
def capture_single():
    """
    Take a single screenshot (snapshot).

    Body JSON (optional):
        - monitor_id: ID of the monitor to capture (default: selected monitor)
        - detect_elements: also detect UI elements (default: false)

    Returns:
        JSON: base64 screenshot and metadata (503 when the capture
        service is missing, 500 when the capture fails).
    """
    try:
        # Guard against a missing capture service (consistent with
        # get_monitors); previously this raised AttributeError -> 500.
        if real_capture_service is None:
            return jsonify({
                "success": False,
                "error": "Service de capture d'écran non disponible"
            }), 503

        data = request.get_json() or {}
        monitor_id = data.get('monitor_id')
        detect_elements = data.get('detect_elements', False)

        # Take the one-off capture.
        screenshot = real_capture_service.capture_single(monitor_id)

        if screenshot:
            return jsonify({
                "success": True,
                "screenshot": screenshot,
                "monitor_id": monitor_id if monitor_id is not None else real_capture_service.selected_monitor,
                "elements": [] if not detect_elements else []  # TODO: ajouter détection si demandé
            })
        else:
            return jsonify({
                "success": False,
                "error": "Échec de la capture d'écran"
            }), 500

    except Exception as e:
        logger.error(f"Erreur lors de la capture unique: {e}")
        return jsonify({
            "success": False,
            "error": str(e)
        }), 500
|
||||
|
||||
|
||||
@real_demo_bp.route('/capture/start', methods=['POST'])
def start_capture():
    """
    Start real-time screen capture.

    Body JSON (optional):
        - interval: capture interval in seconds (default: 1.0)

    Returns:
        JSON: start status (503 when the capture service is missing,
        400 for an out-of-range interval, 409 when already running).
    """
    try:
        # Guard against a missing capture service (consistent with
        # get_monitors); previously this raised AttributeError -> 500.
        if real_capture_service is None:
            return jsonify({
                "success": False,
                "error": "Service de capture d'écran non disponible"
            }), 503

        data = request.get_json() or {}
        interval = float(data.get('interval', 1.0))

        # Reject intervals outside the supported range.
        if interval < 0.1 or interval > 10.0:
            return jsonify({
                "success": False,
                "error": "L'intervalle doit être entre 0.1 et 10.0 secondes"
            }), 400

        success = real_capture_service.start_capture(interval)
        if success:
            return jsonify({
                "success": True,
                "message": f"Capture démarrée (intervalle: {interval}s)",
                "interval": interval
            })
        else:
            return jsonify({
                "success": False,
                "error": "Capture déjà en cours"
            }), 409
    except Exception as e:
        logger.error(f"Erreur lors du démarrage de la capture: {e}")
        return jsonify({
            "success": False,
            "error": str(e)
        }), 500
|
||||
|
||||
@real_demo_bp.route('/capture/stop', methods=['POST'])
def stop_capture():
    """
    Stop the real-time screen capture.

    Returns:
        JSON: stop status (503 when the capture service is unavailable).
    """
    try:
        # Guard against a missing capture service (consistent with
        # get_monitors); previously this raised AttributeError -> 500.
        if real_capture_service is None:
            return jsonify({
                "success": False,
                "error": "Service de capture d'écran non disponible"
            }), 503

        success = real_capture_service.stop_capture()
        return jsonify({
            "success": True,
            "message": "Capture arrêtée" if success else "Capture n'était pas active"
        })
    except Exception as e:
        logger.error(f"Erreur lors de l'arrêt de la capture: {e}")
        return jsonify({
            "success": False,
            "error": str(e)
        }), 500
|
||||
|
||||
@real_demo_bp.route('/capture/status', methods=['GET'])
def get_capture_status():
    """
    Get the capture status.

    Returns:
        JSON: detailed capture status (503 when the capture service
        is unavailable).
    """
    try:
        # Guard against a missing capture service (consistent with
        # get_monitors); previously this raised AttributeError -> 500.
        if real_capture_service is None:
            return jsonify({
                "success": False,
                "error": "Service de capture d'écran non disponible"
            }), 503

        status = real_capture_service.get_status()
        return jsonify({
            "success": True,
            "status": status
        })
    except Exception as e:
        logger.error(f"Erreur lors de la récupération du statut: {e}")
        return jsonify({
            "success": False,
            "error": str(e)
        }), 500
|
||||
|
||||
@real_demo_bp.route('/capture/screenshot', methods=['GET'])
def get_current_screenshot():
    """
    Get the most recent screenshot.

    Returns:
        JSON: base64 screenshot and detected elements (503 when the
        capture service is unavailable, 404 when no screenshot exists).
    """
    try:
        # Guard against a missing capture service (consistent with
        # get_monitors); previously this raised AttributeError -> 500.
        if real_capture_service is None:
            return jsonify({
                "success": False,
                "error": "Service de capture d'écran non disponible"
            }), 503

        screenshot_base64 = real_capture_service.get_current_screenshot_base64()
        detected_elements = real_capture_service.get_detected_elements()

        if screenshot_base64 is None:
            return jsonify({
                "success": False,
                "error": "Aucune capture d'écran disponible"
            }), 404

        return jsonify({
            "success": True,
            "screenshot": screenshot_base64,
            "elements": detected_elements,
            "timestamp": time.time()
        })
    except Exception as e:
        logger.error(f"Erreur lors de la récupération de la capture: {e}")
        return jsonify({
            "success": False,
            "error": str(e)
        }), 500
|
||||
|
||||
@real_demo_bp.route('/elements', methods=['GET'])
def get_detected_elements():
    """
    List the UI elements detected on the current screen.

    Returns:
        JSON: {"success": True, "elements": [...], "count": N},
        or a 500 error payload on failure.
    """
    try:
        found = real_capture_service.get_detected_elements()
        return jsonify({
            "success": True,
            "elements": found,
            "count": len(found)
        })
    except Exception as e:
        logger.error(f"Erreur lors de la récupération des éléments: {e}")
        return jsonify({"success": False, "error": str(e)}), 500
|
||||
|
||||
def _locate_element(element_id):
    """Return the detected UI element whose 'id' matches, or None."""
    for element in real_capture_service.get_detected_elements():
        if element.get('id') == element_id:
            return element
    return None


def _element_center(element):
    """Compute the (x, y) center point of an element's bounding box."""
    bbox = element.get('bbox', {})
    x = bbox.get('x', 0) + bbox.get('width', 0) / 2
    y = bbox.get('y', 0) + bbox.get('height', 0) / 2
    return x, y


@real_demo_bp.route('/interact/click', methods=['POST'])
def perform_click():
    """
    Perform a real click on the screen.

    Body JSON:
        - x, y: click coordinates, OR
        - element_id: id of a detected UI element to click

    Returns:
        JSON: interaction result with the resolved coordinates.
        400 for missing/invalid input, 404 for an unknown element,
        500 when pyautogui is unavailable or an unexpected error occurs.
    """
    try:
        data = request.get_json()
        if not data:
            return jsonify({
                "success": False,
                "error": "Données JSON requises"
            }), 400

        element_id = None

        # Method 1: click by raw coordinates.
        if 'x' in data and 'y' in data:
            try:
                x = float(data['x'])
                y = float(data['y'])
            except (TypeError, ValueError):
                # Bug fix: malformed coordinates are a client error (400),
                # previously they fell through to the catch-all 500.
                return jsonify({
                    "success": False,
                    "error": "Coordonnées invalides"
                }), 400
            method = "coordinates"

        # Method 2: click by detected-element id.
        elif 'element_id' in data:
            element_id = data['element_id']
            target_element = _locate_element(element_id)
            if not target_element:
                return jsonify({
                    "success": False,
                    "error": f"Élément {element_id} non trouvé"
                }), 404
            x, y = _element_center(target_element)
            method = "element"

        else:
            return jsonify({
                "success": False,
                "error": "Coordonnées (x, y) ou element_id requis"
            }), 400

        # Single click path for both methods (previously duplicated).
        try:
            import pyautogui
            pyautogui.click(x, y)
        except ImportError:
            return jsonify({
                "success": False,
                "error": "pyautogui non disponible pour les interactions réelles"
            }), 500

        payload = {
            "success": True,
            "method": method,
            "coordinates": {"x": x, "y": y}
        }
        if method == "coordinates":
            payload["message"] = f"Clic effectué à ({x}, {y})"
        else:
            payload["message"] = f"Clic effectué sur l'élément {element_id}"
            payload["element_id"] = element_id
        return jsonify(payload)

    except Exception as e:
        logger.error(f"Erreur lors du clic: {e}")
        return jsonify({
            "success": False,
            "error": str(e)
        }), 500
|
||||
|
||||
@real_demo_bp.route('/interact/type', methods=['POST'])
def perform_typing():
    """
    Type text on the real screen, optionally focusing a target first.

    Body JSON:
        - text: text to type (required)
        - x, y: optional coordinates to click before typing
        - element_id: optional detected-element id to click before typing

    Returns:
        JSON: interaction result; 400 on missing input, 404 when the
        element id is unknown, 500 when pyautogui is unavailable.
    """
    try:
        data = request.get_json()
        if not data:
            return jsonify({
                "success": False,
                "error": "Données JSON requises"
            }), 400

        text = data.get('text', '')
        if not text:
            return jsonify({
                "success": False,
                "error": "Texte à saisir requis"
            }), 400

        try:
            import pyautogui

            # Resolve an optional focus point from coordinates or element id.
            focus_point = None
            if 'x' in data and 'y' in data:
                focus_point = (float(data['x']), float(data['y']))
            elif 'element_id' in data:
                element_id = data['element_id']
                known_elements = real_capture_service.get_detected_elements()
                match = next(
                    (el for el in known_elements if el.get('id') == element_id),
                    None
                )
                if match is None:
                    return jsonify({
                        "success": False,
                        "error": f"Élément {element_id} non trouvé"
                    }), 404
                bbox = match.get('bbox', {})
                focus_point = (
                    bbox.get('x', 0) + bbox.get('width', 0) / 2,
                    bbox.get('y', 0) + bbox.get('height', 0) / 2
                )

            # Click the target (if any) and wait briefly for focus.
            if focus_point is not None:
                pyautogui.click(*focus_point)
                time.sleep(0.2)

            pyautogui.write(text, interval=0.05)

            return jsonify({
                "success": True,
                "message": f"Texte saisi: {text[:50]}{'...' if len(text) > 50 else ''}",
                "text_length": len(text)
            })

        except ImportError:
            return jsonify({
                "success": False,
                "error": "pyautogui non disponible pour les interactions réelles"
            }), 500

    except Exception as e:
        logger.error(f"Erreur lors de la saisie: {e}")
        return jsonify({
            "success": False,
            "error": str(e)
        }), 500
|
||||
|
||||
def _perform_workflow_action(index, action_data):
    """Execute one workflow action (click/type/wait) and return its result record."""
    action_type = action_data.get('type')

    if action_type == 'click':
        if 'x' in action_data and 'y' in action_data:
            import pyautogui
            x = float(action_data['x'])
            y = float(action_data['y'])
            pyautogui.click(x, y)
            return {
                "action_index": index,
                "type": "click",
                "success": True,
                "message": f"Clic à ({x}, {y})",
                "coordinates": {"x": x, "y": y}
            }
        return {
            "action_index": index,
            "type": "click",
            "success": False,
            "error": "Coordonnées x, y requises pour le clic"
        }

    if action_type == 'type':
        text = action_data.get('text', '')
        if text:
            import pyautogui
            pyautogui.write(text, interval=0.05)
            return {
                "action_index": index,
                "type": "type",
                "success": True,
                "message": f"Texte saisi: {text[:30]}{'...' if len(text) > 30 else ''}",
                "text_length": len(text)
            }
        return {
            "action_index": index,
            "type": "type",
            "success": False,
            "error": "Texte requis pour la saisie"
        }

    if action_type == 'wait':
        duration = float(action_data.get('duration', 1.0))
        time.sleep(duration)
        return {
            "action_index": index,
            "type": "wait",
            "success": True,
            "message": f"Attente de {duration}s",
            "duration": duration
        }

    return {
        "action_index": index,
        "type": action_type,
        "success": False,
        "error": f"Type d'action non supporté: {action_type}"
    }


@real_demo_bp.route('/workflow/execute', methods=['POST'])
def execute_workflow():
    """
    Execute a simple workflow (click / type / wait actions) on the real screen.

    Body JSON:
        - actions: list of action dicts ({"type": "click"|"type"|"wait", ...})
        - validate_elements: accepted but currently unused (validation
          against detected elements is not implemented yet)

    Returns:
        JSON: per-action results plus a success-rate summary. A failing
        action is recorded but does not abort the remaining actions.
    """
    try:
        data = request.get_json()
        if not data:
            return jsonify({
                "success": False,
                "error": "Données JSON requises"
            }), 400

        actions = data.get('actions', [])
        if not actions:
            return jsonify({
                "success": False,
                "error": "Liste d'actions requise"
            }), 400

        # A current screen snapshot is required before interacting.
        screenshot_base64 = real_capture_service.get_current_screenshot_base64()
        detected_elements = real_capture_service.get_detected_elements()

        if not screenshot_base64:
            return jsonify({
                "success": False,
                "error": "Aucune capture d'écran disponible"
            }), 400

        # Transient ScreenState kept for parity with the execution pipeline.
        # NOTE(review): not consumed below — retained only because its
        # construction may validate inputs; confirm before removing.
        screen_state = ScreenState(
            timestamp=time.time(),
            screenshot_path="",  # in-memory image, no file on disk
            screenshot_data=None,
            ui_elements=detected_elements,
            metadata={"source": "real_demo_workflow"}
        )

        results = []
        for i, action_data in enumerate(actions):
            try:
                results.append(_perform_workflow_action(i, action_data))
                # Short pause between actions to let the UI settle.
                time.sleep(0.2)
            except Exception as action_error:
                logger.error(f"Erreur lors de l'exécution de l'action {i}: {action_error}")
                results.append({
                    "action_index": i,
                    "type": action_data.get('type', 'unknown'),
                    "success": False,
                    "error": str(action_error)
                })

        successful_actions = sum(1 for r in results if r.get('success', False))
        total_actions = len(results)

        return jsonify({
            "success": True,
            "message": f"Workflow exécuté: {successful_actions}/{total_actions} actions réussies",
            "results": results,
            "summary": {
                "total_actions": total_actions,
                "successful_actions": successful_actions,
                "failed_actions": total_actions - successful_actions,
                "success_rate": successful_actions / total_actions if total_actions > 0 else 0
            }
        })

    except Exception as e:
        logger.error(f"Erreur lors de l'exécution du workflow: {e}")
        return jsonify({
            "success": False,
            "error": str(e)
        }), 500
|
||||
|
||||
@real_demo_bp.route('/safety/emergency-stop', methods=['POST'])
def emergency_stop():
    """
    Emergency stop: halt the capture loop and all in-progress interactions.

    Returns:
        JSON confirmation, or a 500 error payload on failure.
    """
    try:
        real_capture_service.stop_capture()

        # Move the pointer to the top-left corner: this triggers
        # pyautogui's fail-safe and aborts any ongoing automation.
        try:
            import pyautogui
            pyautogui.moveTo(0, 0)
        except ImportError:
            pass  # pyautogui absent — nothing to interrupt

        return jsonify({
            "success": True,
            "message": "Arrêt d'urgence activé - Toutes les interactions stoppées"
        })

    except Exception as e:
        logger.error(f"Erreur lors de l'arrêt d'urgence: {e}")
        return jsonify({
            "success": False,
            "error": str(e)
        }), 500
|
||||
|
||||
# Gestionnaire d'erreur pour le blueprint
|
||||
@real_demo_bp.errorhandler(Exception)
def handle_error(error):
    """Catch-all error handler for the real_demo API blueprint."""
    logger.error(f"Erreur non gérée dans real_demo API: {error}", exc_info=True)
    body = {
        "success": False,
        "error": "Erreur interne du serveur",
        "details": str(error)
    }
    return jsonify(body), 500
|
||||
499
visual_workflow_builder/backend/api/screen_capture.py
Normal file
499
visual_workflow_builder/backend/api/screen_capture.py
Normal file
@@ -0,0 +1,499 @@
|
||||
"""
|
||||
Screen Capture API - Visual Workflow Builder
|
||||
|
||||
Provides endpoints for screen capture and UI element detection
|
||||
to support interactive target selection.
|
||||
|
||||
Exigences: 4.1, 4.2, 4.3, 4.4, 4.5
|
||||
"""
|
||||
|
||||
from flask import Blueprint, request, jsonify, send_file
|
||||
from flask_cors import cross_origin
|
||||
import sys
|
||||
import os
|
||||
import io
|
||||
import base64
|
||||
from datetime import datetime
|
||||
from typing import Dict, List, Any, Optional
|
||||
|
||||
# Add parent directory to path to import core modules
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../')))
|
||||
|
||||
try:
|
||||
from core.capture.screen_capturer import ScreenCapturer
|
||||
from core.detection.ui_detector import UIDetector
|
||||
from core.models.screen_state import ScreenState
|
||||
from core.embedding.fusion_engine import FusionEngine
|
||||
CORE_AVAILABLE = True
|
||||
except ImportError as e:
|
||||
print(f"Warning: Core modules not available: {e}")
|
||||
CORE_AVAILABLE = False
|
||||
|
||||
screen_capture_bp = Blueprint('screen_capture', __name__)
|
||||
|
||||
# Initialize components if available
|
||||
ui_detector = UIDetector() if CORE_AVAILABLE else None
|
||||
|
||||
def get_screen_capturer():
    """
    Build a fresh ScreenCapturer per request (avoids sharing one
    instance across threads).

    Returns:
        A new ScreenCapturer, or None when core modules are unavailable.
    """
    return ScreenCapturer() if CORE_AVAILABLE else None
|
||||
|
||||
|
||||
@screen_capture_bp.route('/capture', methods=['POST'])
@cross_origin()
def capture_screen():
    """
    Capture the current screen and return it as a base64 PNG data URI.

    Request body (optional):
        {"region": {"x", "y", "width", "height"}} — accepted but
        currently ignored: ScreenCapturer has no region support, so
        the full screen is always captured.

    Returns:
        {"image": "data:image/png;base64,...", "width", "height", "format"},
        503 when core modules are unavailable, 500 on capture failure.
    """
    if not CORE_AVAILABLE:
        return jsonify({
            'error': 'Screen capture not available',
            'message': 'Core modules are not properly configured'
        }), 503

    try:
        # Bug fix (dead duplication): the original branched on 'region'
        # but both branches performed the identical full-screen capture.
        request.get_json(silent=True)  # body parsed; 'region' intentionally unused

        # Create a new capturer instance for this request.
        screen_capturer = get_screen_capturer()
        if not screen_capturer:
            return jsonify({
                'error': 'Screen capture not available',
                'message': 'Could not initialize screen capturer'
            }), 503

        screenshot_array = screen_capturer.capture()
        if screenshot_array is None:
            return jsonify({
                'error': 'Capture failed',
                'message': 'Could not capture screen'
            }), 500

        # Convert numpy array -> PIL image -> base64 PNG.
        from PIL import Image
        screenshot = Image.fromarray(screenshot_array)

        img_buffer = io.BytesIO()
        screenshot.save(img_buffer, format='PNG')
        img_base64 = base64.b64encode(img_buffer.getvalue()).decode('utf-8')

        return jsonify({
            'image': f'data:image/png;base64,{img_base64}',
            'width': screenshot.width,
            'height': screenshot.height,
            'format': 'png'
        })

    except Exception as e:
        return jsonify({
            'error': 'Capture failed',
            'message': str(e)
        }), 500
|
||||
|
||||
|
||||
def _decode_base64_image(img_data):
    """Decode a (possibly data-URI-prefixed) base64 image into a numpy array."""
    if img_data.startswith('data:image'):
        img_data = img_data.split(',')[1]
    img_bytes = base64.b64decode(img_data)

    from PIL import Image
    import numpy as np
    return np.array(Image.open(io.BytesIO(img_bytes)))


def _format_detected_element(index, element):
    """Convert a core detected element into this API's JSON representation."""
    elem_type = getattr(element, 'element_type', 'generic')
    text = getattr(element, 'text', '')
    return {
        'id': f'element-{index}',
        'type': elem_type,
        'bounds': {
            'x': int(element.bbox.x),
            'y': int(element.bbox.y),
            'width': int(element.bbox.width),
            'height': int(element.bbox.height)
        },
        'text': text,
        'confidence': getattr(element, 'confidence', 0.8),
        'selectors': generate_selectors({
            'type': elem_type,
            'text': text,
            'id': getattr(element, 'element_id', ''),
            'classes': getattr(element, 'classes', []),
            'attributes': getattr(element, 'attributes', {})
        })
    }


@screen_capture_bp.route('/detect-elements', methods=['POST'])
@cross_origin()
def detect_elements():
    """
    Detect UI elements in a screenshot (capturing one when none is given).

    Request body (all optional):
        - image: base64 screenshot to analyze
        - region: accepted but currently ignored (ScreenCapturer has
          no region support; the full screen is always captured)

    Returns:
        {"elements": [...], "count": N}; on detector failure, mock
        elements plus a 'warning' field so the UI remains usable.
        503 when core modules are unavailable.
    """
    if not CORE_AVAILABLE or not ui_detector:
        return jsonify({
            'error': 'UI detection not available',
            'message': 'Core modules are not properly configured'
        }), 503

    try:
        data = request.get_json() or {}

        # Obtain the screenshot to analyze.
        if 'image' in data:
            screenshot_array = _decode_base64_image(data['image'])
        else:
            screen_capturer = get_screen_capturer()
            if not screen_capturer:
                return jsonify({
                    'error': 'Screen capture not available',
                    'message': 'Could not initialize screen capturer'
                }), 503
            # Bug fix (dead duplication): the original branched on
            # data['region'] but both branches did a full capture.
            screenshot_array = screen_capturer.capture()

        if screenshot_array is None:
            return jsonify({
                'error': 'Screenshot capture failed',
                'message': 'Could not obtain screenshot'
            }), 500

        try:
            screen_state = ScreenState(
                screenshot=screenshot_array,
                timestamp=datetime.now(),
                window_title="Visual Workflow Builder",
                resolution=(screenshot_array.shape[1], screenshot_array.shape[0])
            )

            detected = ui_detector.detect_elements(screen_state)
            formatted_elements = [
                _format_detected_element(i, el) for i, el in enumerate(detected)
            ]

            return jsonify({
                'elements': formatted_elements,
                'count': len(formatted_elements)
            })

        except Exception as detection_error:
            # Detection failed: fall back to mock data for testing.
            print(f"Detection error: {detection_error}")
            formatted_elements = [
                {
                    'id': 'mock-element-1',
                    'type': 'button',
                    'bounds': {'x': 100, 'y': 100, 'width': 80, 'height': 30},
                    'text': 'Test Button',
                    'confidence': 0.9,
                    'selectors': {
                        'css': 'button.test',
                        'xpath': '//button[@class="test"]',
                        'text': 'Test Button'
                    }
                }
            ]
            return jsonify({
                'elements': formatted_elements,
                'count': len(formatted_elements),
                'warning': 'Using mock data due to detection error'
            })

    except Exception as e:
        return jsonify({
            'error': 'Detection failed',
            'message': str(e)
        }), 500
|
||||
|
||||
|
||||
@screen_capture_bp.route('/element-at-point', methods=['POST'])
@cross_origin()
def element_at_point():
    """
    Return the UI element located at a given screen coordinate.

    Body: {"x": int, "y": int, "image": optional base64 screenshot}.

    NOTE: UIDetector.detect expects a file path, so point lookup is
    currently mocked — a synthetic 20x20 element centred on the point
    is returned. 400 when x/y are missing, 503 when core modules are
    unavailable.
    """
    if not CORE_AVAILABLE or not ui_detector:
        return jsonify({
            'error': 'UI detection not available',
            'message': 'Core modules are not properly configured'
        }), 503

    try:
        data = request.get_json()
        x = data.get('x')
        y = data.get('y')

        if x is None or y is None:
            return jsonify({
                'error': 'Invalid request',
                'message': 'x and y coordinates are required'
            }), 400

        # Obtain a screenshot: from the request body or a live capture.
        if 'image' in data:
            raw = data['image']
            if raw.startswith('data:image'):
                raw = raw.split(',')[1]
            screenshot = io.BytesIO(base64.b64decode(raw))
        else:
            screen_capturer = get_screen_capturer()
            if not screen_capturer:
                return jsonify({
                    'error': 'Screen capture not available',
                    'message': 'Could not initialize screen capturer'
                }), 503
            screenshot = screen_capturer.capture()

        # Synthesize the mock element centred on the clicked point.
        label = f'Element at ({x},{y})'
        box = {'x': x-10, 'y': y-10, 'width': 20, 'height': 20}
        mock_element = {
            'id': f'element-{x}-{y}',
            'type': 'generic',
            'bounds': box,
            'text': label,
            'confidence': 0.8,
            'selectors': {
                'css': f'[data-x="{x}"][data-y="{y}"]',
                'xpath': f'//*[@data-x="{x}" and @data-y="{y}"]',
                'text': label
            },
            'properties': {
                'tag': 'div',
                'classes': [],
                'id': f'elem-{x}-{y}',
                'attributes': {'data-x': str(x), 'data-y': str(y)},
                'text': label,
                'bounds': box,
                'visible': True,
                'enabled': True
            }
        }

        return jsonify({'element': mock_element})

    except Exception as e:
        return jsonify({
            'error': 'Detection failed',
            'message': str(e)
        }), 500
|
||||
|
||||
|
||||
def generate_selectors(element: Dict[str, Any]) -> Dict[str, str]:
    """
    Generate multiple selector strategies (CSS, XPath, text) for an element.

    Args:
        element: dict with optional 'type', 'text', 'id', 'classes',
            'attributes' keys describing a detected UI element.

    Returns:
        Mapping of strategy name to selector string. 'css' and 'xpath'
        are always present; the more specific variants only when derivable.

    Exigence: 4.5
    """
    selectors: Dict[str, str] = {}

    # Extract element properties.
    elem_type = element.get('type', 'unknown')
    text = element.get('text', '')
    elem_id = element.get('id', '')
    classes = element.get('classes', [])
    attributes = element.get('attributes', {})

    # CSS selectors (in order of preference).
    if elem_id:
        selectors['css_id'] = f'#{elem_id}'

    if 'data-testid' in attributes:
        selectors['css_testid'] = f'[data-testid="{attributes["data-testid"]}"]'

    if classes:
        selectors['css_class'] = f'{elem_type}.{".".join(classes)}'

    if text:
        # NOTE: :contains() is a jQuery extension, not standard CSS.
        selectors['css_text'] = f'{elem_type}:contains("{text}")'

    # Default CSS selector. Bug fix: an id must be prefixed with '#'
    # to be a valid CSS id selector (the bare id was returned before,
    # inconsistent with 'css_id' above).
    if elem_id:
        selectors['css'] = f'#{elem_id}'
    elif classes:
        selectors['css'] = f'{elem_type}.{classes[0]}'
    else:
        selectors['css'] = elem_type

    # XPath selectors.
    if elem_id:
        selectors['xpath_id'] = f'//{elem_type}[@id="{elem_id}"]'

    if text:
        selectors['xpath_text'] = f'//{elem_type}[contains(text(), "{text}")]'

    if 'data-testid' in attributes:
        selectors['xpath_testid'] = f'//{elem_type}[@data-testid="{attributes["data-testid"]}"]'

    # Default XPath: prefer id, then data-testid, then the bare tag.
    selectors['xpath'] = selectors.get('xpath_id') or selectors.get('xpath_testid') or f'//{elem_type}'

    # Plain text selector.
    if text:
        selectors['text'] = text

    return selectors
|
||||
|
||||
|
||||
def extract_properties(element: Dict[str, Any]) -> Dict[str, Any]:
    """
    Extract the detailed property set of a detected element.

    Missing keys fall back to neutral defaults (empty collections,
    visible/enabled True).

    Exigence: 4.5
    """
    # Output key -> (source key, default when absent).
    spec = {
        'tag': ('type', 'unknown'),
        'classes': ('classes', []),
        'id': ('id', ''),
        'attributes': ('attributes', {}),
        'text': ('text', ''),
        'bounds': ('bounds', {}),
        'visible': ('visible', True),
        'enabled': ('enabled', True),
    }
    return {
        out_key: element.get(src_key, default)
        for out_key, (src_key, default) in spec.items()
    }
|
||||
|
||||
|
||||
@screen_capture_bp.route('/validate-selector', methods=['POST'])
@cross_origin()
def validate_selector():
    """
    Validate a selector expression and report the number of matches.

    Body: {"selector": str, "type": "css"|"xpath", "image": optional}.

    NOTE: currently a stub — every non-empty selector is reported as
    valid with one match; real validation would run the UI detector.
    """
    try:
        data = request.get_json()
        selector = data.get('selector')
        selector_type = data.get('type', 'css')

        if not selector:
            return jsonify({
                'error': 'Invalid request',
                'message': 'selector is required'
            }), 400

        return jsonify({
            'valid': True,
            'count': 1,
            'message': f'{selector_type.upper()} selector is valid'
        })

    except Exception as e:
        return jsonify({
            'error': 'Validation failed',
            'message': str(e)
        }), 500
|
||||
279
visual_workflow_builder/backend/api/self_healing.py
Normal file
279
visual_workflow_builder/backend/api/self_healing.py
Normal file
@@ -0,0 +1,279 @@
|
||||
"""Self-healing API endpoints for Visual Workflow Builder."""
|
||||
|
||||
from flask import Blueprint, request, jsonify
|
||||
from typing import Dict, Any
|
||||
import logging
|
||||
|
||||
from services.self_healing_integration import get_self_healing_service
|
||||
from models.self_healing_config import SelfHealingConfig, RecoveryStrategy, RecoveryMode
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Create blueprint
|
||||
self_healing_bp = Blueprint('self_healing', __name__, url_prefix='/api/self-healing')
|
||||
|
||||
|
||||
@self_healing_bp.route('/config/defaults/<node_type>', methods=['GET'])
def get_default_config(node_type: str):
    """
    Return the default self-healing configuration for a node type,
    along with the available strategies and modes.

    Args:
        node_type: Type of node (click, type, wait, etc.)
    """
    try:
        config = get_self_healing_service().configure_node_self_healing(node_type)
        return jsonify({
            'success': True,
            'config': config.to_dict(),
            'available_strategies': [strategy.value for strategy in RecoveryStrategy],
            'available_modes': [mode.value for mode in RecoveryMode]
        })
    except Exception as e:
        logger.error(f"Failed to get default config for {node_type}: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500
|
||||
|
||||
|
||||
@self_healing_bp.route('/config/validate', methods=['POST'])
def validate_config():
    """
    Validate a self-healing configuration payload.

    Returns success=True with the normalized config when every bound
    check passes; otherwise success=False plus the list of errors.
    """
    try:
        data = request.get_json()
        if not data:
            return jsonify({
                'success': False,
                'error': 'Configuration data required'
            }), 400

        # Parse into a typed config, then check each bound.
        config = SelfHealingConfig.from_dict(data)

        errors = []
        if not 1 <= config.max_attempts <= 10:
            errors.append('max_attempts must be between 1 and 10')
        if not 0.0 <= config.confidence_threshold <= 1.0:
            errors.append('confidence_threshold must be between 0.0 and 1.0')
        if not 1.0 <= config.strategy_timeout <= 300.0:
            errors.append('strategy_timeout must be between 1.0 and 300.0 seconds')
        if not config.enabled_strategies:
            errors.append('At least one recovery strategy must be enabled')

        is_valid = len(errors) == 0
        return jsonify({
            'success': is_valid,
            'errors': errors,
            'config': config.to_dict() if is_valid else None
        })

    except Exception as e:
        logger.error(f"Failed to validate config: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500
|
||||
|
||||
|
||||
@self_healing_bp.route('/suggestions', methods=['POST'])
def get_recovery_suggestions():
    """
    Compute recovery suggestions for a failed workflow node.

    Body JSON must contain workflow_id, node_id, action_info and
    screenshot_path; a 400 names the first missing field.
    """
    try:
        data = request.get_json()

        missing = [
            field
            for field in ('workflow_id', 'node_id', 'action_info', 'screenshot_path')
            if field not in data
        ]
        if missing:
            return jsonify({
                'success': False,
                'error': f'Missing required field: {missing[0]}'
            }), 400

        service = get_self_healing_service()
        suggestions = service.get_recovery_suggestions(
            workflow_id=data['workflow_id'],
            node_id=data['node_id'],
            action_info=data['action_info'],
            screenshot_path=data['screenshot_path']
        )

        return jsonify({'success': True, 'suggestions': suggestions})

    except Exception as e:
        logger.error(f"Failed to get recovery suggestions: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500
|
||||
|
||||
|
||||
@self_healing_bp.route('/notifications', methods=['GET'])
def get_notifications():
    """
    List recovery notifications, optionally filtered by ?workflow_id=
    and capped by ?limit= (default 50).
    """
    try:
        notifications = get_self_healing_service().get_notifications(
            workflow_id=request.args.get('workflow_id'),
            limit=int(request.args.get('limit', 50))
        )
        return jsonify({'success': True, 'notifications': notifications})
    except Exception as e:
        logger.error(f"Failed to get notifications: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500
|
||||
|
||||
|
||||
@self_healing_bp.route('/notifications', methods=['DELETE'])
def clear_notifications():
    """
    Delete recovery notifications — all of them, or a single workflow's
    when ?workflow_id= is supplied.
    """
    try:
        get_self_healing_service().clear_notifications(
            workflow_id=request.args.get('workflow_id')
        )
        return jsonify({'success': True, 'message': 'Notifications cleared'})
    except Exception as e:
        logger.error(f"Failed to clear notifications: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500
|
||||
|
||||
|
||||
@self_healing_bp.route('/statistics', methods=['GET'])
def get_statistics():
    """Return aggregate recovery statistics as JSON."""
    try:
        stats = get_self_healing_service().get_statistics()
        return jsonify({'success': True, 'statistics': stats})
    except Exception as e:
        logger.error(f"Failed to get statistics: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500
|
||||
|
||||
|
||||
@self_healing_bp.route('/insights', methods=['GET'])
def get_insights():
    """Return insights derived from recovery patterns."""
    try:
        derived = get_self_healing_service().get_insights()
        return jsonify({'success': True, 'insights': derived})
    except Exception as e:
        logger.error(f"Failed to get insights: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500
|
||||
|
||||
|
||||
@self_healing_bp.route('/status', methods=['GET'])
def get_status():
    """Report the self-healing system status (availability and counters)."""
    try:
        service = get_self_healing_service()
        status_payload = {
            'enabled': service.enabled,
            # NOTE(review): mirrors the original — both flags read the same
            # attribute; confirm 'core_available' should not differ.
            'core_available': service.enabled,
            'notifications_count': len(service.notifications),
            'statistics': service.statistics.to_dict(),
        }
        return jsonify({'success': True, 'status': status_payload})
    except Exception as e:
        logger.error(f"Failed to get status: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500
|
||||
|
||||
|
||||
@self_healing_bp.route('/test', methods=['POST'])
def test_recovery():
    """
    Test recovery functionality with mock data.

    The request body is intentionally ignored: the endpoint always returns a
    canned result so the frontend can exercise the recovery UI end-to-end.
    """
    try:
        # Fix: the previous version read request.get_json() into an unused
        # variable, so a malformed body could make this diagnostics endpoint
        # fail with a 500 for no reason. The body is simply not used.
        mock_result = {
            'success': True,
            'strategy_used': 'SemanticVariantStrategy',
            'confidence': 0.85,
            'execution_time': 2.3,
            'message': '✅ Test de récupération réussi avec variante sémantique',
            'test_mode': True
        }

        return jsonify({
            'success': True,
            'result': mock_result,
            'message': 'Test de récupération terminé avec succès'
        })

    except Exception as e:
        logger.error(f"Failed to test recovery: {e}")
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500
|
||||
226
visual_workflow_builder/backend/api/templates.py
Normal file
226
visual_workflow_builder/backend/api/templates.py
Normal file
@@ -0,0 +1,226 @@
|
||||
"""
|
||||
Templates API Blueprint
|
||||
|
||||
Provides REST endpoints for workflow template management.
|
||||
"""
|
||||
|
||||
from flask import Blueprint, request, jsonify
|
||||
import logging
|
||||
|
||||
import sys
|
||||
import os
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from services.template_service import TemplateService
|
||||
from services.serialization import WorkflowSerializer
|
||||
from api.errors import handle_api_error
|
||||
|
||||
templates_bp = Blueprint('templates', __name__)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Initialize services
|
||||
template_service = TemplateService()
|
||||
workflow_serializer = WorkflowSerializer()
|
||||
|
||||
@templates_bp.route('/', methods=['GET'])
def list_templates():
    """List all templates, optionally filtered by category and difficulty."""
    try:
        category = request.args.get('category')
        difficulty = request.args.get('difficulty')

        found = template_service.list_templates(category=category, difficulty=difficulty)

        # Strip the heavy 'workflow' payload from the list view for performance.
        summaries = []
        for tpl in found:
            as_dict = tpl.to_dict()
            as_dict.pop('workflow', None)
            summaries.append(as_dict)

        return jsonify({'templates': summaries, 'count': len(summaries)})

    except Exception as e:
        logger.error(f"Error listing templates: {e}")
        return handle_api_error(e)
|
||||
|
||||
@templates_bp.route('/', methods=['POST'])
def create_template():
    """Create a new template from the JSON request body."""
    try:
        payload = request.get_json()
        if not payload:
            return jsonify({'error': 'No data provided'}), 400

        new_template = template_service.create_template(payload)
        logger.info(f"Created template: {new_template.id}")
        return jsonify(new_template.to_dict()), 201

    except ValueError as e:
        logger.warning(f"Template validation error: {e}")
        return jsonify({'error': str(e)}), 400
    except Exception as e:
        logger.error(f"Error creating template: {e}")
        return handle_api_error(e)
|
||||
|
||||
@templates_bp.route('/<template_id>', methods=['GET'])
def get_template(template_id):
    """Return a single template by id, or 404 when unknown."""
    try:
        found = template_service.get_template(template_id)
        if not found:
            return jsonify({'error': 'Template not found'}), 404
        return jsonify(found.to_dict())
    except Exception as e:
        logger.error(f"Error getting template {template_id}: {e}")
        return handle_api_error(e)
|
||||
|
||||
@templates_bp.route('/<template_id>', methods=['PUT'])
def update_template(template_id):
    """Update an existing template from the JSON request body."""
    try:
        payload = request.get_json()
        if not payload:
            return jsonify({'error': 'No data provided'}), 400

        updated = template_service.update_template(template_id, payload)
        if not updated:
            return jsonify({'error': 'Template not found'}), 404

        logger.info(f"Updated template: {template_id}")
        return jsonify(updated.to_dict())

    except ValueError as e:
        logger.warning(f"Template validation error: {e}")
        return jsonify({'error': str(e)}), 400
    except Exception as e:
        logger.error(f"Error updating template {template_id}: {e}")
        return handle_api_error(e)
|
||||
|
||||
@templates_bp.route('/<template_id>', methods=['DELETE'])
def delete_template(template_id):
    """Delete a template by id; 404 when it does not exist."""
    try:
        if not template_service.delete_template(template_id):
            return jsonify({'error': 'Template not found'}), 404
        logger.info(f"Deleted template: {template_id}")
        return jsonify({'message': 'Template deleted successfully'})
    except Exception as e:
        logger.error(f"Error deleting template {template_id}: {e}")
        return handle_api_error(e)
|
||||
|
||||
@templates_bp.route('/<template_id>/instantiate', methods=['POST'])
def instantiate_template(template_id):
    """Instantiate a template into a new, persisted workflow."""
    try:
        payload = request.get_json()
        if not payload:
            return jsonify({'error': 'No data provided'}), 400

        # Optional instantiation parameters with sensible defaults.
        params = payload.get('parameters', {})
        name = payload.get('name', f'Workflow from template {template_id}')
        author = payload.get('created_by', 'user')

        workflow = template_service.instantiate_template(
            template_id, params, name, author
        )
        if not workflow:
            return jsonify({'error': 'Template not found'}), 404

        # Persist the freshly instantiated workflow.
        serialized = workflow_serializer.serialize(workflow)
        workflow_id = workflow_serializer.save_workflow(serialized)

        logger.info(f"Instantiated template {template_id} as workflow {workflow_id}")
        return jsonify({
            'workflow_id': workflow_id,
            'workflow': workflow.to_dict()
        }), 201

    except ValueError as e:
        logger.warning(f"Template instantiation error: {e}")
        return jsonify({'error': str(e)}), 400
    except Exception as e:
        logger.error(f"Error instantiating template {template_id}: {e}")
        return handle_api_error(e)
|
||||
|
||||
@templates_bp.route('/from-workflow', methods=['POST'])
def create_template_from_workflow():
    """Build a reusable template out of an existing, saved workflow."""
    try:
        payload = request.get_json()
        if not payload:
            return jsonify({'error': 'No data provided'}), 400

        workflow_id = payload.get('workflow_id')
        template_name = payload.get('template_name')
        template_description = payload.get('template_description')
        category = payload.get('category', 'Custom')
        parameters = payload.get('parameters', [])

        # All three identifiers must be present and non-empty.
        if not (workflow_id and template_name and template_description):
            return jsonify({
                'error': 'workflow_id, template_name, and template_description are required'
            }), 400

        # Load and deserialize the source workflow.
        workflow_data = workflow_serializer.load_workflow(workflow_id)
        if not workflow_data:
            return jsonify({'error': 'Workflow not found'}), 404
        workflow = workflow_serializer.deserialize(workflow_data)

        template = template_service.create_template_from_workflow(
            workflow, template_name, template_description, category, parameters
        )

        logger.info(f"Created template {template.id} from workflow {workflow_id}")
        return jsonify(template.to_dict()), 201

    except ValueError as e:
        logger.warning(f"Template creation error: {e}")
        return jsonify({'error': str(e)}), 400
    except Exception as e:
        logger.error(f"Error creating template from workflow: {e}")
        return handle_api_error(e)
|
||||
|
||||
@templates_bp.route('/categories', methods=['GET'])
def get_template_categories():
    """Return the sorted set of categories used by existing templates."""
    try:
        all_templates = template_service.list_templates()
        categories = sorted({tpl.category for tpl in all_templates})
        return jsonify({'categories': categories})
    except Exception as e:
        logger.error(f"Error getting template categories: {e}")
        return handle_api_error(e)
|
||||
@@ -167,6 +167,101 @@ def get_status():
|
||||
}), 500
|
||||
|
||||
|
||||
@ui_detection_bp.route('/intelligent-click', methods=['POST'])
@cross_origin()
def intelligent_click():
    """
    Locate an anchor on screen and optionally perform an intelligent click.

    Request body (JSON):
        anchor_image_base64: anchor image to find (required)
        anchor_bbox: original bounding box {x, y, width, height} (optional)
        method: matching method 'template', 'clip', 'hybrid' (default 'template')
        click_type: 'left', 'right' or 'double' (default 'left')
        execute_click: perform the click or just return coordinates (default True)
        threshold: detection threshold (default 0.35)

    Response:
        success: bool
        result: found / coordinates / bbox / confidence / clicked / method /
                search_time_ms / candidates
    """
    try:
        data = request.get_json()

        if not data or 'anchor_image_base64' not in data:
            return jsonify({
                'success': False,
                'error': 'anchor_image_base64 est requis'
            }), 400

        anchor_image_base64 = data['anchor_image_base64']
        anchor_bbox = data.get('anchor_bbox')
        match_method = data.get('method', 'template')
        click_type = data.get('click_type', 'left')
        should_click = data.get('execute_click', True)
        threshold = data.get('threshold', 0.35)

        # Lazy import keeps the module importable when the executor is absent.
        from services.intelligent_executor import find_and_click

        # Search for the anchor on the current screen.
        result = find_and_click(
            anchor_image_base64=anchor_image_base64,
            anchor_bbox=anchor_bbox,
            method=match_method,
            detection_threshold=threshold
        )

        clicked = False
        if should_click and result['found'] and result['coordinates']:
            try:
                import pyautogui
                x = result['coordinates']['x']
                y = result['coordinates']['y']

                action = {
                    'left': pyautogui.click,
                    'right': pyautogui.rightClick,
                    'double': pyautogui.doubleClick,
                }.get(click_type)
                if action:
                    action(x, y)
                # NOTE(review): mirrors the original flow — 'clicked' is
                # reported even for an unrecognized click_type (no actual
                # click performed); confirm whether that is intended.
                clicked = True
                print(f"✅ Clic intelligent {click_type} à ({x}, {y}) - confiance: {result['confidence']:.2f}")

            except Exception as click_err:
                # Click failure is non-fatal: coordinates are still returned.
                print(f"❌ Erreur clic: {click_err}")

        return jsonify({
            'success': True,
            'result': {
                'found': result['found'],
                'coordinates': result['coordinates'],
                'bbox': result.get('bbox'),
                'confidence': result['confidence'],
                'clicked': clicked,
                'method': result.get('method', match_method),
                'search_time_ms': result.get('search_time_ms', 0),
                'candidates': result.get('candidates', [])
            }
        })

    except Exception as e:
        import traceback
        traceback.print_exc()
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500
|
||||
|
||||
|
||||
@ui_detection_bp.route('/find-element', methods=['POST'])
|
||||
@cross_origin()
|
||||
def find_element():
|
||||
|
||||
137
visual_workflow_builder/backend/api/validation.py
Normal file
137
visual_workflow_builder/backend/api/validation.py
Normal file
@@ -0,0 +1,137 @@
|
||||
"""backend/api/validation.py
|
||||
|
||||
Validation légère pour les payloads de l'API workflows.
|
||||
|
||||
Auteur : Dom, Alice, Kiro - 08 janvier 2026
|
||||
|
||||
Patch #1:
|
||||
- Ce module manquait et bloquait le boot via api/__init__.py
|
||||
- On reste volontairement permissif (on valide les essentiels)
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Dict, Iterable
|
||||
|
||||
from .errors import ValidationError
|
||||
|
||||
|
||||
_ALLOWED_UPDATE_FIELDS = {
|
||||
"name",
|
||||
"description",
|
||||
"nodes",
|
||||
"edges",
|
||||
"variables",
|
||||
"settings",
|
||||
"tags",
|
||||
"category",
|
||||
"is_template",
|
||||
# Champs additionnels envoyés par le frontend VWB
|
||||
"id",
|
||||
"steps",
|
||||
"connections",
|
||||
}
|
||||
|
||||
|
||||
def _ensure_dict(data: Any, context: str = "payload") -> Dict[str, Any]:
|
||||
"""S'assure que les données sont un dictionnaire."""
|
||||
if not isinstance(data, dict):
|
||||
raise ValidationError(f"{context} doit être un objet")
|
||||
return data
|
||||
|
||||
|
||||
def _ensure_list(value: Any, context: str) -> Iterable[Any]:
|
||||
"""S'assure que la valeur est une liste."""
|
||||
if value is None:
|
||||
return []
|
||||
if not isinstance(value, list):
|
||||
raise ValidationError(f"{context} doit être un tableau")
|
||||
return value
|
||||
|
||||
|
||||
def validate_workflow_data(data: Any) -> None:
    """Validate a workflow creation payload (deliberately permissive)."""
    data = _ensure_dict(data, "workflow")

    # Required fields on creation: non-blank 'name' and 'created_by'.
    for required in ("name", "created_by"):
        if required not in data or not str(data.get(required) or "").strip():
            raise ValidationError(f"Le champ '{required}' est requis")

    # Optional structured collections, each element validated individually.
    if "nodes" in data:
        for node in _ensure_list(data.get("nodes"), "nodes"):
            validate_node_data(node)

    if "edges" in data:
        for edge in _ensure_list(data.get("edges"), "edges"):
            validate_edge_data(edge)

    if "variables" in data:
        for var in _ensure_list(data.get("variables"), "variables"):
            validate_variable_data(var)

    if "settings" in data and data.get("settings") is not None:
        validate_settings_data(data.get("settings"))

    tags = data.get("tags")
    if "tags" in data and tags is not None and not isinstance(tags, list):
        raise ValidationError("Le champ 'tags' doit être un tableau")
|
||||
|
||||
|
||||
def validate_update_data(data: Any) -> None:
    """Validate a workflow update payload: whitelist fields, check parts."""
    data = _ensure_dict(data, "update")

    # Reject any field outside the allowed whitelist.
    unknown = set(data) - _ALLOWED_UPDATE_FIELDS
    if unknown:
        raise ValidationError(f"Champ(s) inconnu(s) dans la mise à jour: {', '.join(sorted(unknown))}")

    # Element-wise validation of the structured collections, when present.
    for field, validator in (("nodes", validate_node_data),
                             ("edges", validate_edge_data),
                             ("variables", validate_variable_data)):
        if field in data:
            for item in _ensure_list(data.get(field), field):
                validator(item)

    if "settings" in data and data.get("settings") is not None:
        validate_settings_data(data.get("settings"))

    tags = data.get("tags")
    if "tags" in data and tags is not None and not isinstance(tags, list):
        raise ValidationError("Le champ 'tags' doit être un tableau")
|
||||
|
||||
|
||||
def validate_node_data(node: Any) -> None:
    """Validate one node: requires non-blank 'id' and 'type' fields."""
    node = _ensure_dict(node, "node")

    if "id" not in node or not str(node.get("id") or "").strip():
        raise ValidationError("Le champ 'id' du nœud est requis")

    # ReactFlow nodes always carry a type ('default' is acceptable).
    if "type" not in node or not str(node.get("type") or "").strip():
        raise ValidationError("Le champ 'type' du nœud est requis")
|
||||
|
||||
|
||||
def validate_edge_data(edge: Any) -> None:
    """Validate one connection: requires non-blank 'source' and 'target'."""
    edge = _ensure_dict(edge, "edge")

    if "source" not in edge or not str(edge.get("source") or "").strip():
        raise ValidationError("Le champ 'source' de la connexion est requis")
    if "target" not in edge or not str(edge.get("target") or "").strip():
        raise ValidationError("Le champ 'target' de la connexion est requis")
|
||||
|
||||
|
||||
def validate_variable_data(variable: Any) -> None:
    """Validate one variable: must expose 'name' or the alternate 'key'."""
    variable = _ensure_dict(variable, "variable")
    has_identifier = "name" in variable or "key" in variable
    if not has_identifier:
        raise ValidationError("Le champ 'name' de la variable est requis")
|
||||
|
||||
|
||||
def validate_settings_data(settings: Any) -> None:
    """Validate workflow settings: currently only checks it is an object."""
    _ensure_dict(settings, "settings")
|
||||
417
visual_workflow_builder/backend/api/visual_targets.py
Normal file
417
visual_workflow_builder/backend/api/visual_targets.py
Normal file
@@ -0,0 +1,417 @@
|
||||
"""
|
||||
API endpoints pour la gestion des cibles visuelles
|
||||
Intégration avec le VisualTargetManager du core RPA Vision V3
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from flask import Blueprint, request, jsonify
|
||||
from typing import Dict, Any, List
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
# Add project root to path to import core modules (best-effort)
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../')))
|
||||
|
||||
try:
|
||||
from core.visual.visual_target_manager import VisualTargetManager, VisualTarget, ValidationResult
|
||||
from core.models import Point, BBox
|
||||
from core.capture.screen_capturer import ScreenCapturer
|
||||
from core.detection.ui_detector import UIDetector
|
||||
from core.embedding.fusion_engine import FusionEngine
|
||||
CORE_AVAILABLE = True
|
||||
except ImportError as e:
|
||||
print(f"Warning: Core modules not available (visual_targets): {e}")
|
||||
CORE_AVAILABLE = False
|
||||
VisualTargetManager = None # type: ignore
|
||||
VisualTarget = None # type: ignore
|
||||
ValidationResult = None # type: ignore
|
||||
Point = None # type: ignore
|
||||
BBox = None # type: ignore
|
||||
ScreenCapturer = None # type: ignore
|
||||
UIDetector = None # type: ignore
|
||||
FusionEngine = None # type: ignore
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Créer le blueprint pour les endpoints visuels
|
||||
visual_targets_bp = Blueprint('visual_targets', __name__, url_prefix='/api/visual')
|
||||
|
||||
# Instance globale du VisualTargetManager (sera initialisée dans app.py)
|
||||
visual_target_manager: VisualTargetManager = None
|
||||
|
||||
def init_visual_target_manager(screen_capturer: ScreenCapturer,
                               ui_detector: UIDetector,
                               fusion_engine: FusionEngine):
    """Wire the module-level VisualTargetManager up with its dependencies."""
    global visual_target_manager
    # Fail loudly if the optional core package was not importable.
    if not CORE_AVAILABLE or VisualTargetManager is None:
        raise RuntimeError("Core RPA modules not available - cannot init VisualTargetManager")
    visual_target_manager = VisualTargetManager(screen_capturer, ui_detector, fusion_engine)
    logger.info("VisualTargetManager initialisé pour l'API")
|
||||
|
||||
@visual_targets_bp.route('/targets', methods=['POST'])
def create_visual_target():
    """
    Capture the screen element at the given position and create a visual target.

    Body:
        {"position": {"x": 100, "y": 200}}

    Returns (201):
        The target serialized via ``to_dict()`` (embedding, screenshot,
        bounding_box, confidence, contextual_info, signature, metadata, ...).
    """
    try:
        if not visual_target_manager:
            return jsonify({'error': 'VisualTargetManager non initialisé'}), 500

        data = request.get_json()
        if not data or 'position' not in data:
            return jsonify({'error': 'Position requise'}), 400

        position_data = data['position']
        if 'x' not in position_data or 'y' not in position_data:
            return jsonify({'error': 'Coordonnées x et y requises'}), 400

        position = Point(x=position_data['x'], y=position_data['y'])

        # Fix: asyncio.run() creates, uses and tears down a fresh loop.
        # The previous new_event_loop()/set_event_loop()/close() sequence
        # left a *closed* loop registered as the thread's current event
        # loop, breaking later asyncio.get_event_loop() callers.
        target = asyncio.run(
            visual_target_manager.capture_and_select_element(position)
        )

        logger.info(f"Cible visuelle créée: {target.signature}")
        return jsonify(target.to_dict()), 201

    except ValueError as e:
        logger.warning(f"Erreur de validation: {e}")
        return jsonify({'error': str(e)}), 400

    except Exception as e:
        logger.error(f"Erreur lors de la création de cible: {e}")
        return jsonify({'error': 'Erreur interne du serveur'}), 500
|
||||
|
||||
@visual_targets_bp.route('/targets/<signature>/validate', methods=['POST'])
def validate_visual_target(signature: str):
    """
    Check that a cached visual target is still present and reachable.

    Returns (200):
        is_valid / confidence / current_position / suggestions / issues.
    """
    try:
        if not visual_target_manager:
            return jsonify({'error': 'VisualTargetManager non initialisé'}), 500

        target = visual_target_manager.get_cached_target(signature)
        if not target:
            return jsonify({'error': 'Cible non trouvée'}), 404

        # Fix: asyncio.run() replaces the manual loop management that left a
        # closed loop installed as the thread's current event loop.
        validation_result = asyncio.run(
            visual_target_manager.validate_target(target)
        )

        result_dict = {
            'is_valid': validation_result.is_valid,
            'confidence': validation_result.confidence,
            'current_position': validation_result.current_position.__dict__ if validation_result.current_position else None,
            'suggestions': [s.to_dict() for s in validation_result.suggestions] if validation_result.suggestions else [],
            'issues': validation_result.issues or []
        }

        logger.debug(f"Validation de {signature}: {'valide' if validation_result.is_valid else 'invalide'}")
        return jsonify(result_dict), 200

    except Exception as e:
        logger.error(f"Erreur lors de la validation: {e}")
        return jsonify({'error': 'Erreur interne du serveur'}), 500
|
||||
|
||||
@visual_targets_bp.route('/targets/<signature>', methods=['PUT'])
def update_visual_target(signature: str):
    """
    Refresh the stored screenshot of a cached visual target.

    Returns (200):
        The updated target serialized via ``to_dict()``.
    """
    try:
        if not visual_target_manager:
            return jsonify({'error': 'VisualTargetManager non initialisé'}), 500

        target = visual_target_manager.get_cached_target(signature)
        if not target:
            return jsonify({'error': 'Cible non trouvée'}), 404

        # Fix: asyncio.run() avoids leaving a closed loop registered as the
        # thread's current event loop (the old set_event_loop() pattern did).
        updated_target = asyncio.run(
            visual_target_manager.update_target_screenshot(target)
        )

        logger.info(f"Cible mise à jour: {signature}")
        return jsonify(updated_target.to_dict()), 200

    except ValueError as e:
        logger.warning(f"Erreur lors de la mise à jour: {e}")
        return jsonify({'error': str(e)}), 400

    except Exception as e:
        logger.error(f"Erreur lors de la mise à jour: {e}")
        return jsonify({'error': 'Erreur interne du serveur'}), 500
|
||||
|
||||
@visual_targets_bp.route('/targets/<signature>/similar', methods=['GET'])
def find_similar_elements(signature: str):
    """
    Find on-screen elements similar to the cached target.

    Returns (200):
        A JSON array of candidate targets serialized via ``to_dict()``.
    """
    try:
        if not visual_target_manager:
            return jsonify({'error': 'VisualTargetManager non initialisé'}), 500

        target = visual_target_manager.get_cached_target(signature)
        if not target:
            return jsonify({'error': 'Cible non trouvée'}), 404

        # Fix: asyncio.run() avoids leaving a closed loop registered as the
        # thread's current event loop (the old set_event_loop() pattern did).
        similar_targets = asyncio.run(
            visual_target_manager.find_similar_elements(target)
        )

        targets_list = [t.to_dict() for t in similar_targets]

        logger.info(f"Trouvé {len(similar_targets)} éléments similaires à {signature}")
        return jsonify(targets_list), 200

    except Exception as e:
        logger.error(f"Erreur lors de la recherche: {e}")
        return jsonify({'error': 'Erreur interne du serveur'}), 500
|
||||
|
||||
@visual_targets_bp.route('/targets/<signature>', methods=['DELETE'])
def delete_visual_target(signature: str):
    """Remove a visual target from the cache and stop its validation loop."""
    try:
        if not visual_target_manager:
            return jsonify({'error': 'VisualTargetManager non initialisé'}), 500

        target = visual_target_manager.get_cached_target(signature)
        if not target:
            return jsonify({'error': 'Cible non trouvée'}), 404

        # stop_validation() also evicts the target from the cache.
        visual_target_manager.stop_validation(signature)

        logger.info(f"Cible supprimée: {signature}")
        return jsonify({'message': 'Cible supprimée avec succès'}), 200

    except Exception as e:
        logger.error(f"Erreur lors de la suppression: {e}")
        return jsonify({'error': 'Erreur interne du serveur'}), 500
|
||||
|
||||
@visual_targets_bp.route('/targets', methods=['GET'])
def list_visual_targets():
    """Return a lightweight summary of every cached visual target."""
    try:
        if not visual_target_manager:
            return jsonify({'error': 'VisualTargetManager non initialisé'}), 500

        targets_summary = [
            {
                'signature': tgt.signature,
                'confidence': tgt.confidence,
                'created_at': tgt.created_at.isoformat(),
                'last_validated': tgt.last_validated.isoformat() if tgt.last_validated else None,
                'validation_count': tgt.validation_count,
                'metadata': tgt.metadata,
            }
            for tgt in visual_target_manager._target_cache.values()
        ]

        return jsonify({
            'targets': targets_summary,
            'count': len(targets_summary)
        }), 200

    except Exception as e:
        logger.error(f"Erreur lors de la liste des cibles: {e}")
        return jsonify({'error': 'Erreur interne du serveur'}), 500
|
||||
|
||||
@visual_targets_bp.route('/targets/clear', methods=['POST'])
def clear_visual_targets():
    """Empty the visual-target cache and report how many entries were dropped."""
    try:
        if not visual_target_manager:
            return jsonify({'error': 'VisualTargetManager non initialisé'}), 500

        # Snapshot the count before clearing so it can be reported back.
        cleared = len(visual_target_manager._target_cache)
        visual_target_manager.clear_cache()

        logger.info(f"Cache vidé: {cleared} cibles supprimées")
        return jsonify({
            'message': 'Cache vidé avec succès',
            'cleared_count': cleared
        }), 200

    except Exception as e:
        logger.error(f"Erreur lors du vidage du cache: {e}")
        return jsonify({'error': 'Erreur interne du serveur'}), 500
|
||||
|
||||
@visual_targets_bp.route('/health', methods=['GET'])
def health_check():
    """Health probe for the visual-targets service.

    Returns a JSON object with the overall status, whether the manager is
    initialised, and the sizes of its target cache and validation task list::

        {
            "status": "healthy",
            "manager_initialized": true,
            "cached_targets": 3,
            "active_validations": 3
        }

    Note: reads the manager's private ``_target_cache`` / ``_validation_tasks``.
    """
    try:
        m = visual_target_manager
        payload = {
            'status': 'healthy' if m else 'unhealthy',
            'manager_initialized': m is not None,
            'cached_targets': len(m._target_cache) if m else 0,
            'active_validations': len(m._validation_tasks) if m else 0,
        }
        return jsonify(payload), 200

    except Exception as e:
        logger.error(f"Erreur lors du health check: {e}")
        return jsonify({
            'status': 'error',
            'error': str(e),
        }), 500
|
||||
|
||||
# Blueprint-level error handlers
@visual_targets_bp.errorhandler(404)
def not_found(error):
    """Return a JSON 404 response for unknown endpoints on this blueprint."""
    return jsonify({'error': 'Endpoint non trouvé'}), 404
|
||||
|
||||
@visual_targets_bp.errorhandler(405)
def method_not_allowed(error):
    """Return a JSON 405 response for unsupported HTTP methods."""
    return jsonify({'error': 'Méthode non autorisée'}), 405
|
||||
|
||||
@visual_targets_bp.errorhandler(500)
def internal_error(error):
    """Return a generic JSON 500 response for unhandled server errors."""
    return jsonify({'error': 'Erreur interne du serveur'}), 500
|
||||
595
visual_workflow_builder/backend/api/websocket_handlers.py
Normal file
595
visual_workflow_builder/backend/api/websocket_handlers.py
Normal file
@@ -0,0 +1,595 @@
|
||||
"""
|
||||
WebSocket Handlers - Visual Workflow Builder
|
||||
|
||||
Gestion des événements WebSocket pour les mises à jour en temps réel
|
||||
des exécutions de workflows.
|
||||
|
||||
Exigences: 6.2, 6.3, 6.4
|
||||
"""
|
||||
|
||||
import logging
from datetime import datetime

from flask import request
from flask_socketio import emit, join_room, leave_room, rooms

from app import socketio
from services.execution_integration import get_executor
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Tracks which WebSocket clients are subscribed to which execution.
# Format: {execution_id: [sid1, sid2, ...]}
execution_subscriptions = {}
|
||||
|
||||
|
||||
@socketio.on('connect')
def handle_connect():
    """Handle a new WebSocket client connection.

    Acknowledges the connection by echoing the client's session id.

    Requirement: 6.2
    """
    sid = request.sid
    logger.info(f"Client connecté: {sid}")
    emit('connected', {
        'message': 'Connecté au serveur WebSocket',
        'client_id': sid,
    })
|
||||
|
||||
|
||||
@socketio.on('disconnect')
def handle_disconnect():
    """Handle a WebSocket client disconnect.

    Removes the client from every execution subscription it held, dropping
    executions whose subscriber list becomes empty.

    Requirement: 6.2
    """
    sid = request.sid
    logger.info(f"Client déconnecté: {sid}")

    # Iterate over a snapshot of the keys because entries may be deleted.
    for exec_id in list(execution_subscriptions):
        subscribers = execution_subscriptions[exec_id]
        if sid in subscribers:
            subscribers.remove(sid)
            if not subscribers:
                del execution_subscriptions[exec_id]
|
||||
|
||||
|
||||
@socketio.on('subscribe_execution')
def handle_subscribe_execution(data):
    """Subscribe the calling client to updates of one execution.

    Args:
        data: {'execution_id': str}

    Joins the client to the execution's room, records the subscription, and
    immediately emits the current status (with at most the last 10 log
    entries) — or an 'error' event when the execution is unknown.

    Requirements: 6.2, 6.3
    """
    client_id = request.sid
    execution_id = data.get('execution_id')

    if not execution_id:
        emit('error', {'message': 'execution_id requis'})
        return

    # Join the per-execution room so broadcasts reach this client.
    join_room(execution_id)

    # Record the subscription (setdefault replaces the check-then-create pair).
    subscribers = execution_subscriptions.setdefault(execution_id, [])
    if client_id not in subscribers:
        subscribers.append(client_id)

    logger.info(f"Client {client_id} souscrit à l'exécution {execution_id}")

    # Push the current status right away.
    executor = get_executor()
    result = executor.get_execution_status(execution_id)

    if result:
        emit('execution_status', {
            'execution_id': execution_id,
            'status': result.status,
            'progress': result.progress,
            # Slicing already handles short lists: logs[-10:] == logs when len <= 10.
            'logs': result.logs[-10:]
        })
    else:
        emit('error', {
            'message': f'Exécution {execution_id} introuvable'
        })
|
||||
|
||||
|
||||
@socketio.on('unsubscribe_execution')
def handle_unsubscribe_execution(data):
    """Unsubscribe the calling client from an execution's updates.

    Args:
        data: {'execution_id': str}

    Requirement: 6.2
    """
    sid = request.sid
    execution_id = data.get('execution_id')

    if not execution_id:
        emit('error', {'message': 'execution_id requis'})
        return

    # Leave the per-execution room.
    leave_room(execution_id)

    # Forget the subscription; drop the entry once nobody is left.
    subscribers = execution_subscriptions.get(execution_id)
    if subscribers is not None and sid in subscribers:
        subscribers.remove(sid)
        if not subscribers:
            del execution_subscriptions[execution_id]

    logger.info(f"Client {sid} désabonné de l'exécution {execution_id}")

    emit('unsubscribed', {'execution_id': execution_id})
|
||||
|
||||
|
||||
@socketio.on('get_execution_status')
def handle_get_execution_status(data):
    """Emit the current status of one execution to the calling client.

    Args:
        data: {'execution_id': str}

    Requirement: 6.3
    """
    execution_id = data.get('execution_id')

    if not execution_id:
        emit('error', {'message': 'execution_id requis'})
        return

    status = get_executor().get_execution_status(execution_id)

    if status:
        emit('execution_status', status.to_dict())
    else:
        emit('error', {
            'message': f'Exécution {execution_id} introuvable'
        })
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Fonctions utilitaires pour émettre des événements depuis le backend
|
||||
# ============================================================================
|
||||
|
||||
def broadcast_execution_started(execution_id: str, workflow_id: str):
    """Broadcast an 'execution_started' event to the execution's room.

    Args:
        execution_id: execution identifier (also used as the room name)
        workflow_id: identifier of the workflow being executed

    Requirement: 6.3
    """
    payload = {
        'execution_id': execution_id,
        'workflow_id': workflow_id,
        'timestamp': datetime.now().isoformat(),
    }
    socketio.emit('execution_started', payload, room=execution_id)

    logger.info(f"Événement execution_started diffusé pour {execution_id}")
|
||||
|
||||
|
||||
def broadcast_node_status(execution_id: str, node_id: str, status: str, data: dict = None):
    """Broadcast a 'node_status' event for one node of an execution.

    Args:
        execution_id: execution identifier
        node_id: node identifier
        status: new status (running, success, failed)
        data: optional extra fields merged into the event payload

    Requirement: 6.3
    """
    payload = {
        'execution_id': execution_id,
        'node_id': node_id,
        'status': status,
        'timestamp': datetime.now().isoformat(),
    }
    # Merge optional extras (no-op when data is None/empty).
    payload.update(data or {})

    socketio.emit('node_status', payload, room=execution_id)

    logger.debug(f"Événement node_status diffusé: {node_id} -> {status}")
|
||||
|
||||
|
||||
def broadcast_execution_progress(execution_id: str, progress: dict):
    """Broadcast an 'execution_progress' event to the execution's room.

    Args:
        execution_id: execution identifier
        progress: progress data

    Requirement: 6.3
    """
    payload = {
        'execution_id': execution_id,
        'progress': progress,
        'timestamp': datetime.now().isoformat(),
    }
    socketio.emit('execution_progress', payload, room=execution_id)
|
||||
|
||||
|
||||
def broadcast_execution_complete(execution_id: str, status: str, result: dict = None):
    """Broadcast an 'execution_complete' event to the execution's room.

    Args:
        execution_id: execution identifier
        status: final status (completed, failed, cancelled)
        result: optional execution result, attached under 'result'

    Requirement: 6.4
    """
    payload = {
        'execution_id': execution_id,
        'status': status,
        'timestamp': datetime.now().isoformat(),
    }
    if result:
        payload['result'] = result

    socketio.emit('execution_complete', payload, room=execution_id)

    logger.info(f"Événement execution_complete diffusé pour {execution_id}: {status}")
|
||||
|
||||
|
||||
def broadcast_execution_log(execution_id: str, log_entry: dict):
    """Broadcast a new execution log entry to the execution's room.

    Args:
        execution_id: execution identifier
        log_entry: log entry payload

    Requirement: 6.3
    """
    socketio.emit(
        'execution_log',
        {'execution_id': execution_id, 'log': log_entry},
        room=execution_id,
    )
|
||||
|
||||
|
||||
def broadcast_execution_error(execution_id: str, error_message: str, node_id: str = None):
    """Broadcast an 'execution_error' event to the execution's room.

    Args:
        execution_id: execution identifier
        error_message: error message
        node_id: optional id of the failing node

    Requirement: 6.4
    """
    payload = {
        'execution_id': execution_id,
        'error': error_message,
        'timestamp': datetime.now().isoformat(),
    }
    if node_id:
        payload['node_id'] = node_id

    socketio.emit('execution_error', payload, room=execution_id)

    logger.error(f"Événement execution_error diffusé pour {execution_id}: {error_message}")
|
||||
|
||||
|
||||
# datetime import for the timestamps used by the broadcast helpers above.
# NOTE(review): this import sits at the bottom of the module; it works only
# because the helpers resolve the name at call time, after module import
# completes — moving it into the top import block would be safer.
from datetime import datetime
|
||||
|
||||
# ============================================================================
|
||||
# COACHING Mode WebSocket Handlers
|
||||
# ============================================================================
|
||||
|
||||
# Tracks active COACHING sessions.
# Actual shape (see handle_subscribe_coaching, which creates entries):
#   {execution_id: {'clients': [sid, ...],
#                   'pending_suggestion': dict | None,
#                   'stats': {'suggestions_made': int, 'accepted': int,
#                             'rejected': int, 'corrected': int}}}
coaching_sessions = {}
|
||||
|
||||
|
||||
@socketio.on('subscribe_coaching')
def handle_subscribe_coaching(data):
    """Subscribe the calling client to COACHING events of an execution.

    Args:
        data: {'execution_id': str}

    Joins the dedicated coaching room, creates the session record on first
    subscription, and acknowledges with the current session statistics.
    """
    sid = request.sid
    execution_id = data.get('execution_id')

    if not execution_id:
        emit('error', {'message': 'execution_id requis'})
        return

    # Each execution has its own coaching room.
    join_room(f"coaching_{execution_id}")

    # Create the session record on first subscription.
    session = coaching_sessions.setdefault(execution_id, {
        'clients': [],
        'pending_suggestion': None,
        'stats': {
            'suggestions_made': 0,
            'accepted': 0,
            'rejected': 0,
            'corrected': 0,
        },
    })
    if sid not in session['clients']:
        session['clients'].append(sid)

    logger.info(f"Client {sid} souscrit au COACHING {execution_id}")

    emit('coaching_subscribed', {
        'execution_id': execution_id,
        'message': 'Souscrit aux événements COACHING',
        'stats': session['stats'],
    })
|
||||
|
||||
|
||||
@socketio.on('unsubscribe_coaching')
def handle_unsubscribe_coaching(data):
    """Unsubscribe the calling client from COACHING events.

    Args:
        data: {'execution_id': str}
    """
    sid = request.sid
    execution_id = data.get('execution_id')

    if not execution_id:
        emit('error', {'message': 'execution_id requis'})
        return

    leave_room(f"coaching_{execution_id}")

    # Forget the client; drop the session once no client remains.
    session = coaching_sessions.get(execution_id)
    if session is not None and sid in session['clients']:
        session['clients'].remove(sid)
        if not session['clients']:
            del coaching_sessions[execution_id]

    logger.info(f"Client {sid} désabonné du COACHING {execution_id}")

    emit('coaching_unsubscribed', {'execution_id': execution_id})
|
||||
|
||||
|
||||
@socketio.on('coaching_decision')
def handle_coaching_decision(data):
    """Receive a COACHING decision from the user and forward it.

    Args:
        data: {
            'execution_id': str,
            'decision': str ('accept', 'reject', 'correct', 'manual', 'skip'),
            'correction': dict (optional, when decision == 'correct'),
            'feedback': str (optional)
        }

    Validates the payload, updates the session statistics, submits the
    decision to the execution loop, and broadcasts it to the coaching room.
    """
    client_id = request.sid
    execution_id = data.get('execution_id')
    decision = data.get('decision')

    if not execution_id or not decision:
        emit('error', {'message': 'execution_id et decision requis'})
        return

    valid_decisions = ['accept', 'reject', 'correct', 'manual', 'skip']
    if decision not in valid_decisions:
        emit('error', {'message': f'decision invalide. Valeurs: {valid_decisions}'})
        return

    logger.info(f"Décision COACHING reçue: {execution_id} -> {decision}")

    # Update session statistics.
    # NOTE(review): stats are counted before submission, so they also count
    # decisions whose submission fails below — confirm this is intended.
    if execution_id in coaching_sessions:
        stats = coaching_sessions[execution_id]['stats']
        if decision == 'accept':
            stats['accepted'] += 1
        elif decision == 'reject':
            stats['rejected'] += 1
        elif decision == 'correct':
            stats['corrected'] += 1

    # Forward the decision to the execution backend.
    try:
        # get_executor is already imported at module level; the previous
        # local re-import was redundant.
        executor = get_executor()

        # Build the COACHING response payload.
        coaching_response = {
            'decision': decision,
            'correction': data.get('correction'),
            'feedback': data.get('feedback'),
            'executed_manually': decision == 'manual'
        }

        # Hand the response to the execution loop.
        result = executor.submit_coaching_decision(execution_id, coaching_response)

        if result:
            emit('coaching_decision_accepted', {
                'execution_id': execution_id,
                'decision': decision,
                'message': 'Décision enregistrée'
            })

            # Broadcast the decision to every client in the coaching room.
            coaching_room = f"coaching_{execution_id}"
            socketio.emit('coaching_decision_broadcast', {
                'execution_id': execution_id,
                'decision': decision,
                'by_client': client_id,
                'timestamp': datetime.now().isoformat()
            }, room=coaching_room)
        else:
            emit('error', {'message': 'Impossible de soumettre la décision'})

    except Exception as e:
        logger.error(f"Erreur lors de la soumission COACHING: {e}")
        emit('error', {'message': str(e)})
|
||||
|
||||
|
||||
@socketio.on('get_coaching_stats')
def handle_get_coaching_stats(data):
    """Emit the COACHING statistics of an execution to the caller.

    Args:
        data: {'execution_id': str}

    Emits an empty stats dict when no coaching session exists.
    """
    execution_id = data.get('execution_id')

    if not execution_id:
        emit('error', {'message': 'execution_id requis'})
        return

    session = coaching_sessions.get(execution_id)
    stats = session['stats'] if session else {}

    emit('coaching_stats', {
        'execution_id': execution_id,
        'stats': stats,
    })
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Fonctions pour émettre des événements COACHING depuis le backend
|
||||
# ============================================================================
|
||||
|
||||
def broadcast_coaching_suggestion(
    execution_id: str,
    action_info: dict,
    screenshot_path: str = None,
    context: dict = None
):
    """Broadcast a COACHING action suggestion to the coaching room.

    Args:
        execution_id: execution identifier
        action_info: suggested action ('action', 'target', 'params' keys)
        screenshot_path: optional path to a screenshot
        context: optional extra context
    """
    payload = {
        'execution_id': execution_id,
        'action': action_info.get('action', 'unknown'),
        'target': action_info.get('target', {}),
        'params': action_info.get('params', {}),
        'screenshot_path': screenshot_path,
        'context': context or {},
        'timestamp': datetime.now().isoformat(),
    }

    # Keep the suggestion pending on the session and bump the counter.
    session = coaching_sessions.get(execution_id)
    if session is not None:
        session['pending_suggestion'] = payload
        session['stats']['suggestions_made'] += 1

    socketio.emit('coaching_suggestion', payload, room=f"coaching_{execution_id}")

    logger.info(f"Suggestion COACHING diffusée: {execution_id} -> {action_info.get('action')}")
|
||||
|
||||
|
||||
def broadcast_coaching_action_result(
    execution_id: str,
    action_info: dict,
    success: bool,
    result: dict = None,
    error: str = None
):
    """Broadcast the outcome of a COACHING action to the coaching room.

    Args:
        execution_id: execution identifier
        action_info: description of the action that ran
        success: whether the action succeeded
        result: optional action result
        error: optional error message on failure
    """
    payload = {
        'execution_id': execution_id,
        'action': action_info.get('action', 'unknown'),
        'success': success,
        'timestamp': datetime.now().isoformat(),
    }
    if result:
        payload['result'] = result
    if error:
        payload['error'] = error

    socketio.emit('coaching_action_result', payload, room=f"coaching_{execution_id}")

    logger.info(f"Résultat action COACHING: {execution_id} -> success={success}")
|
||||
|
||||
|
||||
def broadcast_coaching_stats_update(execution_id: str, stats: dict):
    """Broadcast updated COACHING statistics to the coaching room.

    Args:
        execution_id: execution identifier
        stats: updated statistics
    """
    socketio.emit(
        'coaching_stats_update',
        {
            'execution_id': execution_id,
            'stats': stats,
            'timestamp': datetime.now().isoformat(),
        },
        room=f"coaching_{execution_id}",
    )
|
||||
|
||||
|
||||
def broadcast_coaching_session_end(execution_id: str, final_stats: dict):
    """Broadcast the end of a COACHING session and clean it up.

    Args:
        execution_id: execution identifier
        final_stats: final statistics of the session
    """
    socketio.emit(
        'coaching_session_end',
        {
            'execution_id': execution_id,
            'stats': final_stats,
            'timestamp': datetime.now().isoformat(),
        },
        room=f"coaching_{execution_id}",
    )

    # Drop the session record (no-op when it is already gone).
    coaching_sessions.pop(execution_id, None)

    logger.info(f"Session COACHING terminée: {execution_id}")
|
||||
|
||||
|
||||
# Handlers are registered in the application (via the @socketio.on decorators
# above); log that this module was imported.
logger.info("WebSocket handlers enregistrés (incluant COACHING)")
|
||||
Reference in New Issue
Block a user