From 38a1a5ddd8d8cf3b2f7cfcc698bfd33eee933659 Mon Sep 17 00:00:00 2001 From: Dom Date: Mon, 19 Jan 2026 08:40:54 +0100 Subject: [PATCH] feat(coaching): Implement complete COACHING mode infrastructure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add comprehensive COACHING mode system with: Backend: - core/coaching module with session persistence and metrics - CoachingSessionPersistence for pause/resume sessions - CoachingMetricsCollector with learning progress tracking - REST API blueprint for coaching sessions management - Execution integration with COACHING mode support Frontend: - CoachingPanel component with keyboard shortcuts - Decision buttons (accept/reject/correct/manual/skip) - Real-time stats display and correction editor - CorrectionPacksDashboard for pack visualization - WebSocket hooks for real-time COACHING events Metrics & Monitoring: - WorkflowLearningMetrics with confidence scoring - GlobalCoachingMetrics for system-wide analytics - AUTO mode readiness detection (85% acceptance threshold) - Learning progress levels (OBSERVATION → COACHING → AUTO) Tests: - E2E tests for complete OBSERVATION → AUTO journey - Session persistence and recovery tests - Metrics threshold validation tests Co-Authored-By: Claude Opus 4.5 --- core/coaching/__init__.py | 40 + core/coaching/metrics.py | 462 +++++++ core/coaching/session_persistence.py | 553 +++++++++ tests/test_coaching_e2e.py | 486 ++++++++ .../backend/api/coaching_sessions.py | 531 ++++++++ .../backend/api/executions.py | 267 ++++ visual_workflow_builder/backend/app.py | 7 + .../backend/services/execution_integration.py | 1073 +++++++++++++++++ .../CoachingPanel/CoachingDecisionButtons.tsx | 102 ++ .../CoachingPanel/CoachingPanel.css | 695 +++++++++++ .../CoachingPanel/CoachingStatsDisplay.tsx | 153 +++ .../CoachingPanel/CoachingSuggestionCard.tsx | 150 +++ .../CoachingPanel/CorrectionEditor.tsx | 235 ++++ .../src/components/CoachingPanel/index.tsx | 297 +++++ 
.../CorrectionPacksDashboard.css | 838 +++++++++++++ .../CreatePackModal.tsx | 156 +++ .../CorrectionPacksDashboard/PackDetails.tsx | 321 +++++ .../CorrectionPacksDashboard/PackList.tsx | 135 +++ .../CorrectionPacksDashboard/index.tsx | 202 ++++ .../src/hooks/useCoachingWebSocket.ts | 279 +++++ .../frontend/src/hooks/useCorrectionPacks.ts | 287 +++++ 21 files changed, 7269 insertions(+) create mode 100644 core/coaching/__init__.py create mode 100644 core/coaching/metrics.py create mode 100644 core/coaching/session_persistence.py create mode 100644 tests/test_coaching_e2e.py create mode 100644 visual_workflow_builder/backend/api/coaching_sessions.py create mode 100644 visual_workflow_builder/backend/api/executions.py create mode 100644 visual_workflow_builder/backend/services/execution_integration.py create mode 100644 visual_workflow_builder/frontend/src/components/CoachingPanel/CoachingDecisionButtons.tsx create mode 100644 visual_workflow_builder/frontend/src/components/CoachingPanel/CoachingPanel.css create mode 100644 visual_workflow_builder/frontend/src/components/CoachingPanel/CoachingStatsDisplay.tsx create mode 100644 visual_workflow_builder/frontend/src/components/CoachingPanel/CoachingSuggestionCard.tsx create mode 100644 visual_workflow_builder/frontend/src/components/CoachingPanel/CorrectionEditor.tsx create mode 100644 visual_workflow_builder/frontend/src/components/CoachingPanel/index.tsx create mode 100644 visual_workflow_builder/frontend/src/components/CorrectionPacksDashboard/CorrectionPacksDashboard.css create mode 100644 visual_workflow_builder/frontend/src/components/CorrectionPacksDashboard/CreatePackModal.tsx create mode 100644 visual_workflow_builder/frontend/src/components/CorrectionPacksDashboard/PackDetails.tsx create mode 100644 visual_workflow_builder/frontend/src/components/CorrectionPacksDashboard/PackList.tsx create mode 100644 visual_workflow_builder/frontend/src/components/CorrectionPacksDashboard/index.tsx create mode 100644 
"""
COACHING Mode Module

Public entry point for COACHING mode functionality:
- Session persistence and recovery
- Session state management
- Statistics tracking
- Metrics and monitoring
"""

from .session_persistence import (
    CoachingSessionPersistence,
    CoachingSessionState,
    get_coaching_persistence,
    SessionStatus,
    CoachingDecisionRecord,
)

from .metrics import (
    CoachingMetricsCollector,
    WorkflowLearningMetrics,
    GlobalCoachingMetrics,
    LearningProgress,
    get_metrics_collector,
)

# Explicit public API: everything re-exported here is considered stable.
__all__ = [
    # Session persistence
    'CoachingSessionPersistence',
    'CoachingSessionState',
    'get_coaching_persistence',
    'SessionStatus',
    'CoachingDecisionRecord',
    # Metrics
    'CoachingMetricsCollector',
    'WorkflowLearningMetrics',
    'GlobalCoachingMetrics',
    'LearningProgress',
    'get_metrics_collector',
]
"""
COACHING Metrics Module

Provides comprehensive metrics and monitoring for COACHING mode:
- Session statistics aggregation
- Learning progress tracking
- Performance analytics
- Recommendations for mode transitions
"""

from dataclasses import dataclass, field
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any, Tuple
from enum import Enum

from .session_persistence import (
    CoachingSessionPersistence,
    CoachingSessionState,
    SessionStatus,
    get_coaching_persistence
)


class LearningProgress(str, Enum):
    """Learning progress levels for workflow."""
    NOT_STARTED = "not_started"
    OBSERVATION = "observation"    # Still collecting data
    LEARNING = "learning"          # Actively learning from corrections
    COACHING = "coaching"          # User coaching mode
    READY_FOR_AUTO = "ready"       # Ready for autonomous mode
    AUTONOMOUS = "autonomous"      # Running autonomously


@dataclass
class WorkflowLearningMetrics:
    """Metrics for a single workflow's learning progress."""
    workflow_id: str
    total_sessions: int = 0
    completed_sessions: int = 0
    total_steps_coached: int = 0
    total_decisions: int = 0
    accepted: int = 0
    rejected: int = 0
    corrected: int = 0
    manual_executions: int = 0
    skipped: int = 0

    # Computed metrics (rates in [0, 1], derived from the counters above)
    acceptance_rate: float = 0.0
    correction_rate: float = 0.0
    completion_rate: float = 0.0

    # Time metrics
    avg_session_duration_seconds: float = 0.0
    avg_decision_time_seconds: float = 0.0

    # Learning progress
    learning_progress: LearningProgress = LearningProgress.NOT_STARTED
    confidence_score: float = 0.0
    ready_for_auto: bool = False

    # Recommendations (human-readable, user-facing strings)
    recommendations: List[str] = field(default_factory=list)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-compatible dictionary."""
        return {
            'workflow_id': self.workflow_id,
            'total_sessions': self.total_sessions,
            'completed_sessions': self.completed_sessions,
            'total_steps_coached': self.total_steps_coached,
            'total_decisions': self.total_decisions,
            'accepted': self.accepted,
            'rejected': self.rejected,
            'corrected': self.corrected,
            'manual_executions': self.manual_executions,
            'skipped': self.skipped,
            'acceptance_rate': self.acceptance_rate,
            'correction_rate': self.correction_rate,
            'completion_rate': self.completion_rate,
            'avg_session_duration_seconds': self.avg_session_duration_seconds,
            'avg_decision_time_seconds': self.avg_decision_time_seconds,
            'learning_progress': self.learning_progress.value,
            'confidence_score': self.confidence_score,
            'ready_for_auto': self.ready_for_auto,
            'recommendations': self.recommendations
        }


@dataclass
class GlobalCoachingMetrics:
    """Global metrics across all workflows."""
    total_workflows: int = 0
    total_sessions: int = 0
    active_sessions: int = 0
    completed_sessions: int = 0
    failed_sessions: int = 0

    total_decisions: int = 0
    total_accepted: int = 0
    total_rejected: int = 0
    total_corrected: int = 0

    overall_acceptance_rate: float = 0.0
    overall_correction_rate: float = 0.0

    workflows_ready_for_auto: int = 0
    workflows_in_learning: int = 0

    # Time-based metrics
    sessions_last_24h: int = 0
    decisions_last_24h: int = 0

    # Top workflows: list of (workflow_id, count) pairs, largest first
    top_workflows_by_sessions: List[Tuple[str, int]] = field(default_factory=list)
    top_workflows_by_corrections: List[Tuple[str, int]] = field(default_factory=list)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-compatible dictionary."""
        return {
            'total_workflows': self.total_workflows,
            'total_sessions': self.total_sessions,
            'active_sessions': self.active_sessions,
            'completed_sessions': self.completed_sessions,
            'failed_sessions': self.failed_sessions,
            'total_decisions': self.total_decisions,
            'total_accepted': self.total_accepted,
            'total_rejected': self.total_rejected,
            'total_corrected': self.total_corrected,
            'overall_acceptance_rate': self.overall_acceptance_rate,
            'overall_correction_rate': self.overall_correction_rate,
            'workflows_ready_for_auto': self.workflows_ready_for_auto,
            'workflows_in_learning': self.workflows_in_learning,
            'sessions_last_24h': self.sessions_last_24h,
            'decisions_last_24h': self.decisions_last_24h,
            'top_workflows_by_sessions': self.top_workflows_by_sessions,
            'top_workflows_by_corrections': self.top_workflows_by_corrections
        }


class CoachingMetricsCollector:
    """
    Collector and analyzer for COACHING metrics.

    Provides methods to:
    - Calculate workflow-specific learning metrics
    - Determine readiness for autonomous mode
    - Generate recommendations for improvement
    - Track global system health
    """

    # Thresholds for auto mode readiness
    MIN_SESSIONS_FOR_AUTO = 5
    MIN_ACCEPTANCE_RATE_FOR_AUTO = 0.85
    MAX_CORRECTION_RATE_FOR_AUTO = 0.10
    MIN_CONFIDENCE_FOR_AUTO = 0.80

    def __init__(self, persistence: Optional[CoachingSessionPersistence] = None):
        """
        Initialize metrics collector.

        Args:
            persistence: Session persistence instance (defaults to the
                global singleton from get_coaching_persistence())
        """
        self.persistence = persistence or get_coaching_persistence()

    def get_workflow_metrics(self, workflow_id: str) -> WorkflowLearningMetrics:
        """
        Calculate comprehensive metrics for a workflow.

        Args:
            workflow_id: Workflow ID

        Returns:
            WorkflowLearningMetrics with all computed values
        """
        # Get all sessions for this workflow
        sessions = self.persistence.list_sessions(workflow_id=workflow_id, limit=1000)

        metrics = WorkflowLearningMetrics(workflow_id=workflow_id)
        metrics.total_sessions = len(sessions)

        if not sessions:
            metrics.learning_progress = LearningProgress.NOT_STARTED
            metrics.recommendations = ["Demarrez une premiere session COACHING"]
            return metrics

        # Load full sessions for detailed analysis (the index only stores
        # summaries; decisions/stats live in the per-session files)
        full_sessions: List[CoachingSessionState] = []
        for session_info in sessions:
            session = self.persistence.load_session(session_info['session_id'])
            if session:
                full_sessions.append(session)

        # Calculate basic stats
        total_duration = 0.0
        for session in full_sessions:
            if session.status == SessionStatus.COMPLETED:
                metrics.completed_sessions += 1

            # Aggregate decision stats
            metrics.total_steps_coached += len(session.decisions)
            metrics.total_decisions += session.stats.get('suggestions_made', 0)
            metrics.accepted += session.stats.get('accepted', 0)
            metrics.rejected += session.stats.get('rejected', 0)
            metrics.corrected += session.stats.get('corrected', 0)
            metrics.manual_executions += session.stats.get('manual_executions', 0)
            metrics.skipped += session.stats.get('skipped', 0)

            # Calculate duration (only sessions with both timestamps count)
            if session.started_at and session.completed_at:
                try:
                    start = datetime.fromisoformat(session.started_at)
                    end = datetime.fromisoformat(session.completed_at)
                    total_duration += (end - start).total_seconds()
                except (ValueError, TypeError):
                    # Malformed timestamp: skip this session's duration.
                    # (Narrowed from a bare except so real errors propagate.)
                    pass

        # Calculate rates over explicitly-decided suggestions only
        # (manual/skip are intentionally excluded from the denominator)
        total_decisions = metrics.accepted + metrics.rejected + metrics.corrected
        if total_decisions > 0:
            metrics.acceptance_rate = metrics.accepted / total_decisions
            metrics.correction_rate = metrics.corrected / total_decisions

        if metrics.total_sessions > 0:
            metrics.completion_rate = metrics.completed_sessions / metrics.total_sessions
        if metrics.completed_sessions > 0:
            metrics.avg_session_duration_seconds = total_duration / metrics.completed_sessions

        # NOTE(review): total_duration only sums COMPLETED sessions while
        # total_decisions counts all sessions, so this average is approximate.
        if metrics.total_decisions > 0 and total_duration > 0:
            metrics.avg_decision_time_seconds = total_duration / metrics.total_decisions

        # Determine learning progress
        metrics.learning_progress = self._determine_learning_progress(metrics)

        # Calculate confidence score
        metrics.confidence_score = self._calculate_confidence_score(metrics)

        # Check if ready for auto
        metrics.ready_for_auto = self._check_ready_for_auto(metrics)

        # Generate recommendations
        metrics.recommendations = self._generate_recommendations(metrics)

        return metrics

    def get_global_metrics(self) -> GlobalCoachingMetrics:
        """
        Calculate global metrics across all workflows.

        Returns:
            GlobalCoachingMetrics with aggregated data
        """
        metrics = GlobalCoachingMetrics()

        # Get all sessions
        all_sessions = self.persistence.list_sessions(limit=10000)
        metrics.total_sessions = len(all_sessions)

        # Track unique workflows
        workflow_stats: Dict[str, Dict] = {}
        now = datetime.now()
        last_24h = now - timedelta(hours=24)

        for session_info in all_sessions:
            workflow_id = session_info.get('workflow_id', 'unknown')
            status = session_info.get('status', 'unknown')

            # Initialize workflow stats
            if workflow_id not in workflow_stats:
                workflow_stats[workflow_id] = {
                    'sessions': 0,
                    'corrections': 0
                }
            workflow_stats[workflow_id]['sessions'] += 1

            # Count by status
            if status == 'active':
                metrics.active_sessions += 1
            elif status == 'completed':
                metrics.completed_sessions += 1
            elif status == 'failed':
                metrics.failed_sessions += 1

            # Check last 24h
            try:
                updated_at = datetime.fromisoformat(session_info.get('updated_at', ''))
                if updated_at > last_24h:
                    metrics.sessions_last_24h += 1
            except (ValueError, TypeError):
                # Missing or malformed updated_at: not counted in last-24h.
                pass

            # Load full session for decision stats
            session = self.persistence.load_session(session_info['session_id'])
            if session:
                metrics.total_decisions += session.stats.get('suggestions_made', 0)
                metrics.total_accepted += session.stats.get('accepted', 0)
                metrics.total_rejected += session.stats.get('rejected', 0)
                metrics.total_corrected += session.stats.get('corrected', 0)

                workflow_stats[workflow_id]['corrections'] += session.stats.get('corrected', 0)

                # Decisions in last 24h
                for decision in session.decisions:
                    try:
                        decision_time = datetime.fromisoformat(decision.timestamp)
                        if decision_time > last_24h:
                            metrics.decisions_last_24h += 1
                    except (ValueError, TypeError):
                        pass

        metrics.total_workflows = len(workflow_stats)

        # Calculate overall rates
        total_decided = metrics.total_accepted + metrics.total_rejected + metrics.total_corrected
        if total_decided > 0:
            metrics.overall_acceptance_rate = metrics.total_accepted / total_decided
            metrics.overall_correction_rate = metrics.total_corrected / total_decided

        # Count workflows by learning state
        # NOTE(review): get_workflow_metrics re-reads every session file per
        # workflow, so this loop is O(workflows * sessions) disk reads.
        for workflow_id in workflow_stats:
            wf_metrics = self.get_workflow_metrics(workflow_id)
            if wf_metrics.ready_for_auto:
                metrics.workflows_ready_for_auto += 1
            elif wf_metrics.learning_progress in [LearningProgress.LEARNING, LearningProgress.COACHING]:
                metrics.workflows_in_learning += 1

        # Top workflows
        sorted_by_sessions = sorted(
            workflow_stats.items(),
            key=lambda x: x[1]['sessions'],
            reverse=True
        )[:5]
        metrics.top_workflows_by_sessions = [
            (wf_id, stats['sessions']) for wf_id, stats in sorted_by_sessions
        ]

        sorted_by_corrections = sorted(
            workflow_stats.items(),
            key=lambda x: x[1]['corrections'],
            reverse=True
        )[:5]
        metrics.top_workflows_by_corrections = [
            (wf_id, stats['corrections']) for wf_id, stats in sorted_by_corrections
        ]

        return metrics

    def _determine_learning_progress(self, metrics: WorkflowLearningMetrics) -> LearningProgress:
        """Determine the learning progress level from aggregated metrics."""
        if metrics.total_sessions == 0:
            return LearningProgress.NOT_STARTED

        if metrics.total_sessions < 3:
            return LearningProgress.OBSERVATION

        if metrics.acceptance_rate < 0.5:
            return LearningProgress.LEARNING

        if metrics.acceptance_rate >= self.MIN_ACCEPTANCE_RATE_FOR_AUTO and \
           metrics.correction_rate <= self.MAX_CORRECTION_RATE_FOR_AUTO and \
           metrics.total_sessions >= self.MIN_SESSIONS_FOR_AUTO:
            return LearningProgress.READY_FOR_AUTO

        return LearningProgress.COACHING

    def _calculate_confidence_score(self, metrics: WorkflowLearningMetrics) -> float:
        """Calculate overall confidence score (0-1) as a weighted blend."""
        if metrics.total_decisions == 0:
            return 0.0

        # Weighted factors (sum to 1.0)
        acceptance_weight = 0.4
        correction_weight = 0.3
        completion_weight = 0.2
        volume_weight = 0.1

        # Acceptance component (higher is better)
        acceptance_score = metrics.acceptance_rate

        # Correction component (lower is better; doubled so 50% corrections
        # already zeroes this component)
        correction_score = max(0, 1 - metrics.correction_rate * 2)

        # Completion component
        completion_score = metrics.completion_rate

        # Volume component (normalized, caps at 10 sessions)
        volume_score = min(1, metrics.total_sessions / 10)

        confidence = (
            acceptance_weight * acceptance_score +
            correction_weight * correction_score +
            completion_weight * completion_score +
            volume_weight * volume_score
        )

        return round(confidence, 3)

    def _check_ready_for_auto(self, metrics: WorkflowLearningMetrics) -> bool:
        """Check if workflow meets all thresholds for autonomous mode."""
        return (
            metrics.total_sessions >= self.MIN_SESSIONS_FOR_AUTO and
            metrics.acceptance_rate >= self.MIN_ACCEPTANCE_RATE_FOR_AUTO and
            metrics.correction_rate <= self.MAX_CORRECTION_RATE_FOR_AUTO and
            metrics.confidence_score >= self.MIN_CONFIDENCE_FOR_AUTO
        )

    def _generate_recommendations(self, metrics: WorkflowLearningMetrics) -> List[str]:
        """Generate actionable, user-facing recommendations (French)."""
        recommendations = []

        if metrics.total_sessions == 0:
            recommendations.append("Demarrez votre premiere session COACHING pour commencer l'apprentissage")
            return recommendations

        if metrics.total_sessions < self.MIN_SESSIONS_FOR_AUTO:
            remaining = self.MIN_SESSIONS_FOR_AUTO - metrics.total_sessions
            recommendations.append(f"Completez {remaining} session(s) supplementaire(s) pour atteindre le minimum requis")

        if metrics.acceptance_rate < self.MIN_ACCEPTANCE_RATE_FOR_AUTO:
            current_pct = round(metrics.acceptance_rate * 100, 1)
            target_pct = round(self.MIN_ACCEPTANCE_RATE_FOR_AUTO * 100, 1)
            recommendations.append(
                f"Ameliorez le taux d'acceptation de {current_pct}% a {target_pct}% "
                "en ajustant les selecteurs d'elements"
            )

        if metrics.correction_rate > self.MAX_CORRECTION_RATE_FOR_AUTO:
            recommendations.append(
                "Le taux de correction est eleve. Verifiez les elements visuels "
                "qui necessitent souvent des corrections"
            )

        if metrics.rejected > metrics.total_sessions * 2:
            recommendations.append(
                "Beaucoup d'actions rejetees. Revisez le workflow pour supprimer "
                "les etapes incorrectes"
            )

        if metrics.manual_executions > metrics.total_decisions * 0.1:
            recommendations.append(
                "Plusieurs executions manuelles detectees. Considerez automatiser "
                "ces actions frequentes"
            )

        if metrics.ready_for_auto:
            recommendations.append(
                "Ce workflow est pret pour le mode autonome ! "
                "Vous pouvez le passer en mode AUTO"
            )

        return recommendations


# Singleton instance
_metrics_collector: Optional[CoachingMetricsCollector] = None


def get_metrics_collector(persistence: Optional[CoachingSessionPersistence] = None) -> CoachingMetricsCollector:
    """Get or create the global metrics collector.

    Note: *persistence* is only honored on the first call; subsequent
    calls return the already-created singleton.
    """
    global _metrics_collector
    if _metrics_collector is None:
        _metrics_collector = CoachingMetricsCollector(persistence)
    return _metrics_collector
single coaching decision.""" + step_index: int + node_id: str + action_type: str + decision: str # accept, reject, correct, manual, skip + correction: Optional[Dict[str, Any]] = None + feedback: Optional[str] = None + timestamp: str = field(default_factory=lambda: datetime.now().isoformat()) + execution_success: Optional[bool] = None + + def to_dict(self) -> Dict[str, Any]: + return asdict(self) + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> 'CoachingDecisionRecord': + return cls(**data) + + +@dataclass +class CoachingSessionState: + """ + Complete state of a COACHING session. + + This state can be persisted and recovered to resume an interrupted session. + """ + session_id: str + workflow_id: str + execution_id: str + status: SessionStatus = SessionStatus.ACTIVE + current_step_index: int = 0 + total_steps: int = 0 + decisions: List[CoachingDecisionRecord] = field(default_factory=list) + stats: Dict[str, int] = field(default_factory=lambda: { + 'suggestions_made': 0, + 'accepted': 0, + 'rejected': 0, + 'corrected': 0, + 'manual_executions': 0, + 'skipped': 0 + }) + variables: Dict[str, Any] = field(default_factory=dict) + started_at: str = field(default_factory=lambda: datetime.now().isoformat()) + updated_at: str = field(default_factory=lambda: datetime.now().isoformat()) + completed_at: Optional[str] = None + error_message: Optional[str] = None + metadata: Dict[str, Any] = field(default_factory=dict) + + def to_dict(self) -> Dict[str, Any]: + data = { + 'session_id': self.session_id, + 'workflow_id': self.workflow_id, + 'execution_id': self.execution_id, + 'status': self.status.value if isinstance(self.status, SessionStatus) else self.status, + 'current_step_index': self.current_step_index, + 'total_steps': self.total_steps, + 'decisions': [d.to_dict() for d in self.decisions], + 'stats': self.stats, + 'variables': self.variables, + 'started_at': self.started_at, + 'updated_at': self.updated_at, + 'completed_at': self.completed_at, + 
'error_message': self.error_message, + 'metadata': self.metadata + } + return data + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> 'CoachingSessionState': + decisions = [ + CoachingDecisionRecord.from_dict(d) + for d in data.get('decisions', []) + ] + status = data.get('status', 'active') + if isinstance(status, str): + status = SessionStatus(status) + + return cls( + session_id=data['session_id'], + workflow_id=data['workflow_id'], + execution_id=data['execution_id'], + status=status, + current_step_index=data.get('current_step_index', 0), + total_steps=data.get('total_steps', 0), + decisions=decisions, + stats=data.get('stats', {}), + variables=data.get('variables', {}), + started_at=data.get('started_at', datetime.now().isoformat()), + updated_at=data.get('updated_at', datetime.now().isoformat()), + completed_at=data.get('completed_at'), + error_message=data.get('error_message'), + metadata=data.get('metadata', {}) + ) + + def update_timestamp(self) -> None: + """Update the updated_at timestamp.""" + self.updated_at = datetime.now().isoformat() + + def add_decision(self, decision: CoachingDecisionRecord) -> None: + """Add a decision and update stats.""" + self.decisions.append(decision) + self.stats['suggestions_made'] += 1 + + if decision.decision == 'accept': + self.stats['accepted'] += 1 + elif decision.decision == 'reject': + self.stats['rejected'] += 1 + elif decision.decision == 'correct': + self.stats['corrected'] += 1 + elif decision.decision == 'manual': + self.stats['manual_executions'] += 1 + elif decision.decision == 'skip': + self.stats['skipped'] += 1 + + self.current_step_index += 1 + self.update_timestamp() + + def get_acceptance_rate(self) -> float: + """Calculate acceptance rate.""" + total = self.stats['accepted'] + self.stats['rejected'] + self.stats['corrected'] + if total == 0: + return 0.0 + return self.stats['accepted'] / total + + def get_correction_rate(self) -> float: + """Calculate correction rate.""" + total = 
self.stats['accepted'] + self.stats['rejected'] + self.stats['corrected'] + if total == 0: + return 0.0 + return self.stats['corrected'] / total + + def can_resume(self) -> bool: + """Check if session can be resumed.""" + return self.status in [SessionStatus.ACTIVE, SessionStatus.PAUSED] + + +class CoachingSessionPersistence: + """ + Persistence layer for COACHING sessions. + + Handles saving, loading, and managing COACHING session states. + """ + + def __init__(self, storage_path: Optional[Path] = None): + """ + Initialize persistence layer. + + Args: + storage_path: Path to store session data. Defaults to data/coaching_sessions + """ + if storage_path is None: + storage_path = Path(__file__).parent.parent.parent / 'data' / 'coaching_sessions' + self.storage_path = Path(storage_path) + self.storage_path.mkdir(parents=True, exist_ok=True) + + # Index file for quick lookup + self._index_file = self.storage_path / 'sessions_index.json' + self._index: Dict[str, Dict[str, Any]] = {} + self._lock = threading.Lock() + + self._load_index() + + def _load_index(self) -> None: + """Load the sessions index.""" + if self._index_file.exists(): + try: + with open(self._index_file, 'r') as f: + self._index = json.load(f) + except Exception as e: + print(f"Warning: Could not load sessions index: {e}") + self._index = {} + + def _save_index(self) -> None: + """Save the sessions index.""" + try: + temp_file = self._index_file.with_suffix('.tmp') + with open(temp_file, 'w') as f: + json.dump(self._index, f, indent=2) + shutil.move(str(temp_file), str(self._index_file)) + except Exception as e: + print(f"Warning: Could not save sessions index: {e}") + + def _session_file(self, session_id: str) -> Path: + """Get the path for a session file.""" + return self.storage_path / f"{session_id}.json" + + def create_session( + self, + workflow_id: str, + execution_id: str, + total_steps: int = 0, + variables: Optional[Dict[str, Any]] = None, + metadata: Optional[Dict[str, Any]] = None + ) -> 
CoachingSessionState: + """ + Create a new COACHING session. + + Args: + workflow_id: ID of the workflow being coached + execution_id: ID of the execution + total_steps: Total number of steps in the workflow + variables: Initial variables + metadata: Additional metadata + + Returns: + New session state + """ + session_id = f"coaching_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{uuid.uuid4().hex[:8]}" + + session = CoachingSessionState( + session_id=session_id, + workflow_id=workflow_id, + execution_id=execution_id, + total_steps=total_steps, + variables=variables or {}, + metadata=metadata or {} + ) + + self.save_session(session) + return session + + def save_session(self, session: CoachingSessionState) -> None: + """ + Save a session state to disk. + + Args: + session: Session state to save + """ + with self._lock: + session.update_timestamp() + + # Save session file + session_file = self._session_file(session.session_id) + try: + temp_file = session_file.with_suffix('.tmp') + with open(temp_file, 'w') as f: + json.dump(session.to_dict(), f, indent=2) + shutil.move(str(temp_file), str(session_file)) + except Exception as e: + raise RuntimeError(f"Failed to save session {session.session_id}: {e}") + + # Update index + self._index[session.session_id] = { + 'workflow_id': session.workflow_id, + 'execution_id': session.execution_id, + 'status': session.status.value if isinstance(session.status, SessionStatus) else session.status, + 'started_at': session.started_at, + 'updated_at': session.updated_at, + 'current_step': session.current_step_index, + 'total_steps': session.total_steps + } + self._save_index() + + def load_session(self, session_id: str) -> Optional[CoachingSessionState]: + """ + Load a session state from disk. 
+ + Args: + session_id: ID of the session to load + + Returns: + Session state or None if not found + """ + session_file = self._session_file(session_id) + if not session_file.exists(): + return None + + try: + with open(session_file, 'r') as f: + data = json.load(f) + return CoachingSessionState.from_dict(data) + except Exception as e: + print(f"Warning: Could not load session {session_id}: {e}") + return None + + def delete_session(self, session_id: str) -> bool: + """ + Delete a session. + + Args: + session_id: ID of the session to delete + + Returns: + True if deleted, False otherwise + """ + with self._lock: + session_file = self._session_file(session_id) + if session_file.exists(): + session_file.unlink() + + if session_id in self._index: + del self._index[session_id] + self._save_index() + return True + + return False + + def list_sessions( + self, + workflow_id: Optional[str] = None, + status: Optional[SessionStatus] = None, + limit: int = 100 + ) -> List[Dict[str, Any]]: + """ + List sessions with optional filters. + + Args: + workflow_id: Filter by workflow ID + status: Filter by status + limit: Maximum number of sessions to return + + Returns: + List of session summaries + """ + sessions = [] + + for session_id, info in self._index.items(): + # Apply filters + if workflow_id and info.get('workflow_id') != workflow_id: + continue + if status: + session_status = info.get('status', 'active') + if session_status != status.value: + continue + + sessions.append({ + 'session_id': session_id, + **info + }) + + # Sort by updated_at descending + sessions.sort(key=lambda x: x.get('updated_at', ''), reverse=True) + + return sessions[:limit] + + def get_resumable_sessions(self, workflow_id: str) -> List[CoachingSessionState]: + """ + Get all resumable sessions for a workflow. 
+ + Args: + workflow_id: Workflow ID to filter by + + Returns: + List of resumable session states + """ + resumable = [] + + for session_info in self.list_sessions(workflow_id=workflow_id): + status = session_info.get('status', 'active') + if status in ['active', 'paused']: + session = self.load_session(session_info['session_id']) + if session and session.can_resume(): + resumable.append(session) + + return resumable + + def pause_session(self, session_id: str) -> bool: + """ + Pause an active session. + + Args: + session_id: Session to pause + + Returns: + True if paused, False otherwise + """ + session = self.load_session(session_id) + if session and session.status == SessionStatus.ACTIVE: + session.status = SessionStatus.PAUSED + self.save_session(session) + return True + return False + + def resume_session(self, session_id: str) -> Optional[CoachingSessionState]: + """ + Resume a paused session. + + Args: + session_id: Session to resume + + Returns: + Resumed session or None if cannot resume + """ + session = self.load_session(session_id) + if session and session.can_resume(): + session.status = SessionStatus.ACTIVE + self.save_session(session) + return session + return None + + def complete_session( + self, + session_id: str, + success: bool = True, + error_message: Optional[str] = None + ) -> Optional[CoachingSessionState]: + """ + Mark a session as completed. + + Args: + session_id: Session to complete + success: Whether the session completed successfully + error_message: Error message if failed + + Returns: + Completed session or None + """ + session = self.load_session(session_id) + if session: + session.status = SessionStatus.COMPLETED if success else SessionStatus.FAILED + session.completed_at = datetime.now().isoformat() + session.error_message = error_message + self.save_session(session) + return session + return None + + def abandon_session(self, session_id: str) -> bool: + """ + Mark a session as abandoned. 
+ + Args: + session_id: Session to abandon + + Returns: + True if abandoned + """ + session = self.load_session(session_id) + if session: + session.status = SessionStatus.ABANDONED + session.completed_at = datetime.now().isoformat() + self.save_session(session) + return True + return False + + def cleanup_old_sessions(self, max_age_days: int = 30) -> int: + """ + Remove sessions older than max_age_days. + + Args: + max_age_days: Maximum age in days + + Returns: + Number of sessions removed + """ + cutoff = datetime.now().timestamp() - (max_age_days * 24 * 3600) + removed = 0 + + with self._lock: + to_remove = [] + for session_id, info in self._index.items(): + try: + updated_at = datetime.fromisoformat(info.get('updated_at', '')).timestamp() + if updated_at < cutoff: + to_remove.append(session_id) + except: + pass + + for session_id in to_remove: + session_file = self._session_file(session_id) + if session_file.exists(): + session_file.unlink() + del self._index[session_id] + removed += 1 + + if removed > 0: + self._save_index() + + return removed + + def get_statistics(self) -> Dict[str, Any]: + """ + Get overall statistics about COACHING sessions. 
def get_coaching_persistence(storage_path: Optional[Path] = None) -> CoachingSessionPersistence:
    """
    Return the process-wide CoachingSessionPersistence singleton.

    Args:
        storage_path: Optional custom storage path. NOTE: this is only used
            on the first call — subsequent calls return the already-created
            instance and silently ignore this argument.

    Returns:
        The shared CoachingSessionPersistence instance
    """
    global _global_persistence
    if _global_persistence is None:
        _global_persistence = CoachingSessionPersistence(storage_path)
    return _global_persistence
@pytest.fixture
def temp_storage():
    """
    Create a temporary storage directory for a test, removed afterwards.

    Yields:
        Path to a fresh temporary directory.
    """
    temp_dir = tempfile.mkdtemp()
    try:
        yield Path(temp_dir)
    finally:
        # try/finally guarantees cleanup even if the generator is closed
        # with an exception at the yield; the original skipped rmtree in
        # that case. ignore_errors keeps teardown best-effort (e.g. files
        # still locked on Windows).
        shutil.rmtree(temp_dir, ignore_errors=True)
Check metrics and readiness for AUTO + """ + workflow_id = "wf_e2e_test_001" + + # ===================================================================== + # Phase 1: First COACHING session - Learning phase + # ===================================================================== + print("\n=== Phase 1: First COACHING Session ===") + + session1 = coaching_persistence.create_session( + workflow_id=workflow_id, + execution_id="exec_001", + total_steps=5, + metadata={'phase': 'learning'} + ) + + # Simulate decisions with some corrections + from core.coaching.session_persistence import CoachingDecisionRecord + + decisions_p1 = [ + ('accept', None), + ('correct', {'target': {'id': 'new_btn'}}), + ('accept', None), + ('reject', None), + ('accept', None), + ] + + for i, (decision, correction) in enumerate(decisions_p1): + record = CoachingDecisionRecord( + step_index=i, + node_id=f"node_{i+1}", + action_type='click', + decision=decision, + correction=correction, + feedback=f"Decision {i+1}" + ) + session1.add_decision(record) + + coaching_persistence.complete_session(session1.session_id, success=True) + + # Verify session stats + session1_reloaded = coaching_persistence.load_session(session1.session_id) + assert session1_reloaded.stats['accepted'] == 3 + assert session1_reloaded.stats['corrected'] == 1 + assert session1_reloaded.stats['rejected'] == 1 + + print(f"Session 1 completed: {session1_reloaded.stats}") + + # ===================================================================== + # Phase 2: Multiple sessions to improve acceptance rate + # ===================================================================== + print("\n=== Phase 2: Multiple Training Sessions ===") + + # Session 2: Better acceptance after learning + session2 = coaching_persistence.create_session( + workflow_id=workflow_id, + execution_id="exec_002", + total_steps=5 + ) + + # Most actions accepted now (corrections are working) + decisions_p2 = [ + ('accept', None), + ('accept', None), + ('accept', 
None), + ('accept', None), + ('correct', {'target': {'text': 'Submit'}}), + ] + + for i, (decision, correction) in enumerate(decisions_p2): + record = CoachingDecisionRecord( + step_index=i, + node_id=f"node_{i+1}", + action_type='click', + decision=decision, + correction=correction + ) + session2.add_decision(record) + + coaching_persistence.complete_session(session2.session_id, success=True) + print(f"Session 2 completed: {session2.stats}") + + # Sessions 3-5: High acceptance rate + for sess_num in range(3, 6): + session = coaching_persistence.create_session( + workflow_id=workflow_id, + execution_id=f"exec_{sess_num:03d}", + total_steps=5 + ) + + # All accepted + for i in range(5): + record = CoachingDecisionRecord( + step_index=i, + node_id=f"node_{i+1}", + action_type='click', + decision='accept' + ) + session.add_decision(record) + + coaching_persistence.complete_session(session.session_id, success=True) + print(f"Session {sess_num} completed: all accepted") + + # ===================================================================== + # Phase 3: Check Metrics and Learning Progress + # ===================================================================== + print("\n=== Phase 3: Checking Metrics ===") + + metrics = metrics_collector.get_workflow_metrics(workflow_id) + + print(f"Total sessions: {metrics.total_sessions}") + print(f"Total decisions: {metrics.total_decisions}") + print(f"Acceptance rate: {metrics.acceptance_rate:.2%}") + print(f"Correction rate: {metrics.correction_rate:.2%}") + print(f"Confidence score: {metrics.confidence_score:.2f}") + print(f"Learning progress: {metrics.learning_progress.value}") + print(f"Ready for AUTO: {metrics.ready_for_auto}") + print(f"Recommendations: {metrics.recommendations}") + + # Assertions + assert metrics.total_sessions == 5 + assert metrics.total_decisions == 25 + assert metrics.acceptance_rate > 0.8 # Should be high after training + assert metrics.correction_rate < 0.15 # Should be low + + # 
    def test_session_persistence_and_recovery(self, coaching_persistence):
        """
        Test that COACHING sessions can be paused and resumed.

        Flow: create a session, record 3 decisions, pause, reload and verify,
        resume, record 3 more decisions, reload and verify continuation.
        """
        print("\n=== Testing Session Persistence ===")

        workflow_id = "wf_persistence_test"

        # Create and partially complete a session
        session = coaching_persistence.create_session(
            workflow_id=workflow_id,
            execution_id="exec_persist",
            total_steps=10
        )

        from core.coaching.session_persistence import CoachingDecisionRecord

        # Add 3 decisions
        for i in range(3):
            record = CoachingDecisionRecord(
                step_index=i,
                node_id=f"node_{i+1}",
                action_type='click',
                decision='accept'
            )
            session.add_decision(record)

        coaching_persistence.save_session(session)

        # Pause the session
        coaching_persistence.pause_session(session.session_id)

        # Verify paused — reload from disk to prove the state was persisted,
        # not just mutated in memory. The index assertion shows add_decision
        # advances current_step_index.
        loaded = coaching_persistence.load_session(session.session_id)
        assert loaded.status.value == 'paused'
        assert len(loaded.decisions) == 3
        assert loaded.current_step_index == 3

        # Resume the session
        resumed = coaching_persistence.resume_session(session.session_id)
        assert resumed.status.value == 'active'
        assert resumed.can_resume() is True

        # Continue adding decisions
        for i in range(3, 6):
            record = CoachingDecisionRecord(
                step_index=i,
                node_id=f"node_{i+1}",
                action_type='click',
                decision='accept'
            )
            resumed.add_decision(record)

        coaching_persistence.save_session(resumed)

        # Verify continuation
        final = coaching_persistence.load_session(session.session_id)
        assert len(final.decisions) == 6
        assert final.current_step_index == 6

        print("SUCCESS: Session persistence and recovery works correctly!")
correction_service.get_pack(integration._default_pack_id) + corrections_list = pack.get('corrections') if isinstance(pack, dict) else pack.corrections + assert len(corrections_list) == 2 + + print(f"Captured {len(corrections_list)} corrections in Correction Pack") + print("SUCCESS: Corrections integrated correctly!") + + def test_metrics_threshold_for_auto_mode(self, coaching_persistence, metrics_collector): + """ + Test that metrics correctly determine AUTO mode readiness. + """ + print("\n=== Testing AUTO Mode Threshold ===") + + from core.coaching.session_persistence import CoachingDecisionRecord + + workflow_id = "wf_threshold_test" + + # Test case 1: Below threshold (too few sessions) + session = coaching_persistence.create_session( + workflow_id=workflow_id, + execution_id="exec_001", + total_steps=5 + ) + + for i in range(5): + record = CoachingDecisionRecord( + step_index=i, + node_id=f"node_{i+1}", + action_type='click', + decision='accept' + ) + session.add_decision(record) + + coaching_persistence.complete_session(session.session_id, success=True) + + metrics = metrics_collector.get_workflow_metrics(workflow_id) + assert not metrics.ready_for_auto, "Should not be ready with only 1 session" + + # Test case 2: Meet minimum sessions + for sess_num in range(2, 6): + session = coaching_persistence.create_session( + workflow_id=workflow_id, + execution_id=f"exec_{sess_num:03d}", + total_steps=5 + ) + + for i in range(5): + record = CoachingDecisionRecord( + step_index=i, + node_id=f"node_{i+1}", + action_type='click', + decision='accept' + ) + session.add_decision(record) + + coaching_persistence.complete_session(session.session_id, success=True) + + metrics = metrics_collector.get_workflow_metrics(workflow_id) + print(f"After 5 sessions - Acceptance: {metrics.acceptance_rate:.2%}, Ready: {metrics.ready_for_auto}") + assert metrics.ready_for_auto, "Should be ready after 5 sessions with high acceptance" + + print("SUCCESS: Threshold calculation works 
    def test_global_metrics_aggregation(self, coaching_persistence, metrics_collector):
        """
        Test global metrics aggregation across multiple workflows.

        Builds 3 workflows x 3 sessions x 3 decisions (one 'correct' per
        session) and checks the collector's global totals.
        """
        print("\n=== Testing Global Metrics ===")

        from core.coaching.session_persistence import CoachingDecisionRecord

        # Create sessions for multiple workflows
        workflows = ["wf_global_1", "wf_global_2", "wf_global_3"]

        for wf_id in workflows:
            for sess_num in range(3):
                session = coaching_persistence.create_session(
                    workflow_id=wf_id,
                    execution_id=f"exec_{wf_id}_{sess_num}",
                    total_steps=3
                )

                for i in range(3):
                    # Middle step is corrected, the other two accepted.
                    decision = 'accept' if i != 1 else 'correct'
                    record = CoachingDecisionRecord(
                        step_index=i,
                        node_id=f"node_{i+1}",
                        action_type='click',
                        decision=decision
                    )
                    session.add_decision(record)

                coaching_persistence.complete_session(session.session_id, success=True)

        # Get global metrics
        global_metrics = metrics_collector.get_global_metrics()

        print(f"Total workflows: {global_metrics.total_workflows}")
        print(f"Total sessions: {global_metrics.total_sessions}")
        print(f"Total decisions: {global_metrics.total_decisions}")
        print(f"Acceptance rate: {global_metrics.overall_acceptance_rate:.2%}")

        assert global_metrics.total_workflows == 3
        assert global_metrics.total_sessions == 9  # 3 workflows x 3 sessions
        assert global_metrics.total_decisions == 27  # 9 sessions x 3 decisions

        print("SUCCESS: Global metrics aggregation works correctly!")
total_steps=3 + ) + assert session.session_id is not None + + # Add decision (simulating POST /api/coaching-sessions/{id}/decisions) + record = CoachingDecisionRecord( + step_index=0, + node_id="node_1", + action_type="click", + decision="accept" + ) + session.add_decision(record) + coaching_persistence.save_session(session) + + # Get session (simulating GET /api/coaching-sessions/{id}) + loaded = coaching_persistence.load_session(session.session_id) + assert loaded is not None + assert len(loaded.decisions) == 1 + + # Complete session (simulating POST /api/coaching-sessions/{id}/complete) + completed = coaching_persistence.complete_session(session.session_id, success=True) + assert completed.status.value == 'completed' + + print("SUCCESS: API session lifecycle works correctly!") + + +if __name__ == '__main__': + pytest.main([__file__, '-v', '-s']) diff --git a/visual_workflow_builder/backend/api/coaching_sessions.py b/visual_workflow_builder/backend/api/coaching_sessions.py new file mode 100644 index 000000000..a56ea6c8b --- /dev/null +++ b/visual_workflow_builder/backend/api/coaching_sessions.py @@ -0,0 +1,531 @@ +""" +COACHING Sessions API Blueprint + +Provides REST endpoints for managing COACHING session persistence: +- List/create/load sessions +- Pause/resume sessions +- Session statistics +""" + +import sys +from pathlib import Path +from flask import Blueprint, jsonify, request + +# Add core to path +sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent)) + +from core.coaching import ( + CoachingSessionPersistence, + CoachingSessionState, + get_coaching_persistence, +) +from core.coaching.session_persistence import SessionStatus + +coaching_sessions_bp = Blueprint('coaching_sessions', __name__) + + +def get_persistence() -> CoachingSessionPersistence: + """Get the coaching session persistence instance.""" + return get_coaching_persistence() + + +@coaching_sessions_bp.route('/', methods=['GET']) +def list_sessions(): + """ + List COACHING 
sessions. + + Query params: + workflow_id: Filter by workflow ID + status: Filter by status (active, paused, completed, failed, abandoned) + limit: Maximum number of sessions (default: 100) + + Returns: + sessions: List of session summaries + """ + workflow_id = request.args.get('workflow_id') + status_str = request.args.get('status') + limit = int(request.args.get('limit', 100)) + + status = None + if status_str: + try: + status = SessionStatus(status_str) + except ValueError: + return jsonify({ + 'error': f'Invalid status. Valid values: {[s.value for s in SessionStatus]}' + }), 400 + + persistence = get_persistence() + sessions = persistence.list_sessions( + workflow_id=workflow_id, + status=status, + limit=limit + ) + + return jsonify({'sessions': sessions}) + + +@coaching_sessions_bp.route('/', methods=['POST']) +def create_session(): + """ + Create a new COACHING session. + + Body JSON: + workflow_id: str - ID of the workflow + execution_id: str - ID of the execution + total_steps: int (optional) - Total number of steps + variables: dict (optional) - Initial variables + metadata: dict (optional) - Additional metadata + + Returns: + session: Created session state + """ + data = request.get_json() or {} + workflow_id = data.get('workflow_id') + execution_id = data.get('execution_id') + + if not workflow_id or not execution_id: + return jsonify({'error': 'workflow_id and execution_id are required'}), 400 + + persistence = get_persistence() + session = persistence.create_session( + workflow_id=workflow_id, + execution_id=execution_id, + total_steps=data.get('total_steps', 0), + variables=data.get('variables', {}), + metadata=data.get('metadata', {}) + ) + + return jsonify({ + 'message': 'Session created', + 'session': session.to_dict() + }), 201 + + +@coaching_sessions_bp.route('/', methods=['GET']) +def get_session(session_id: str): + """ + Get a COACHING session by ID. 
@coaching_sessions_bp.route('/<session_id>', methods=['PUT'])
def update_session(session_id: str):
    """
    Update a COACHING session.

    The route previously registered as '/' with no URL variable while the
    view requires session_id; the '<session_id>' converter is restored so
    Flask can actually dispatch to this handler.

    Body JSON:
        current_step_index: int (optional)
        variables: dict (optional)
        metadata: dict (optional)

    Returns:
        session: Updated session state
    """
    data = request.get_json() or {}

    persistence = get_persistence()
    session = persistence.load_session(session_id)

    if session is None:
        return jsonify({'error': f'Session {session_id} not found'}), 404

    # Only whitelisted fields may change; dict fields are merged, not
    # replaced, so a partial update preserves existing keys.
    if 'current_step_index' in data:
        session.current_step_index = data['current_step_index']
    if 'variables' in data:
        session.variables.update(data['variables'])
    if 'metadata' in data:
        session.metadata.update(data['metadata'])

    persistence.save_session(session)

    return jsonify({
        'message': 'Session updated',
        'session': session.to_dict()
    })
+ + Body JSON: + step_index: int + node_id: str + action_type: str + decision: str (accept, reject, correct, manual, skip) + correction: dict (optional) + feedback: str (optional) + execution_success: bool (optional) + + Returns: + session: Updated session state + """ + data = request.get_json() or {} + + required = ['step_index', 'node_id', 'action_type', 'decision'] + for field in required: + if field not in data: + return jsonify({'error': f'{field} is required'}), 400 + + valid_decisions = ['accept', 'reject', 'correct', 'manual', 'skip'] + if data['decision'] not in valid_decisions: + return jsonify({ + 'error': f'Invalid decision. Valid values: {valid_decisions}' + }), 400 + + persistence = get_persistence() + session = persistence.load_session(session_id) + + if session is None: + return jsonify({'error': f'Session {session_id} not found'}), 404 + + from core.coaching.session_persistence import CoachingDecisionRecord + + decision = CoachingDecisionRecord( + step_index=data['step_index'], + node_id=data['node_id'], + action_type=data['action_type'], + decision=data['decision'], + correction=data.get('correction'), + feedback=data.get('feedback'), + execution_success=data.get('execution_success') + ) + + session.add_decision(decision) + persistence.save_session(session) + + return jsonify({ + 'message': 'Decision added', + 'session': session.to_dict() + }) + + +@coaching_sessions_bp.route('//pause', methods=['POST']) +def pause_session(session_id: str): + """ + Pause an active COACHING session. 
@coaching_sessions_bp.route('/<session_id>/resume', methods=['POST'])
def resume_session(session_id: str):
    """
    Resume a paused COACHING session.

    The route previously read '//resume' (the '<session_id>' URL variable
    was missing while the view requires it); the converter is restored.

    Returns:
        session: Resumed session state, or 400 if the session cannot resume
    """
    persistence = get_persistence()
    session = persistence.resume_session(session_id)

    if session:
        return jsonify({
            'message': 'Session resumed',
            'session': session.to_dict()
        })
    else:
        return jsonify({
            'error': 'Cannot resume session (not paused or not found)'
        }), 400
@coaching_sessions_bp.route('/resumable', methods=['GET'])
def get_resumable_sessions():
    """
    Get all resumable sessions for a workflow.

    Query params:
        workflow_id: str (required) - Workflow ID

    Returns:
        sessions: List of resumable session states
    """
    workflow_id = request.args.get('workflow_id')
    if not workflow_id:
        return jsonify({'error': 'workflow_id is required'}), 400

    # Full session states (not just index summaries) are serialized.
    resumable = get_persistence().get_resumable_sessions(workflow_id)
    serialized = [state.to_dict() for state in resumable]

    return jsonify({'sessions': serialized})
@coaching_sessions_bp.route('/metrics/ready-for-auto', methods=['GET'])
def get_workflows_ready_for_auto():
    """
    Get list of workflows ready for autonomous mode.

    Returns:
        workflows_ready: Per-workflow summaries (id, confidence score,
            acceptance rate, session count)
        total_ready: Number of workflows ready for AUTO mode
    """
    try:
        from core.coaching import get_metrics_collector
        collector = get_metrics_collector()

        # Workflow IDs are discovered from the session index. The former
        # collector.get_global_metrics() call here was dead code — its
        # result was never used — and has been removed.
        persistence = get_persistence()
        all_sessions = persistence.list_sessions(limit=10000)
        workflow_ids = {
            s['workflow_id'] for s in all_sessions if s.get('workflow_id')
        }

        ready_workflows = []
        for workflow_id in workflow_ids:
            metrics = collector.get_workflow_metrics(workflow_id)
            if metrics.ready_for_auto:
                ready_workflows.append({
                    'workflow_id': workflow_id,
                    'confidence_score': metrics.confidence_score,
                    'acceptance_rate': metrics.acceptance_rate,
                    'total_sessions': metrics.total_sessions
                })

        return jsonify({
            'workflows_ready': ready_workflows,
            'total_ready': len(ready_workflows)
        })
    except Exception as e:
        # API boundary: surface any failure as a 500 with the message.
        return jsonify({'error': str(e)}), 500
+ + Returns all metrics needed for a monitoring dashboard: + - Global statistics + - Recent activity + - Top workflows + - Recommendations + """ + try: + from core.coaching import get_metrics_collector + collector = get_metrics_collector() + + # Get global metrics + global_metrics = collector.get_global_metrics() + + # Get recent activity (last 7 days sessions) + persistence = get_persistence() + recent_sessions = persistence.list_sessions(limit=50) + + # Build dashboard response + dashboard = { + 'overview': { + 'total_workflows': global_metrics.total_workflows, + 'total_sessions': global_metrics.total_sessions, + 'active_sessions': global_metrics.active_sessions, + 'workflows_ready_for_auto': global_metrics.workflows_ready_for_auto, + 'workflows_in_learning': global_metrics.workflows_in_learning, + }, + 'rates': { + 'acceptance_rate': round(global_metrics.overall_acceptance_rate * 100, 1), + 'correction_rate': round(global_metrics.overall_correction_rate * 100, 1), + }, + 'activity': { + 'sessions_last_24h': global_metrics.sessions_last_24h, + 'decisions_last_24h': global_metrics.decisions_last_24h, + }, + 'decisions': { + 'total': global_metrics.total_decisions, + 'accepted': global_metrics.total_accepted, + 'rejected': global_metrics.total_rejected, + 'corrected': global_metrics.total_corrected, + }, + 'top_workflows': { + 'by_sessions': global_metrics.top_workflows_by_sessions, + 'by_corrections': global_metrics.top_workflows_by_corrections, + }, + 'recent_sessions': [ + { + 'session_id': s.get('session_id'), + 'workflow_id': s.get('workflow_id'), + 'status': s.get('status'), + 'updated_at': s.get('updated_at'), + } + for s in recent_sessions[:10] + ] + } + + return jsonify({'dashboard': dashboard}) + except Exception as e: + return jsonify({'error': str(e)}), 500 diff --git a/visual_workflow_builder/backend/api/executions.py b/visual_workflow_builder/backend/api/executions.py new file mode 100644 index 000000000..c2d68bc97 --- /dev/null +++ 
@executions_bp.route('/', methods=['POST'])
def start_execution():
    """
    Start the execution of a workflow.

    Body JSON:
        workflow_id: str - ID of the workflow to run
        variables: dict (optional) - Input variables
        mode: str (optional) - 'normal' or 'coaching' (default: 'normal')

    Returns:
        execution_id: str - ID of the launched execution (HTTP 201)
    """
    payload = request.get_json() or {}
    workflow_id = payload.get('workflow_id')
    if not workflow_id:
        return jsonify({'error': 'workflow_id requis'}), 400

    variables = payload.get('variables', {})
    mode = payload.get('mode', 'normal')

    try:
        executor = get_executor()

        # COACHING runs go through the interactive entry point; any other
        # mode value falls back to the plain executor.
        launch = (
            executor.execute_workflow_coaching
            if mode == 'coaching'
            else executor.execute_workflow
        )
        execution_id = launch(workflow_id=workflow_id, variables=variables)

        return jsonify({
            'execution_id': execution_id,
            'workflow_id': workflow_id,
            'mode': mode,
            'status': 'started'
        }), 201

    except ValueError as e:
        # Unknown workflow -> 404
        return jsonify({'error': str(e)}), 404
    except Exception as e:
        return jsonify({'error': str(e)}), 500
+ + Body JSON: + workflow_id: str - ID du workflow à exécuter + variables: dict (optionnel) - Variables d'entrée + + Returns: + execution_id: str - ID de l'exécution COACHING lancée + """ + data = request.get_json() or {} + workflow_id = data.get('workflow_id') + variables = data.get('variables', {}) + + if not workflow_id: + return jsonify({'error': 'workflow_id requis'}), 400 + + try: + executor = get_executor() + + execution_id = executor.execute_workflow_coaching( + workflow_id=workflow_id, + variables=variables + ) + + return jsonify({ + 'execution_id': execution_id, + 'workflow_id': workflow_id, + 'mode': 'coaching', + 'status': 'started', + 'message': 'Connectez-vous via WebSocket pour recevoir les suggestions' + }), 201 + + except ValueError as e: + return jsonify({'error': str(e)}), 404 + except Exception as e: + return jsonify({'error': str(e)}), 500 + + +@executions_bp.route('/', methods=['GET']) +def list_executions(): + """ + Liste les exécutions. + + Query params: + workflow_id: str (optionnel) - Filtrer par workflow + mode: str (optionnel) - Filtrer par mode ('normal', 'coaching') + + Returns: + executions: list - Liste des exécutions + """ + workflow_id = request.args.get('workflow_id') + mode = request.args.get('mode') + + executor = get_executor() + executions = executor.list_executions(workflow_id) + + # Filtrer par mode si demandé + if mode: + if mode == 'coaching': + executions = [ + e for e in executions + if executor.is_coaching_execution(e['execution_id']) + ] + elif mode == 'normal': + executions = [ + e for e in executions + if not executor.is_coaching_execution(e['execution_id']) + ] + + return jsonify({'executions': executions}) + + +@executions_bp.route('/', methods=['GET']) +def get_execution(execution_id): + """ + Récupère le statut et les détails d'une exécution. 
+ + Returns: + Détails de l'exécution incluant statut, progression, logs + """ + executor = get_executor() + result = executor.get_execution_status(execution_id) + + if result is None: + return jsonify({'error': f'Exécution {execution_id} introuvable'}), 404 + + response = result.to_dict() + response['is_coaching'] = executor.is_coaching_execution(execution_id) + + return jsonify(response) + + +@executions_bp.route('//cancel', methods=['POST']) +def cancel_execution(execution_id): + """ + Annule une exécution en cours. + + Returns: + success: bool - Si l'annulation a réussi + """ + executor = get_executor() + + if executor.cancel_execution(execution_id): + return jsonify({ + 'execution_id': execution_id, + 'status': 'cancelled', + 'message': 'Exécution annulée avec succès' + }) + else: + return jsonify({ + 'error': 'Impossible d\'annuler l\'exécution (déjà terminée ou inexistante)' + }), 400 + + +@executions_bp.route('//coaching/decision', methods=['POST']) +def submit_coaching_decision(execution_id): + """ + Soumet une décision COACHING pour une exécution. + + Alternative REST à WebSocket pour soumettre une décision. + + Body JSON: + decision: str - 'accept' | 'reject' | 'correct' | 'manual' | 'skip' + correction: dict (optionnel) - Correction si decision == 'correct' + feedback: str (optionnel) - Commentaire utilisateur + + Returns: + success: bool + """ + data = request.get_json() or {} + decision = data.get('decision') + + if not decision: + return jsonify({'error': 'decision requis'}), 400 + + valid_decisions = ['accept', 'reject', 'correct', 'manual', 'skip'] + if decision not in valid_decisions: + return jsonify({ + 'error': f'decision invalide. 
Valeurs acceptées: {valid_decisions}' + }), 400 + + executor = get_executor() + + if not executor.is_coaching_execution(execution_id): + return jsonify({ + 'error': f'{execution_id} n\'est pas une exécution COACHING' + }), 400 + + decision_response = { + 'decision': decision, + 'correction': data.get('correction'), + 'feedback': data.get('feedback'), + 'executed_manually': decision == 'manual' + } + + success = executor.submit_coaching_decision(execution_id, decision_response) + + if success: + return jsonify({ + 'execution_id': execution_id, + 'decision': decision, + 'status': 'accepted' + }) + else: + return jsonify({ + 'error': 'Impossible de soumettre la décision' + }), 400 + + +@executions_bp.route('//coaching/stats', methods=['GET']) +def get_coaching_stats(execution_id): + """ + Récupère les statistiques COACHING d'une exécution. + + Returns: + stats: dict - Statistiques (suggestions, accepted, rejected, etc.) + """ + executor = get_executor() + + if not executor.is_coaching_execution(execution_id): + return jsonify({ + 'error': f'{execution_id} n\'est pas une exécution COACHING' + }), 400 + + stats = executor.get_coaching_stats(execution_id) + + if stats is None: + stats = { + 'suggestions_made': 0, + 'accepted': 0, + 'rejected': 0, + 'corrected': 0, + 'manual_executions': 0 + } + + return jsonify({ + 'execution_id': execution_id, + 'stats': stats + }) diff --git a/visual_workflow_builder/backend/app.py b/visual_workflow_builder/backend/app.py index 3744bbd1a..126c22a77 100644 --- a/visual_workflow_builder/backend/app.py +++ b/visual_workflow_builder/backend/app.py @@ -119,6 +119,13 @@ try: except ImportError as e: print(f"⚠️ Blueprint correction_packs désactivé: {e}") +try: + from api.coaching_sessions import coaching_sessions_bp + app.register_blueprint(coaching_sessions_bp, url_prefix='/api/coaching-sessions') + print("✅ Blueprint coaching_sessions enregistré") +except ImportError as e: + print(f"⚠️ Blueprint coaching_sessions désactivé: {e}") + # Import 
# (app.py continues here with the optional WebSocket handler imports.)

# --- visual_workflow_builder/backend/services/execution_integration.py ---
"""
Execution Integration Service - Visual Workflow Builder

Integration with ExecutionLoop for running visual workflows.

Exigences: 20.1, 20.2, 20.3
"""

import sys
import asyncio
from pathlib import Path
from typing import Dict, List, Optional, Any, Callable
from datetime import datetime
import threading
import time
import uuid

# Make the repository root importable so the `core.*` modules resolve.
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent))

from core.models.workflow_graph import Workflow

# Optional integrations: a missing module simply disables the feature.
try:
    from core.analytics.integration.execution_integration import AnalyticsExecutionIntegration
except ImportError:
    AnalyticsExecutionIntegration = None

try:
    from core.healing.execution_integration import SelfHealingIntegration
except ImportError:
    SelfHealingIntegration = None

from models.visual_workflow import VisualWorkflow
from services.converter import convert_visual_to_graph
from services.serialization import WorkflowDatabase
from services.learning_integration import record_workflow_execution


class ExecutionStatus:
    """Possible execution states."""
    PENDING = 'pending'
    RUNNING = 'running'
    COMPLETED = 'completed'
    FAILED = 'failed'
    CANCELLED = 'cancelled'


class ExecutionResult:
    """Result of a single workflow execution."""

    def __init__(self, execution_id: str, workflow_id: str):
        self.execution_id = execution_id
        self.workflow_id = workflow_id
        self.status = ExecutionStatus.PENDING
        self.start_time: Optional[datetime] = None
        self.end_time: Optional[datetime] = None
        self.error_message: Optional[str] = None
        self.progress: Dict[str, Any] = {}     # last progress payload, merged over time
        self.logs: List[Dict[str, Any]] = []   # {timestamp, level, message} entries
        self.analytics_data: Dict[str, Any] = {}

    def to_dict(self) -> Dict[str, Any]:
        """Serialize for the REST API."""
        return {
            'execution_id': self.execution_id,
            'workflow_id': self.workflow_id,
            'status': self.status,
            'start_time': self.start_time.isoformat() if self.start_time else None,
            'end_time': self.end_time.isoformat() if self.end_time else None,
            'duration_ms': self._calculate_duration(),
            'error_message': self.error_message,
            'progress': self.progress,
            'logs': self.logs,
            'analytics_data': self.analytics_data
        }

    def _calculate_duration(self) -> Optional[int]:
        """Execution duration in milliseconds, or None while still running."""
        if self.start_time and self.end_time:
            return int((self.end_time - self.start_time).total_seconds() * 1000)
        return None


class VisualWorkflowExecutor:
    """
    Executes visual workflows through ExecutionLoop.

    Exigences: 20.1, 20.2, 20.3
    """

    def __init__(self, db_root_dir: str = None):
        """Initialize the executor; db_root_dir defaults to backend/data/workflows."""
        if db_root_dir is None:
            db_root_dir = str(Path(__file__).parent.parent / 'data' / 'workflows')
        self.db = WorkflowDatabase(root_dir=db_root_dir)
        self.executions: Dict[str, ExecutionResult] = {}
        self.progress_callbacks: Dict[str, List[Callable]] = {}

        # Integrations (lazy-initialized through the properties below).
        self._analytics_integration = None
        self._healing_integration = None

        # Monitoring thread placeholders (not started here).
        self._monitor_thread = None
        self._stop_monitoring = False

        # COACHING mode support
        self._execution_loops: Dict[str, Any] = {}            # execution_id -> ExecutionLoop
        self._coaching_mode_executions: Dict[str, bool] = {}  # execution_id -> is_coaching
        # FIX: initialize eagerly instead of creating this dict lazily with
        # hasattr() checks at the use sites — the lazy pattern raced between
        # the executor thread and the HTTP thread submitting decisions.
        self._coaching_responses: Dict[str, Dict[str, Any]] = {}

    @property
    def analytics_integration(self):
        """Lazy initialization of the Analytics integration (None if unavailable)."""
        if self._analytics_integration is None and AnalyticsExecutionIntegration is not None:
            try:
                self._analytics_integration = AnalyticsExecutionIntegration()
            except Exception as e:
                print(f"Warning: Analytics integration not available: {e}")
        return self._analytics_integration

    @property
    def healing_integration(self):
        """Lazy initialization of the Self-Healing integration (None if unavailable)."""
        if self._healing_integration is None and SelfHealingIntegration is not None:
            try:
                self._healing_integration = SelfHealingIntegration()
            except Exception as e:
                print(f"Warning: Self-Healing integration not available: {e}")
        return self._healing_integration

    def execute_workflow(
        self,
        workflow_id: str,
        variables: Optional[Dict[str, Any]] = None,
        progress_callback: Optional[Callable] = None
    ) -> str:
        """
        Launch the execution of a visual workflow.

        Exigence: 20.1

        Args:
            workflow_id: ID of the workflow to execute
            variables: optional input variables
            progress_callback: callback for progress updates

        Returns:
            Execution ID

        Raises:
            ValueError: if the workflow does not exist
            RuntimeError: if the conversion fails
        """
        # Load the visual workflow from storage.
        visual_workflow = self.db.load(workflow_id)
        if visual_workflow is None:
            raise ValueError(f"Workflow {workflow_id} introuvable")

        # Millisecond timestamp keeps ids unique and sortable.
        execution_id = f"exec_{int(time.time() * 1000)}_{workflow_id[:8]}"

        result = ExecutionResult(execution_id, workflow_id)
        self.executions[execution_id] = result

        if progress_callback:
            self.progress_callbacks.setdefault(execution_id, []).append(progress_callback)

        # Run in a daemon thread so the HTTP request returns immediately.
        thread = threading.Thread(
            target=self._execute_workflow_thread,
            args=(execution_id, visual_workflow, variables or {})
        )
        thread.daemon = True
        thread.start()

        return execution_id

    def _execute_workflow_thread(
        self,
        execution_id: str,
        visual_workflow: VisualWorkflow,
        variables: Dict[str, Any]
    ) -> None:
        """
        Background worker that performs the actual execution.

        Exigence: 20.1
        """
        result = self.executions[execution_id]

        try:
            # Mark as started before any work that can fail.
            result.status = ExecutionStatus.RUNNING
            result.start_time = datetime.now()
            self._notify_progress(execution_id, 'started', {'status': 'running'})

            # Convert the visual model to an executable WorkflowGraph.
            self._log(execution_id, 'info', 'Conversion du workflow visuel...')
            workflow_graph = convert_visual_to_graph(visual_workflow)

            # Inject input variables into the graph metadata.
            if variables:
                for name, value in variables.items():
                    workflow_graph.metadata[f'input_{name}'] = value

            self._setup_integrations(execution_id, workflow_graph)

            self._log(execution_id, 'info', 'Démarrage de l\'exécution...')
            self._simulate_execution(execution_id, workflow_graph)

            result.status = ExecutionStatus.COMPLETED
            result.end_time = datetime.now()

            # Report completion to Analytics (duration in seconds).
            if self.analytics_integration:
                self.analytics_integration.on_execution_complete(
                    execution_id=execution_id,
                    workflow_id=workflow_graph.workflow_id,
                    started_at=result.start_time,
                    completed_at=result.end_time,
                    duration=result._calculate_duration() / 1000.0,
                    status='success',
                    steps_completed=len(workflow_graph.nodes),
                    steps_failed=0
                )

            analytics_data = self._collect_analytics_data(execution_id, workflow_graph.workflow_id)
            result.analytics_data = analytics_data

            # Feed the learning system: high confidence on success.
            record_workflow_execution(
                workflow_id=visual_workflow.id,
                success=True,
                confidence=0.9
            )

            self._notify_progress(execution_id, 'completed', {
                'status': 'completed',
                'duration_ms': result._calculate_duration(),
                'analytics': analytics_data
            })

            self._log(execution_id, 'info', f'Exécution terminée avec succès en {result._calculate_duration()}ms')

        except Exception as e:
            result.status = ExecutionStatus.FAILED
            result.end_time = datetime.now()
            result.error_message = str(e)

            # Report the failure to Analytics.
            # BUG FIX: the original read `visual_workflow.workflow_id`, but every
            # other call site uses `visual_workflow.id` — the wrong attribute would
            # raise inside this handler and mask the real error.
            if self.analytics_integration:
                self.analytics_integration.on_execution_complete(
                    execution_id=execution_id,
                    workflow_id=visual_workflow.id,
                    started_at=result.start_time,
                    completed_at=result.end_time,
                    duration=result._calculate_duration() / 1000.0 if result._calculate_duration() else 0,
                    status='failed',
                    error_message=str(e),
                    steps_completed=0,
                    steps_failed=1
                )

            # Feed the learning system: medium confidence on failure.
            record_workflow_execution(
                workflow_id=visual_workflow.id,
                success=False,
                confidence=0.5
            )

            self._notify_progress(execution_id, 'failed', {
                'status': 'failed',
                'error': str(e)
            })

            self._log(execution_id, 'error', f'Exécution échouée: {str(e)}')
+ """ + try: + # Importer ExecutionLoop du système principal + from core.execution.execution_loop import ExecutionLoop + from core.config import get_config + + # Créer une instance d'ExecutionLoop + config = get_config() + execution_loop = ExecutionLoop(config) + + # Exécuter le workflow + result = execution_loop.execute(workflow_graph) + + # Traiter le résultat + if result.success: + self._log(execution_id, 'info', f'Workflow exécuté avec succès') + + # Notifier Analytics pour chaque étape + for i, step_result in enumerate(result.step_results): + if self.analytics_integration: + self.analytics_integration.on_step_complete( + execution_id=execution_id, + workflow_id=workflow_graph.workflow_id, + node_id=step_result.node_id, + action_type=step_result.action_type, + started_at=step_result.start_time, + completed_at=step_result.end_time, + duration=step_result.duration_seconds, + success=step_result.success + ) + + # Notifier la progression + progress = (i + 1) / len(result.step_results) * 100 + self._notify_progress(execution_id, 'node_completed', { + 'node_id': step_result.node_id, + 'node_name': step_result.node_name, + 'progress': progress, + 'completed_nodes': i + 1, + 'total_nodes': len(result.step_results), + 'step_duration': step_result.duration_seconds + }) + else: + raise Exception(f"Execution failed: {result.error_message}") + + except ImportError: + # Fallback à la simulation si ExecutionLoop n'est pas disponible + self._log(execution_id, 'warning', 'ExecutionLoop non disponible, utilisation de la simulation') + self._simulate_execution_fallback(execution_id, workflow_graph) + except Exception as e: + self._log(execution_id, 'error', f'Erreur ExecutionLoop: {str(e)}') + # Fallback à la simulation en cas d'erreur + self._simulate_execution_fallback(execution_id, workflow_graph) + + def _simulate_execution_fallback( + self, + execution_id: str, + workflow_graph: Workflow + ) -> None: + """ + Simule l'exécution d'un workflow (fallback). 
+ """ + total_nodes = len(workflow_graph.nodes) + + for i, node in enumerate(workflow_graph.nodes): + # Notifier le début de l'étape + step_start_time = datetime.now() + if self.analytics_integration: + self.analytics_integration.on_step_start( + execution_id=execution_id, + node_id=node.node_id, + step_number=i + 1 + ) + + # Simuler l'exécution du node + time.sleep(0.1) # Simuler le temps d'exécution + + # Notifier la fin de l'étape + step_end_time = datetime.now() + step_duration = (step_end_time - step_start_time).total_seconds() + + if self.analytics_integration: + self.analytics_integration.on_step_complete( + execution_id=execution_id, + workflow_id=workflow_graph.workflow_id, + node_id=node.node_id, + action_type=getattr(node, 'action_type', 'unknown'), + started_at=step_start_time, + completed_at=step_end_time, + duration=step_duration, + success=True + ) + + progress = (i + 1) / total_nodes * 100 + self._notify_progress(execution_id, 'node_completed', { + 'node_id': node.node_id, + 'node_name': node.name, + 'progress': progress, + 'completed_nodes': i + 1, + 'total_nodes': total_nodes, + 'step_duration': step_duration + }) + + self._log(execution_id, 'info', f'Node {node.name} exécuté avec succès') + + def _setup_integrations( + self, + execution_id: str, + workflow_graph: Workflow + ) -> None: + """ + Configure les intégrations Analytics et Self-Healing. 
+ + Exigences: 20.2, 20.3, 18.3 + """ + try: + # Configuration Analytics + if self.analytics_integration: + # Démarrer le suivi d'exécution + self.analytics_integration.on_execution_start( + workflow_id=workflow_graph.workflow_id, + execution_id=execution_id, + total_steps=len(workflow_graph.nodes) + ) + self._log(execution_id, 'info', 'Analytics configuré') + + # Configuration Self-Healing + if hasattr(self.healing_integration, 'enable_healing'): + self.healing_integration.enable_healing(workflow_graph) + self._log(execution_id, 'info', 'Self-Healing activé') + + except Exception as e: + self._log(execution_id, 'warning', f'Erreur configuration intégrations: {str(e)}') + + def get_execution_status(self, execution_id: str) -> Optional[ExecutionResult]: + """ + Récupère le statut d'une exécution. + + Args: + execution_id: ID de l'exécution + + Returns: + Résultat d'exécution ou None si introuvable + """ + return self.executions.get(execution_id) + + def cancel_execution(self, execution_id: str) -> bool: + """ + Annule une exécution en cours. + + Args: + execution_id: ID de l'exécution + + Returns: + True si annulée, False si impossible + """ + result = self.executions.get(execution_id) + if result and result.status == ExecutionStatus.RUNNING: + result.status = ExecutionStatus.CANCELLED + result.end_time = datetime.now() + + self._notify_progress(execution_id, 'cancelled', {'status': 'cancelled'}) + self._log(execution_id, 'info', 'Exécution annulée par l\'utilisateur') + + return True + + return False + + def list_executions(self, workflow_id: Optional[str] = None) -> List[Dict[str, Any]]: + """ + Liste les exécutions. 
+ + Args: + workflow_id: Filtrer par workflow (optionnel) + + Returns: + Liste des exécutions + """ + executions = [] + + for result in self.executions.values(): + if workflow_id is None or result.workflow_id == workflow_id: + executions.append(result.to_dict()) + + # Trier par date de début (plus récent en premier) + executions.sort( + key=lambda x: x['start_time'] or '1970-01-01T00:00:00', + reverse=True + ) + + return executions + + def _notify_progress( + self, + execution_id: str, + event_type: str, + data: Dict[str, Any] + ) -> None: + """ + Notifie les callbacks de progression. + + Exigence: 20.1 + """ + # Mettre à jour le progress dans le résultat + result = self.executions.get(execution_id) + if result: + result.progress.update(data) + + # Appeler les callbacks + callbacks = self.progress_callbacks.get(execution_id, []) + for callback in callbacks: + try: + callback(execution_id, event_type, data) + except Exception as e: + print(f"Erreur callback progression: {e}") + + # Émettre événement WebSocket + self._emit_websocket_event(execution_id, event_type, data) + + def _log( + self, + execution_id: str, + level: str, + message: str + ) -> None: + """ + Ajoute un log à l'exécution. + """ + result = self.executions.get(execution_id) + if result: + result.logs.append({ + 'timestamp': datetime.now().isoformat(), + 'level': level, + 'message': message + }) + + def _collect_analytics_data(self, execution_id: str, workflow_id: str) -> Dict[str, Any]: + """ + Collecte les données Analytics pour l'UI. 
+ + Exigence: 18.3 + + Args: + execution_id: ID de l'exécution + workflow_id: ID du workflow + + Returns: + Données Analytics formatées pour l'UI + """ + analytics_data = {} + + try: + if self.analytics_integration: + # Métriques en temps réel + live_metrics = self.analytics_integration.get_live_metrics(execution_id) + if live_metrics: + analytics_data['live_metrics'] = live_metrics + + # Statistiques du workflow + workflow_stats = self.analytics_integration.get_workflow_stats(workflow_id, hours=24) + if workflow_stats: + analytics_data['workflow_stats'] = workflow_stats + + # Métriques de base + analytics_data['execution_metrics'] = { + 'execution_id': execution_id, + 'workflow_id': workflow_id, + 'timestamp': datetime.now().isoformat() + } + + except Exception as e: + self._log(execution_id, 'warning', f'Erreur collecte Analytics: {str(e)}') + analytics_data['error'] = str(e) + + return analytics_data + + def get_workflow_analytics(self, workflow_id: str, hours: int = 24) -> Optional[Dict[str, Any]]: + """ + Récupère les analytics d'un workflow. + + Exigence: 18.3 + + Args: + workflow_id: ID du workflow + hours: Fenêtre de temps en heures + + Returns: + Données Analytics ou None + """ + if not self.analytics_integration: + return None + + try: + return self.analytics_integration.get_workflow_stats(workflow_id, hours) + except Exception as e: + print(f"Erreur récupération Analytics: {e}") + return None + + def cleanup_old_executions(self, max_age_hours: int = 24) -> int: + """ + Nettoie les anciennes exécutions. 
+ + Args: + max_age_hours: Âge maximum en heures + + Returns: + Nombre d'exécutions supprimées + """ + cutoff_time = datetime.now().timestamp() - (max_age_hours * 3600) + to_remove = [] + + for execution_id, result in self.executions.items(): + if result.start_time and result.start_time.timestamp() < cutoff_time: + to_remove.append(execution_id) + + for execution_id in to_remove: + del self.executions[execution_id] + if execution_id in self.progress_callbacks: + del self.progress_callbacks[execution_id] + + return len(to_remove) + + def _emit_websocket_event( + self, + execution_id: str, + event_type: str, + data: Dict[str, Any] + ) -> None: + """ + Émet un événement WebSocket pour les mises à jour en temps réel. + + Exigence: 6.2, 6.3 + """ + try: + # Import dynamique pour éviter les dépendances circulaires + from api.websocket_handlers import ( + broadcast_execution_started, + broadcast_node_status, + broadcast_execution_progress, + broadcast_execution_complete, + broadcast_execution_error + ) + + if event_type == 'started': + broadcast_execution_started(execution_id, data.get('workflow_id', '')) + + elif event_type == 'node_completed': + node_id = data.get('node_id', '') + broadcast_node_status(execution_id, node_id, 'success', data) + broadcast_execution_progress(execution_id, { + 'progress': data.get('progress', 0), + 'completed_nodes': data.get('completed_nodes', 0), + 'total_nodes': data.get('total_nodes', 0) + }) + + elif event_type == 'completed': + broadcast_execution_complete(execution_id, 'completed', data) + + elif event_type == 'failed': + error_msg = data.get('error', 'Exécution échouée') + broadcast_execution_error(execution_id, error_msg) + broadcast_execution_complete(execution_id, 'failed', data) + + elif event_type == 'cancelled': + broadcast_execution_complete(execution_id, 'cancelled', data) + + except ImportError: + # WebSocket handlers pas encore chargés (mode test) + pass + except Exception as e: + print(f"Erreur émission WebSocket: {e}") + 
+ + def execute_workflow_coaching( + self, + workflow_id: str, + variables: Optional[Dict[str, Any]] = None, + coaching_callback: Optional[Callable] = None, + progress_callback: Optional[Callable] = None + ) -> str: + """ + Lance l'exécution d'un workflow en mode COACHING. + + En mode COACHING, chaque étape est soumise à l'utilisateur pour + validation/correction avant exécution. + + Args: + workflow_id: ID du workflow à exécuter + variables: Variables d'entrée optionnelles + coaching_callback: Callback pour les suggestions COACHING (WebSocket) + progress_callback: Callback pour les mises à jour de progression + + Returns: + ID d'exécution + """ + # Charger le workflow visuel + visual_workflow = self.db.load(workflow_id) + if visual_workflow is None: + raise ValueError(f"Workflow {workflow_id} introuvable") + + # Générer un ID d'exécution + execution_id = f"coaching_{int(time.time() * 1000)}_{workflow_id[:8]}" + + # Créer le résultat d'exécution + result = ExecutionResult(execution_id, workflow_id) + self.executions[execution_id] = result + + # Marquer comme exécution COACHING + self._coaching_mode_executions[execution_id] = True + + # Enregistrer les callbacks + if progress_callback: + if execution_id not in self.progress_callbacks: + self.progress_callbacks[execution_id] = [] + self.progress_callbacks[execution_id].append(progress_callback) + + # Lancer l'exécution COACHING en arrière-plan + thread = threading.Thread( + target=self._execute_workflow_coaching_thread, + args=(execution_id, visual_workflow, variables or {}, coaching_callback) + ) + thread.daemon = True + thread.start() + + return execution_id + + def _execute_workflow_coaching_thread( + self, + execution_id: str, + visual_workflow: VisualWorkflow, + variables: Dict[str, Any], + coaching_callback: Optional[Callable] + ) -> None: + """ + Thread d'exécution du workflow en mode COACHING. 
+ """ + result = self.executions[execution_id] + + try: + # Marquer comme démarré + result.status = ExecutionStatus.RUNNING + result.start_time = datetime.now() + self._notify_progress(execution_id, 'started', { + 'status': 'running', + 'mode': 'COACHING' + }) + + # Convertir en WorkflowGraph + self._log(execution_id, 'info', 'Conversion du workflow visuel (COACHING)...') + workflow_graph = convert_visual_to_graph(visual_workflow) + + # Importer et configurer ExecutionLoop avec mode COACHING + try: + from core.execution.execution_loop import ( + ExecutionLoop, ExecutionMode, CoachingResponse, CoachingDecision + ) + from core.config import get_config + + config = get_config() + + # Créer callback COACHING qui utilise WebSocket + def coaching_decision_callback(action_info: Dict) -> 'CoachingResponse': + """Callback appelé par ExecutionLoop pour demander une décision.""" + # Émettre suggestion via WebSocket + self._emit_coaching_suggestion(execution_id, action_info) + + # Attendre la réponse (polling) + response = self._wait_for_coaching_response(execution_id) + + # Convertir en CoachingResponse + return CoachingResponse( + decision=CoachingDecision(response.get('decision', 'accept')), + correction=response.get('correction'), + feedback=response.get('feedback'), + executed_manually=response.get('executed_manually', False) + ) + + # Créer ExecutionLoop avec callback COACHING + execution_loop = ExecutionLoop( + config, + coaching_callback=coaching_callback or coaching_decision_callback + ) + + # Stocker pour les décisions externes + self._execution_loops[execution_id] = execution_loop + + # Exécuter en mode COACHING + exec_result = execution_loop.execute( + workflow_graph, + mode=ExecutionMode.COACHING + ) + + # Traiter le résultat + if exec_result.success: + result.status = ExecutionStatus.COMPLETED + + # Collecter les stats COACHING + coaching_stats = execution_loop.get_coaching_stats() + result.analytics_data['coaching_stats'] = coaching_stats + + 
self._log(execution_id, 'info', f'COACHING terminé: {coaching_stats}') + else: + raise Exception(f"COACHING failed: {exec_result.error_message}") + + except ImportError as e: + self._log(execution_id, 'warning', f'ExecutionLoop non disponible: {e}') + # Fallback: simulation COACHING basique + self._simulate_coaching_execution(execution_id, workflow_graph) + + # Finaliser + result.end_time = datetime.now() + + # Émettre fin de session COACHING via WebSocket + self._emit_coaching_session_end(execution_id, result.status) + + self._notify_progress(execution_id, 'completed', { + 'status': 'completed', + 'mode': 'COACHING', + 'duration_ms': result._calculate_duration(), + 'coaching_stats': result.analytics_data.get('coaching_stats', {}) + }) + + except Exception as e: + result.status = ExecutionStatus.FAILED + result.end_time = datetime.now() + result.error_message = str(e) + + self._emit_coaching_session_end(execution_id, 'failed') + + self._notify_progress(execution_id, 'failed', { + 'status': 'failed', + 'mode': 'COACHING', + 'error': str(e) + }) + + self._log(execution_id, 'error', f'COACHING échoué: {str(e)}') + + finally: + # Nettoyer + if execution_id in self._execution_loops: + del self._execution_loops[execution_id] + + def submit_coaching_decision( + self, + execution_id: str, + decision_response: Dict[str, Any] + ) -> bool: + """ + Soumet une décision COACHING depuis l'interface utilisateur. 
+ + Args: + execution_id: ID de l'exécution COACHING + decision_response: Réponse contenant: + - decision: 'accept' | 'reject' | 'correct' | 'manual' | 'skip' + - correction: Correction optionnelle (pour 'correct') + - feedback: Feedback optionnel + - executed_manually: True si exécuté manuellement + + Returns: + True si décision soumise avec succès + """ + # Vérifier que c'est une exécution COACHING + if not self._coaching_mode_executions.get(execution_id): + return False + + # Stocker la réponse pour le thread d'exécution + if not hasattr(self, '_coaching_responses'): + self._coaching_responses = {} + + self._coaching_responses[execution_id] = decision_response + + # Si on a accès à l'ExecutionLoop, soumettre directement + execution_loop = self._execution_loops.get(execution_id) + if execution_loop: + try: + from core.execution.execution_loop import CoachingResponse, CoachingDecision + + response = CoachingResponse( + decision=CoachingDecision(decision_response.get('decision', 'accept')), + correction=decision_response.get('correction'), + feedback=decision_response.get('feedback'), + executed_manually=decision_response.get('executed_manually', False) + ) + + return execution_loop.submit_coaching_decision(response) + except Exception as e: + self._log(execution_id, 'error', f'Erreur soumission décision: {e}') + return False + + return True + + def get_coaching_stats(self, execution_id: str) -> Optional[Dict[str, Any]]: + """ + Récupère les statistiques COACHING d'une exécution. 
+ + Args: + execution_id: ID de l'exécution + + Returns: + Statistiques COACHING ou None + """ + execution_loop = self._execution_loops.get(execution_id) + if execution_loop: + try: + return execution_loop.get_coaching_stats() + except: + pass + + # Vérifier dans les résultats + result = self.executions.get(execution_id) + if result: + return result.analytics_data.get('coaching_stats') + + return None + + def is_coaching_execution(self, execution_id: str) -> bool: + """Vérifie si une exécution est en mode COACHING.""" + return self._coaching_mode_executions.get(execution_id, False) + + def _wait_for_coaching_response( + self, + execution_id: str, + timeout_seconds: float = 300.0 + ) -> Dict[str, Any]: + """ + Attend une réponse COACHING de l'utilisateur. + + Args: + execution_id: ID de l'exécution + timeout_seconds: Timeout en secondes + + Returns: + Réponse de l'utilisateur + """ + if not hasattr(self, '_coaching_responses'): + self._coaching_responses = {} + + start_time = time.time() + + while time.time() - start_time < timeout_seconds: + # Vérifier si une réponse est disponible + if execution_id in self._coaching_responses: + response = self._coaching_responses.pop(execution_id) + return response + + # Attendre un peu avant de vérifier à nouveau + time.sleep(0.1) + + # Timeout: rejeter par défaut + self._log(execution_id, 'warning', 'Timeout COACHING - rejet automatique') + return {'decision': 'reject', 'feedback': 'Timeout - no user response'} + + def _emit_coaching_suggestion( + self, + execution_id: str, + action_info: Dict[str, Any] + ) -> None: + """ + Émet une suggestion COACHING via WebSocket. 
+ """ + try: + from api.websocket_handlers import broadcast_coaching_suggestion + + broadcast_coaching_suggestion(execution_id, { + 'execution_id': execution_id, + 'action': action_info.get('action'), + 'target': action_info.get('target'), + 'params': action_info.get('params'), + 'confidence': action_info.get('confidence', 0.0), + 'alternatives': action_info.get('alternatives', []), + 'timestamp': datetime.now().isoformat() + }) + + except ImportError: + pass + except Exception as e: + self._log(execution_id, 'warning', f'Erreur émission suggestion COACHING: {e}') + + def _emit_coaching_session_end( + self, + execution_id: str, + final_status: str + ) -> None: + """ + Émet la fin de session COACHING via WebSocket. + """ + try: + from api.websocket_handlers import broadcast_coaching_session_end + + stats = self.get_coaching_stats(execution_id) + + broadcast_coaching_session_end(execution_id, { + 'status': final_status, + 'stats': stats, + 'timestamp': datetime.now().isoformat() + }) + + except ImportError: + pass + except Exception as e: + self._log(execution_id, 'warning', f'Erreur émission fin COACHING: {e}') + + def _simulate_coaching_execution( + self, + execution_id: str, + workflow_graph: Workflow + ) -> None: + """ + Simule une exécution COACHING (fallback si ExecutionLoop non disponible). 
+ """ + total_nodes = len(workflow_graph.nodes) + coaching_stats = { + 'suggestions_made': 0, + 'accepted': 0, + 'rejected': 0, + 'corrected': 0, + 'manual_executions': 0 + } + + for i, node in enumerate(workflow_graph.nodes): + # Émettre suggestion + action_info = { + 'action': getattr(node, 'action_type', 'click'), + 'target': getattr(node, 'target', {}), + 'params': getattr(node, 'params', {}), + 'confidence': 0.85, + 'node_name': node.name + } + + self._emit_coaching_suggestion(execution_id, action_info) + coaching_stats['suggestions_made'] += 1 + + # Attendre décision + response = self._wait_for_coaching_response(execution_id, timeout_seconds=60.0) + decision = response.get('decision', 'accept') + + # Traiter la décision + if decision == 'accept': + coaching_stats['accepted'] += 1 + elif decision == 'reject': + coaching_stats['rejected'] += 1 + continue + elif decision == 'correct': + coaching_stats['corrected'] += 1 + elif decision == 'manual': + coaching_stats['manual_executions'] += 1 + + # Simuler exécution + time.sleep(0.1) + + # Notifier progression + progress = (i + 1) / total_nodes * 100 + self._notify_progress(execution_id, 'node_completed', { + 'node_id': node.node_id, + 'node_name': node.name, + 'progress': progress, + 'coaching_decision': decision + }) + + self._log(execution_id, 'info', f'Node {node.name} - Décision: {decision}') + + # Stocker stats + result = self.executions.get(execution_id) + if result: + result.analytics_data['coaching_stats'] = coaching_stats + + +# Instance globale de l'exécuteur +_executor_instance = None + + +def get_executor() -> VisualWorkflowExecutor: + """Récupère l'instance globale de l'exécuteur""" + global _executor_instance + if _executor_instance is None: + _executor_instance = VisualWorkflowExecutor() + return _executor_instance diff --git a/visual_workflow_builder/frontend/src/components/CoachingPanel/CoachingDecisionButtons.tsx 
b/visual_workflow_builder/frontend/src/components/CoachingPanel/CoachingDecisionButtons.tsx new file mode 100644 index 000000000..2787b2ab2 --- /dev/null +++ b/visual_workflow_builder/frontend/src/components/CoachingPanel/CoachingDecisionButtons.tsx @@ -0,0 +1,102 @@ +/** + * CoachingDecisionButtons Component + * + * Decision buttons for COACHING mode. + * Provides accept, reject, correct, manual, and skip options. + */ + +import React from 'react'; +import { CoachingDecision } from '../../hooks/useCoachingWebSocket'; + +interface CoachingDecisionButtonsProps { + onDecision: (decision: CoachingDecision) => void; + onShowCorrection: () => void; + disabled?: boolean; +} + +interface DecisionButtonConfig { + decision: CoachingDecision | 'correction'; + label: string; + shortcut: string; + icon: string; + className: string; + title: string; +} + +const DECISION_BUTTONS: DecisionButtonConfig[] = [ + { + decision: 'accept', + label: 'Accepter', + shortcut: 'A', + icon: '\u2713', + className: 'btn-accept', + title: 'Accepter et executer cette action (A)', + }, + { + decision: 'reject', + label: 'Rejeter', + shortcut: 'R', + icon: '\u2717', + className: 'btn-reject', + title: 'Rejeter cette action et passer (R)', + }, + { + decision: 'correction', + label: 'Corriger', + shortcut: 'C', + icon: '\u270E', + className: 'btn-correct', + title: 'Modifier cette action avant execution (C)', + }, + { + decision: 'manual', + label: 'Manuel', + shortcut: 'M', + icon: '\u{1F590}', + className: 'btn-manual', + title: 'Executer manuellement puis continuer (M)', + }, + { + decision: 'skip', + label: 'Passer', + shortcut: 'S', + icon: '\u23E9', + className: 'btn-skip', + title: 'Passer cette etape (S)', + }, +]; + +const CoachingDecisionButtons: React.FC = ({ + onDecision, + onShowCorrection, + disabled = false, +}) => { + const handleClick = (button: DecisionButtonConfig) => { + if (button.decision === 'correction') { + onShowCorrection(); + } else { + onDecision(button.decision as 
CoachingDecision); + } + }; + + return ( +
+ {DECISION_BUTTONS.map((button) => ( + + ))} +
+ ); +}; + +export default CoachingDecisionButtons; diff --git a/visual_workflow_builder/frontend/src/components/CoachingPanel/CoachingPanel.css b/visual_workflow_builder/frontend/src/components/CoachingPanel/CoachingPanel.css new file mode 100644 index 000000000..a796834fa --- /dev/null +++ b/visual_workflow_builder/frontend/src/components/CoachingPanel/CoachingPanel.css @@ -0,0 +1,695 @@ +/** + * CoachingPanel Styles + * + * Styles for the COACHING mode UI components. + */ + +/* Main Panel */ +.coaching-panel { + display: flex; + flex-direction: column; + background: #1e1e2e; + border-radius: 8px; + border: 1px solid #313244; + overflow: hidden; + font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; +} + +.coaching-panel.active { + border-color: #89b4fa; + box-shadow: 0 0 10px rgba(137, 180, 250, 0.2); +} + +/* Header */ +.coaching-panel-header { + display: flex; + justify-content: space-between; + align-items: center; + padding: 12px 16px; + background: #313244; + border-bottom: 1px solid #45475a; +} + +.coaching-panel-header h3 { + margin: 0; + font-size: 16px; + color: #cdd6f4; + display: flex; + align-items: center; + gap: 8px; +} + +.coaching-icon { + font-size: 20px; +} + +.coaching-status { + display: flex; + align-items: center; + gap: 8px; + font-size: 12px; + color: #a6adc8; +} + +.status-indicator { + width: 8px; + height: 8px; + border-radius: 50%; + background: #f38ba8; +} + +.status-indicator.connected { + background: #a6e3a1; + animation: pulse 2s infinite; +} + +@keyframes pulse { + 0%, 100% { opacity: 1; } + 50% { opacity: 0.5; } +} + +/* Error display */ +.coaching-error { + display: flex; + align-items: center; + gap: 8px; + padding: 8px 16px; + background: rgba(243, 139, 168, 0.1); + color: #f38ba8; + font-size: 13px; +} + +.error-icon { + font-size: 16px; +} + +/* Content area */ +.coaching-panel-content { + flex: 1; + padding: 16px; + overflow-y: auto; +} + +/* Start prompt */ +.coaching-start-prompt { + 
text-align: center; + padding: 24px; +} + +.coaching-start-prompt p { + color: #a6adc8; + margin-bottom: 16px; +} + +.btn-start-coaching { + padding: 12px 24px; + background: #89b4fa; + color: #1e1e2e; + border: none; + border-radius: 6px; + font-size: 14px; + font-weight: 600; + cursor: pointer; + transition: background 0.2s; +} + +.btn-start-coaching:hover { + background: #b4befe; +} + +.btn-start-coaching:disabled { + background: #45475a; + color: #6c7086; + cursor: not-allowed; +} + +/* Waiting state */ +.coaching-waiting { + display: flex; + flex-direction: column; + align-items: center; + gap: 16px; + padding: 32px; + color: #a6adc8; +} + +.spinner { + width: 32px; + height: 32px; + border: 3px solid #45475a; + border-top-color: #89b4fa; + border-radius: 50%; + animation: spin 1s linear infinite; +} + +@keyframes spin { + to { transform: rotate(360deg); } +} + +/* Suggestion Card */ +.coaching-suggestion-card { + background: #313244; + border-radius: 8px; + padding: 16px; + margin-bottom: 16px; +} + +.suggestion-header { + display: flex; + align-items: center; + gap: 12px; + margin-bottom: 12px; +} + +.action-icon { + font-size: 24px; +} + +.action-name { + font-size: 18px; + font-weight: 600; + color: #cdd6f4; +} + +.confidence-badge { + margin-left: auto; + padding: 4px 10px; + border-radius: 12px; + font-size: 12px; + font-weight: 600; + color: white; +} + +.suggestion-target, +.suggestion-params { + margin-bottom: 12px; +} + +.suggestion-target label, +.suggestion-params label { + display: block; + font-size: 11px; + color: #6c7086; + text-transform: uppercase; + margin-bottom: 4px; +} + +.target-value { + color: #89b4fa; + font-family: monospace; + font-size: 13px; +} + +.suggestion-params ul { + margin: 0; + padding-left: 16px; + color: #cdd6f4; + font-size: 13px; +} + +.suggestion-params li { + margin-bottom: 4px; +} + +.suggestion-screenshot { + margin-top: 12px; + border-radius: 4px; + overflow: hidden; +} + +.suggestion-screenshot img { + max-width: 
100%; + height: auto; +} + +.suggestion-alternatives { + margin-top: 12px; + padding-top: 12px; + border-top: 1px solid #45475a; +} + +.suggestion-alternatives label { + display: block; + font-size: 11px; + color: #6c7086; + margin-bottom: 8px; +} + +.suggestion-alternatives ul { + margin: 0; + padding-left: 16px; + font-size: 12px; + color: #a6adc8; +} + +.suggestion-context { + margin-top: 12px; + font-size: 12px; +} + +.suggestion-context summary { + cursor: pointer; + color: #6c7086; +} + +.suggestion-context pre { + background: #1e1e2e; + padding: 8px; + border-radius: 4px; + overflow-x: auto; + color: #a6adc8; + font-size: 11px; +} + +/* Decision Buttons */ +.coaching-decision-buttons { + display: grid; + grid-template-columns: repeat(5, 1fr); + gap: 8px; + margin-bottom: 16px; +} + +.coaching-decision-btn { + display: flex; + flex-direction: column; + align-items: center; + gap: 4px; + padding: 12px 8px; + border: 1px solid #45475a; + border-radius: 8px; + background: #313244; + color: #cdd6f4; + cursor: pointer; + transition: all 0.2s; +} + +.coaching-decision-btn:hover { + transform: translateY(-2px); +} + +.coaching-decision-btn:disabled { + opacity: 0.5; + cursor: not-allowed; + transform: none; +} + +.coaching-decision-btn .btn-icon { + font-size: 20px; +} + +.coaching-decision-btn .btn-label { + font-size: 11px; + font-weight: 500; +} + +.coaching-decision-btn .btn-shortcut { + font-size: 10px; + padding: 2px 4px; + background: #45475a; + border-radius: 3px; + font-family: monospace; +} + +.btn-accept:hover { + background: rgba(166, 227, 161, 0.2); + border-color: #a6e3a1; +} + +.btn-reject:hover { + background: rgba(243, 139, 168, 0.2); + border-color: #f38ba8; +} + +.btn-correct:hover { + background: rgba(249, 226, 175, 0.2); + border-color: #f9e2af; +} + +.btn-manual:hover { + background: rgba(137, 180, 250, 0.2); + border-color: #89b4fa; +} + +.btn-skip:hover { + background: rgba(166, 173, 200, 0.2); + border-color: #a6adc8; +} + +/* Feedback input 
*/ +.coaching-feedback { + margin-bottom: 16px; +} + +.coaching-feedback label { + display: block; + font-size: 12px; + color: #a6adc8; + margin-bottom: 4px; +} + +.coaching-feedback input { + width: 100%; + padding: 8px 12px; + background: #313244; + border: 1px solid #45475a; + border-radius: 6px; + color: #cdd6f4; + font-size: 13px; +} + +.coaching-feedback input:focus { + outline: none; + border-color: #89b4fa; +} + +/* Result display */ +.coaching-result { + display: flex; + align-items: center; + gap: 8px; + padding: 12px 16px; + border-radius: 6px; + margin-bottom: 16px; + font-size: 13px; +} + +.coaching-result.success { + background: rgba(166, 227, 161, 0.1); + color: #a6e3a1; +} + +.coaching-result.error { + background: rgba(243, 139, 168, 0.1); + color: #f38ba8; +} + +.result-icon { + font-size: 18px; + font-weight: bold; +} + +/* Stats Display */ +.coaching-stats { + padding: 16px; + background: #313244; + border-top: 1px solid #45475a; +} + +.stats-header { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 12px; +} + +.stats-header h4 { + margin: 0; + font-size: 13px; + color: #cdd6f4; +} + +.btn-refresh-stats { + padding: 4px 8px; + background: transparent; + border: 1px solid #45475a; + border-radius: 4px; + color: #a6adc8; + cursor: pointer; + font-size: 14px; +} + +.btn-refresh-stats:hover { + background: #45475a; +} + +.stats-grid { + display: grid; + grid-template-columns: repeat(5, 1fr); + gap: 8px; + margin-bottom: 16px; +} + +.stat-item { + text-align: center; + padding: 8px; + background: #1e1e2e; + border-radius: 6px; +} + +.stat-value { + display: block; + font-size: 20px; + font-weight: 600; + color: #cdd6f4; +} + +.stat-label { + display: block; + font-size: 10px; + color: #6c7086; + text-transform: uppercase; +} + +.stat-item.accepted .stat-value { color: #a6e3a1; } +.stat-item.rejected .stat-value { color: #f38ba8; } +.stat-item.corrected .stat-value { color: #f9e2af; } +.stat-item.manual 
.stat-value { color: #89b4fa; } + +.stats-rates { + display: flex; + flex-direction: column; + gap: 8px; +} + +.rate-item { + display: flex; + align-items: center; + gap: 12px; +} + +.rate-label { + font-size: 11px; + color: #a6adc8; + width: 120px; +} + +.rate-bar-container { + flex: 1; + height: 8px; + background: #1e1e2e; + border-radius: 4px; + overflow: hidden; +} + +.rate-bar { + height: 100%; + border-radius: 4px; + transition: width 0.3s ease; +} + +.rate-value { + font-size: 12px; + font-weight: 600; + width: 50px; + text-align: right; +} + +.learning-progress, +.learning-warning { + display: flex; + align-items: center; + gap: 8px; + padding: 12px; + border-radius: 6px; + margin-top: 12px; + font-size: 12px; +} + +.learning-progress { + background: rgba(166, 227, 161, 0.1); + color: #a6e3a1; +} + +.learning-warning { + background: rgba(249, 226, 175, 0.1); + color: #f9e2af; +} + +/* Correction Editor */ +.correction-editor { + background: #313244; + border-radius: 8px; + padding: 16px; +} + +.correction-editor h4 { + margin: 0 0 16px 0; + font-size: 14px; + color: #cdd6f4; +} + +.form-group { + margin-bottom: 16px; +} + +.form-group label { + display: block; + font-size: 12px; + color: #a6adc8; + margin-bottom: 6px; +} + +.form-group input, +.form-group select, +.form-group textarea { + width: 100%; + padding: 8px 12px; + background: #1e1e2e; + border: 1px solid #45475a; + border-radius: 6px; + color: #cdd6f4; + font-size: 13px; +} + +.form-group textarea { + resize: vertical; + font-family: inherit; +} + +.form-group input:focus, +.form-group select:focus, +.form-group textarea:focus { + outline: none; + border-color: #89b4fa; +} + +.params-editor { + background: #1e1e2e; + border-radius: 6px; + padding: 12px; +} + +.param-row { + display: flex; + align-items: center; + gap: 8px; + margin-bottom: 8px; +} + +.param-key { + font-size: 12px; + color: #89b4fa; + font-family: monospace; + min-width: 80px; +} + +.param-row input { + flex: 1; + padding: 4px 
8px; + font-size: 12px; +} + +.btn-add-param { + padding: 6px 12px; + background: transparent; + border: 1px dashed #45475a; + border-radius: 4px; + color: #6c7086; + cursor: pointer; + font-size: 12px; + width: 100%; + margin-top: 8px; +} + +.btn-add-param:hover { + border-color: #89b4fa; + color: #89b4fa; +} + +.correction-actions { + display: flex; + gap: 12px; + justify-content: flex-end; +} + +.btn-cancel, +.btn-apply-correction { + padding: 10px 20px; + border: none; + border-radius: 6px; + font-size: 13px; + font-weight: 500; + cursor: pointer; +} + +.btn-cancel { + background: #45475a; + color: #cdd6f4; +} + +.btn-cancel:hover { + background: #585b70; +} + +.btn-apply-correction { + background: #89b4fa; + color: #1e1e2e; +} + +.btn-apply-correction:hover { + background: #b4befe; +} + +/* Footer */ +.coaching-panel-footer { + padding: 12px 16px; + border-top: 1px solid #45475a; + text-align: center; +} + +.btn-end-session { + padding: 8px 16px; + background: transparent; + border: 1px solid #f38ba8; + border-radius: 6px; + color: #f38ba8; + font-size: 12px; + cursor: pointer; + transition: all 0.2s; +} + +.btn-end-session:hover { + background: rgba(243, 139, 168, 0.1); +} + +/* Shortcuts help */ +.coaching-shortcuts-help { + padding: 8px 16px; + background: #1e1e2e; + text-align: center; + border-top: 1px solid #313244; +} + +.coaching-shortcuts-help small { + color: #6c7086; + font-size: 11px; +} + +.coaching-shortcuts-help kbd { + padding: 2px 6px; + background: #313244; + border-radius: 3px; + font-family: monospace; + margin: 0 2px; +} + +/* Responsive */ +@media (max-width: 768px) { + .coaching-decision-buttons { + grid-template-columns: repeat(3, 1fr); + } + + .stats-grid { + grid-template-columns: repeat(3, 1fr); + } +} diff --git a/visual_workflow_builder/frontend/src/components/CoachingPanel/CoachingStatsDisplay.tsx b/visual_workflow_builder/frontend/src/components/CoachingPanel/CoachingStatsDisplay.tsx new file mode 100644 index 
000000000..a489aeb6a --- /dev/null +++ b/visual_workflow_builder/frontend/src/components/CoachingPanel/CoachingStatsDisplay.tsx @@ -0,0 +1,153 @@ +/** + * CoachingStatsDisplay Component + * + * Displays COACHING session statistics including: + * - Total suggestions made + * - Acceptance/rejection/correction counts + * - Acceptance and correction rates + */ + +import React from 'react'; +import { CoachingStats } from '../../hooks/useCoachingWebSocket'; + +interface CoachingStatsDisplayProps { + stats: CoachingStats; + onRefresh?: () => void; +} + +const CoachingStatsDisplay: React.FC = ({ + stats, + onRefresh, +}) => { + // Calculate rates + const total = stats.accepted + stats.rejected + stats.corrected + stats.manualExecutions; + const acceptanceRate = total > 0 ? (stats.accepted / total) * 100 : 0; + const correctionRate = total > 0 ? (stats.corrected / total) * 100 : 0; + + // Get color for rate + const getRateColor = (rate: number, isAcceptance: boolean): string => { + if (isAcceptance) { + if (rate >= 80) return '#4caf50'; + if (rate >= 50) return '#ff9800'; + return '#f44336'; + } + // For correction rate, lower is better + if (rate <= 10) return '#4caf50'; + if (rate <= 30) return '#ff9800'; + return '#f44336'; + }; + + return ( +
+
+

Statistiques COACHING

+ {onRefresh && ( + + )} +
+ +
+ {/* Suggestions count */} +
+ {stats.suggestionsMade} + Suggestions +
+ + {/* Accepted count */} +
+ {stats.accepted} + Acceptees +
+ + {/* Rejected count */} +
+ {stats.rejected} + Rejetees +
+ + {/* Corrected count */} +
+ {stats.corrected} + Corrigees +
+ + {/* Manual executions */} +
+ {stats.manualExecutions} + Manuelles +
+
+ + {/* Rate indicators */} +
+ {/* Acceptance rate */} +
+
Taux d'acceptation
+
+
+
+
+ {acceptanceRate.toFixed(1)}% +
+
+ + {/* Correction rate */} +
+
Taux de correction
+
+
+
+
+ {correctionRate.toFixed(1)}% +
+
+
+ + {/* Learning progress indicator */} + {total >= 10 && acceptanceRate >= 80 && ( +
+ 📈 + + Excellent ! Le workflow peut passer en mode AUTO. + +
+ )} + + {total >= 10 && acceptanceRate < 50 && ( +
+ + + Taux d'acceptation faible. Le workflow necessite plus de corrections. + +
+ )} +
+ ); +}; + +export default CoachingStatsDisplay; diff --git a/visual_workflow_builder/frontend/src/components/CoachingPanel/CoachingSuggestionCard.tsx b/visual_workflow_builder/frontend/src/components/CoachingPanel/CoachingSuggestionCard.tsx new file mode 100644 index 000000000..d2a1e48a2 --- /dev/null +++ b/visual_workflow_builder/frontend/src/components/CoachingPanel/CoachingSuggestionCard.tsx @@ -0,0 +1,150 @@ +/** + * CoachingSuggestionCard Component + * + * Displays the current action suggestion in COACHING mode. + * Shows action type, target, parameters, and confidence level. + */ + +import React from 'react'; +import { CoachingSuggestion } from '../../hooks/useCoachingWebSocket'; + +interface CoachingSuggestionCardProps { + suggestion: CoachingSuggestion; + onViewDetails?: () => void; +} + +const CoachingSuggestionCard: React.FC = ({ + suggestion, + onViewDetails, +}) => { + // Get confidence color based on level + const getConfidenceColor = (confidence: number): string => { + if (confidence >= 0.8) return '#4caf50'; // Green + if (confidence >= 0.5) return '#ff9800'; // Orange + return '#f44336'; // Red + }; + + // Get action icon + const getActionIcon = (action: string): string => { + const icons: Record = { + click: '\u{1F5B1}', + double_click: '\u{1F5B1}', + right_click: '\u{1F5B1}', + type: '\u2328', + fill: '\u2328', + scroll: '\u2195', + hover: '\u{1F4CD}', + wait: '\u23F1', + screenshot: '\u{1F4F8}', + navigate: '\u{1F517}', + default: '\u2699', + }; + return icons[action.toLowerCase()] || icons.default; + }; + + // Format target for display + const formatTarget = (target: Record): string => { + if (target.text) return `Text: "${target.text}"`; + if (target.id) return `ID: ${target.id}`; + if (target.xpath) return `XPath: ${target.xpath.substring(0, 50)}...`; + if (target.css) return `CSS: ${target.css}`; + if (target.image) return 'Image match'; + if (target.x !== undefined && target.y !== undefined) { + return `Coordinates: (${target.x}, 
${target.y})`; + } + return JSON.stringify(target).substring(0, 50); + }; + + // Format params for display + const formatParams = (params: Record): string[] => { + return Object.entries(params) + .filter(([key]) => !['target', 'action'].includes(key)) + .map(([key, value]) => { + if (typeof value === 'string' && value.length > 30) { + return `${key}: "${value.substring(0, 30)}..."`; + } + return `${key}: ${JSON.stringify(value)}`; + }); + }; + + const confidencePercent = Math.round(suggestion.confidence * 100); + + return ( +
+ {/* Action header */} +
+ {getActionIcon(suggestion.action)} + {suggestion.action.toUpperCase()} +
+ {confidencePercent}% +
+
+ + {/* Target */} +
+ + {formatTarget(suggestion.target)} +
+ + {/* Parameters */} + {Object.keys(suggestion.params).length > 0 && ( +
+ +
    + {formatParams(suggestion.params).map((param, index) => ( +
  • {param}
  • + ))} +
+
+ )} + + {/* Screenshot preview */} + {suggestion.screenshotPath && ( +
+ Target element { + (e.target as HTMLImageElement).style.display = 'none'; + }} + /> +
+ )} + + {/* Alternatives */} + {suggestion.alternatives && suggestion.alternatives.length > 0 && ( +
+ +
    + {suggestion.alternatives.slice(0, 3).map((alt, index) => ( +
  • + {alt.action}: {formatTarget(alt.target)} ({Math.round(alt.confidence * 100)}%) +
  • + ))} +
+
+ )} + + {/* Context info */} + {suggestion.context && Object.keys(suggestion.context).length > 0 && ( +
+ Contexte +
{JSON.stringify(suggestion.context, null, 2)}
+
+ )} + + {/* View details button */} + {onViewDetails && ( + + )} +
+ ); +}; + +export default CoachingSuggestionCard; diff --git a/visual_workflow_builder/frontend/src/components/CoachingPanel/CorrectionEditor.tsx b/visual_workflow_builder/frontend/src/components/CoachingPanel/CorrectionEditor.tsx new file mode 100644 index 000000000..2c3149a0b --- /dev/null +++ b/visual_workflow_builder/frontend/src/components/CoachingPanel/CorrectionEditor.tsx @@ -0,0 +1,235 @@ +/** + * CorrectionEditor Component + * + * Allows users to modify a suggested action before execution. + * Supports editing: + * - Target (element selector) + * - Parameters (timeout, value, etc.) + * - Action type (in some cases) + */ + +import React, { useState, useEffect } from 'react'; +import { CoachingSuggestion } from '../../hooks/useCoachingWebSocket'; + +interface CorrectionEditorProps { + suggestion: CoachingSuggestion; + onSubmit: (correction: Record) => void; + onCancel: () => void; +} + +const CorrectionEditor: React.FC = ({ + suggestion, + onSubmit, + onCancel, +}) => { + // State for editable fields + const [targetType, setTargetType] = useState('text'); + const [targetValue, setTargetValue] = useState(''); + const [params, setParams] = useState>({}); + const [feedback, setFeedback] = useState(''); + + // Initialize from suggestion + useEffect(() => { + // Detect target type + if (suggestion.target.text) { + setTargetType('text'); + setTargetValue(suggestion.target.text); + } else if (suggestion.target.id) { + setTargetType('id'); + setTargetValue(suggestion.target.id); + } else if (suggestion.target.xpath) { + setTargetType('xpath'); + setTargetValue(suggestion.target.xpath); + } else if (suggestion.target.css) { + setTargetType('css'); + setTargetValue(suggestion.target.css); + } else if (suggestion.target.x !== undefined && suggestion.target.y !== undefined) { + setTargetType('coordinates'); + setTargetValue(`${suggestion.target.x},${suggestion.target.y}`); + } + + // Copy params + setParams({ ...suggestion.params }); + }, [suggestion]); + + // Build 
correction object + const buildCorrection = (): Record => { + const correction: Record = {}; + + // Build corrected target + const correctedTarget: Record = {}; + switch (targetType) { + case 'text': + correctedTarget.text = targetValue; + break; + case 'id': + correctedTarget.id = targetValue; + break; + case 'xpath': + correctedTarget.xpath = targetValue; + break; + case 'css': + correctedTarget.css = targetValue; + break; + case 'coordinates': + const [x, y] = targetValue.split(',').map((v) => parseInt(v.trim(), 10)); + correctedTarget.x = x; + correctedTarget.y = y; + break; + } + + // Only include target if changed + if (JSON.stringify(correctedTarget) !== JSON.stringify(suggestion.target)) { + correction.target = correctedTarget; + } + + // Only include params if changed + if (JSON.stringify(params) !== JSON.stringify(suggestion.params)) { + correction.params = params; + } + + // Include feedback + if (feedback.trim()) { + correction.feedback = feedback; + } + + return correction; + }; + + // Handle param change + const handleParamChange = (key: string, value: any) => { + setParams((prev) => ({ ...prev, [key]: value })); + }; + + // Handle submit + const handleSubmit = (e: React.FormEvent) => { + e.preventDefault(); + const correction = buildCorrection(); + onSubmit(correction); + }; + + return ( +
+

Corriger l'action

+ +
+ {/* Target type selector */} +
+ + +
+ + {/* Target value */} +
+ + {targetType === 'xpath' ? ( +