v1.0 - Stable version: multi-PC, UI-DETR-1 detection, 3 execution modes

- Frontend v4 accessible on the local network (192.168.1.40)
- Open ports: 3002 (frontend), 5001 (backend), 5004 (dashboard)
- Ollama GPU working
- Interactive self-healing
- Confidence dashboard

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
core/analytics/integration/__init__.py (new file, 11 lines added)
@@ -0,0 +1,11 @@
"""Analytics integration module."""

from .execution_integration import (
    AnalyticsExecutionIntegration,
    get_analytics_integration
)

__all__ = [
    'AnalyticsExecutionIntegration',
    'get_analytics_integration'
]
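For reference, a minimal sketch of how a caller would consume these re-exports. The `core.analytics.integration` import path assumes the repository root is on `PYTHONPATH`; the singleton behavior is defined in `execution_integration.py` below.

```python
# Minimal sketch; assumes the repository root is on PYTHONPATH.
from core.analytics.integration import get_analytics_integration

integration = get_analytics_integration(enabled=True)
# The factory returns a process-wide singleton on subsequent calls.
assert integration is get_analytics_integration()
```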
core/analytics/integration/execution_integration.py (new file, 370 lines added)
@@ -0,0 +1,370 @@
"""Integration of analytics with ExecutionLoop."""

import logging
from typing import Optional
from datetime import datetime
import uuid

from ..analytics_system import get_analytics_system
from ..collection.metrics_collector import ExecutionMetrics, StepMetrics

logger = logging.getLogger(__name__)


class AnalyticsExecutionIntegration:
    """Integrate analytics collection with workflow execution."""

    def __init__(self, enabled: bool = True):
        """
        Initialize analytics integration.

        Args:
            enabled: Whether analytics collection is enabled
        """
        self.enabled = enabled
        self.analytics = None

        if enabled:
            try:
                self.analytics = get_analytics_system()
                logger.info("Analytics integration enabled")
            except Exception as e:
                logger.error(f"Failed to initialize analytics: {e}")
                self.enabled = False

    def on_execution_start(
        self,
        workflow_id: str,
        execution_id: Optional[str] = None,
        total_steps: int = 0
    ) -> str:
        """
        Called when workflow execution starts.

        Args:
            workflow_id: Workflow identifier
            execution_id: Execution identifier (generated if None)
            total_steps: Total number of steps

        Returns:
            Execution ID
        """
        if not self.enabled or not self.analytics:
            return execution_id or str(uuid.uuid4())

        if execution_id is None:
            execution_id = str(uuid.uuid4())

        try:
            # Start real-time tracking
            self.analytics.realtime_analytics.track_execution(
                execution_id=execution_id,
                workflow_id=workflow_id,
                total_steps=total_steps
            )

            logger.debug(f"Started tracking execution: {execution_id}")
        except Exception as e:
            logger.error(f"Error starting execution tracking: {e}")

        return execution_id

    def on_step_start(
        self,
        execution_id: str,
        node_id: str,
        step_number: int
    ) -> None:
        """
        Called when a step starts.

        Args:
            execution_id: Execution identifier
            node_id: Node identifier
            step_number: Step number
        """
        if not self.enabled or not self.analytics:
            return

        try:
            # Update progress
            self.analytics.realtime_analytics.update_progress(
                execution_id=execution_id,
                current_step=step_number,
                current_node_id=node_id
            )
        except Exception as e:
            logger.error(f"Error updating step progress: {e}")

    def on_step_complete(
        self,
        execution_id: str,
        workflow_id: str,
        node_id: str,
        action_type: str,
        started_at: datetime,
        completed_at: datetime,
        duration: float,
        success: bool,
        error_message: Optional[str] = None
    ) -> None:
        """
        Called when a step completes.

        Args:
            execution_id: Execution identifier
            workflow_id: Workflow identifier
            node_id: Node identifier
            action_type: Type of action
            started_at: Start timestamp
            completed_at: Completion timestamp
            duration: Duration in seconds
            success: Whether step succeeded
            error_message: Error message if failed
        """
        if not self.enabled or not self.analytics:
            return

        try:
            # Record step metrics
            step_metrics = StepMetrics(
                execution_id=execution_id,
                workflow_id=workflow_id,
                node_id=node_id,
                action_type=action_type,
                started_at=started_at,
                completed_at=completed_at,
                duration=duration,
                success=success,
                error_message=error_message
            )

            self.analytics.metrics_collector.record_step(step_metrics)

            # Update real-time tracking
            self.analytics.realtime_analytics.record_step_complete(
                execution_id=execution_id,
                success=success
            )

            logger.debug(f"Recorded step: {node_id} ({'success' if success else 'failed'})")
        except Exception as e:
            logger.error(f"Error recording step completion: {e}")

    def on_execution_complete(
        self,
        execution_id: str,
        workflow_id: str,
        started_at: datetime,
        completed_at: datetime,
        duration: float,
        status: str,
        error_message: Optional[str] = None,
        steps_completed: int = 0,
        steps_failed: int = 0
    ) -> None:
        """
        Called when workflow execution completes.

        Args:
            execution_id: Execution identifier
            workflow_id: Workflow identifier
            started_at: Start timestamp
            completed_at: Completion timestamp
            duration: Duration in seconds
            status: Final status (success, failed, timeout)
            error_message: Error message if failed
            steps_completed: Number of steps completed
            steps_failed: Number of steps failed
        """
        if not self.enabled or not self.analytics:
            return

        try:
            # Record execution metrics
            execution_metrics = ExecutionMetrics(
                execution_id=execution_id,
                workflow_id=workflow_id,
                started_at=started_at,
                completed_at=completed_at,
                duration=duration,
                status=status,
                error_message=error_message,
                steps_completed=steps_completed,
                steps_failed=steps_failed
            )

            self.analytics.metrics_collector.record_execution(execution_metrics)

            # Flush to ensure persistence
            self.analytics.metrics_collector.flush()

            # Complete real-time tracking
            self.analytics.realtime_analytics.complete_execution(
                execution_id=execution_id,
                status=status
            )

            logger.info(f"Recorded execution: {execution_id} ({status})")
        except Exception as e:
            logger.error(f"Error recording execution completion: {e}")

    def on_recovery_attempt(
        self,
        execution_id: str,
        workflow_id: str,
        node_id: str,
        strategy: str,
        success: bool,
        duration: float
    ) -> None:
        """
        Called when self-healing attempts recovery.

        Args:
            execution_id: Execution identifier
            workflow_id: Workflow identifier
            node_id: Node identifier
            strategy: Recovery strategy used
            success: Whether recovery succeeded
            duration: Recovery duration
        """
        if not self.enabled or not self.analytics:
            return

        try:
            # Record as a special step metric
            recovery_metrics = StepMetrics(
                execution_id=execution_id,
                workflow_id=workflow_id,
                node_id=f"{node_id}_recovery",
                action_type=f"recovery_{strategy}",
                started_at=datetime.now(),
                completed_at=datetime.now(),
                duration=duration,
                success=success,
                error_message=None if success else f"Recovery failed: {strategy}"
            )

            self.analytics.metrics_collector.record_step(recovery_metrics)

            logger.debug(f"Recorded recovery: {strategy} ({'success' if success else 'failed'})")
        except Exception as e:
            logger.error(f"Error recording recovery attempt: {e}")

    def get_live_metrics(self, execution_id: str) -> Optional[dict]:
        """
        Get live metrics for an execution.

        Args:
            execution_id: Execution identifier

        Returns:
            Live metrics dictionary or None
        """
        if not self.enabled or not self.analytics:
            return None

        try:
            return self.analytics.realtime_analytics.get_live_metrics(execution_id)
        except Exception as e:
            logger.error(f"Error getting live metrics: {e}")
            return None

    def get_workflow_stats(self, workflow_id: str, hours: int = 24) -> Optional[dict]:
        """
        Get statistics for a workflow.

        Args:
            workflow_id: Workflow identifier
            hours: Time window in hours

        Returns:
            Statistics dictionary or None
        """
        if not self.enabled or not self.analytics:
            return None

        try:
            from datetime import timedelta

            end_time = datetime.now()
            start_time = end_time - timedelta(hours=hours)

            # Get performance stats
            perf_stats = self.analytics.performance_analyzer.analyze_performance(
                workflow_id=workflow_id,
                start_time=start_time,
                end_time=end_time
            )

            # Get success rate
            success_stats = self.analytics.success_rate_calculator.calculate_success_rate(
                workflow_id=workflow_id,
                time_window_hours=hours
            )

            return {
                'performance': perf_stats.to_dict(),
                'success_rate': success_stats.to_dict()
            }
        except Exception as e:
            logger.error(f"Error getting workflow stats: {e}")
            return None

    def start_resource_monitoring(self, execution_id: str) -> None:
        """
        Start monitoring resources for an execution.

        Args:
            execution_id: Execution identifier
        """
        if not self.enabled or not self.analytics:
            return

        try:
            # Tag resource metrics with execution ID
            self.analytics.collectors.resource.start_monitoring(
                context={'execution_id': execution_id}
            )
            logger.debug(f"Started resource monitoring for: {execution_id}")
        except Exception as e:
            logger.warning(f"Error starting resource monitoring: {e}")

    def stop_resource_monitoring(self, execution_id: str) -> None:
        """
        Stop monitoring resources for an execution.

        Args:
            execution_id: Execution identifier
        """
        if not self.enabled or not self.analytics:
            return

        try:
            self.analytics.collectors.resource.stop_monitoring()
            logger.debug(f"Stopped resource monitoring for: {execution_id}")
        except Exception as e:
            logger.warning(f"Error stopping resource monitoring: {e}")


# Global instance
_analytics_integration: Optional[AnalyticsExecutionIntegration] = None


def get_analytics_integration(enabled: bool = True) -> AnalyticsExecutionIntegration:
    """
    Get or create global analytics integration instance.

    Args:
        enabled: Whether analytics is enabled

    Returns:
        AnalyticsExecutionIntegration instance
    """
    global _analytics_integration

    if _analytics_integration is None:
        _analytics_integration = AnalyticsExecutionIntegration(enabled=enabled)

    return _analytics_integration
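To show how these hooks fit together, here is a hedged wiring sketch. The two-step workflow, its node IDs, and the action type are invented for illustration; only the `integration.*` calls match the API added in this commit.

```python
# Illustrative wiring only: workflow "wf-demo", node IDs, and action_type
# are made up; the integration calls match this commit's API.
from datetime import datetime

from core.analytics.integration import get_analytics_integration

integration = get_analytics_integration(enabled=True)

execution_id = integration.on_execution_start(workflow_id="wf-demo", total_steps=2)
integration.start_resource_monitoring(execution_id)

run_started = datetime.now()
for step_number, node_id in enumerate(["click_login", "type_password"], start=1):
    integration.on_step_start(execution_id, node_id, step_number)
    step_started = datetime.now()
    # ... perform the actual UI action here ...
    step_done = datetime.now()
    integration.on_step_complete(
        execution_id=execution_id,
        workflow_id="wf-demo",
        node_id=node_id,
        action_type="click",
        started_at=step_started,
        completed_at=step_done,
        duration=(step_done - step_started).total_seconds(),
        success=True,
    )

integration.stop_resource_monitoring(execution_id)
run_done = datetime.now()
integration.on_execution_complete(
    execution_id=execution_id,
    workflow_id="wf-demo",
    started_at=run_started,
    completed_at=run_done,
    duration=(run_done - run_started).total_seconds(),
    status="success",
    steps_completed=2,
    steps_failed=0,
)
```

Note that every hook degrades to a no-op (or to a generated UUID, in the case of `on_execution_start`) when `enabled=False` or when `get_analytics_system()` failed at construction time, so an execution loop can call them unconditionally.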