v1.0 - Version stable: multi-PC, détection UI-DETR-1, 3 modes exécution
- Frontend v4 accessible sur réseau local (192.168.1.40) - Ports ouverts: 3002 (frontend), 5001 (backend), 5004 (dashboard) - Ollama GPU fonctionnel - Self-healing interactif - Dashboard confiance Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
282
demo_integrated_execution.py
Executable file
282
demo_integrated_execution.py
Executable file
@@ -0,0 +1,282 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Demo of integrated execution with analytics and self-healing."""
|
||||
|
||||
import time
|
||||
from datetime import datetime, timedelta
|
||||
from dataclasses import dataclass
|
||||
from typing import Optional
|
||||
|
||||
from core.analytics.integration import get_analytics_integration
|
||||
|
||||
|
||||
@dataclass
class MockNode:
    """Mock workflow node.

    Represents a single executable step inside a mock workflow.
    """

    # Unique identifier of the node within its workflow.
    node_id: str
    # Kind of action this node simulates (SOURCE uses "click", "type", "wait").
    action_type: str
    # When True, the demo reports this node as a simulated failure.
    should_fail: bool = False
|
||||
|
||||
|
||||
@dataclass
class MockWorkflow:
    """Mock workflow.

    A named, ordered collection of step nodes.
    """

    # Identifier used to group executions in analytics.
    workflow_id: str
    # Ordered steps to execute (loosely typed as a plain ``list``).
    nodes: list
|
||||
|
||||
|
||||
@dataclass
class ExecutionResult:
    """Execution result.

    NOTE(review): not referenced anywhere else in this demo file —
    presumably kept for parity with the real execution loop; confirm.
    """

    # Whether the execution succeeded.
    success: bool
    # Optional error description (None by default).
    error: Optional[str] = None
|
||||
|
||||
|
||||
class IntegratedExecutionDemo:
    """Demo of integrated execution.

    Runs mock workflows while reporting execution-level and step-level
    events to the analytics integration layer
    (``core.analytics.integration``), printing live progress and
    aggregated statistics along the way.
    """

    def __init__(self):
        """Initialize demo."""
        # Analytics facade used for all tracking callbacks.
        self.analytics = get_analytics_integration(enabled=True)
        # Identifiers of the run currently in flight (None between runs).
        self.current_execution_id = None
        self.current_workflow_id = None

        print("=" * 60)
        print("Integrated Execution Demo")
        print("=" * 60)

    def execute_workflow(self, workflow: MockWorkflow) -> bool:
        """
        Execute workflow with full analytics integration.

        Args:
            workflow: Workflow to execute

        Returns:
            True if successful
        """
        print(f"\n🚀 Executing workflow: {workflow.workflow_id}")
        print(f" Total steps: {len(workflow.nodes)}")

        # 1. Start tracking
        self.current_workflow_id = workflow.workflow_id
        self.current_execution_id = self.analytics.on_execution_start(
            workflow_id=workflow.workflow_id,
            total_steps=len(workflow.nodes)
        )

        print(f" Execution ID: {self.current_execution_id}")

        started_at = datetime.now()
        steps_completed = 0
        steps_failed = 0

        try:
            # 2. Execute steps
            for i, node in enumerate(workflow.nodes):
                if self._execute_step(node, i + 1):
                    steps_completed += 1
                else:
                    steps_failed += 1

                # Show live metrics
                live_metrics = self.analytics.get_live_metrics(self.current_execution_id)
                if live_metrics:
                    print(f" Progress: {live_metrics['progress_percent']:.1f}%")

                time.sleep(0.5)  # Simulate work
        except Exception as e:
            # 3b. Complete with failure (shared reporting helper).
            self._finish_execution(workflow, started_at, steps_completed,
                                   steps_failed, error=e)
            return False

        # 3a. Complete successfully (shared reporting helper).
        self._finish_execution(workflow, started_at, steps_completed, steps_failed)
        return True

    def _finish_execution(self, workflow: MockWorkflow, started_at: datetime,
                          steps_completed: int, steps_failed: int,
                          error: Optional[Exception] = None) -> None:
        """Report completion to analytics and print a run summary.

        Extracted because the success and failure paths of
        ``execute_workflow`` previously duplicated this code verbatim.

        Args:
            workflow: The workflow that just ran.
            started_at: Timestamp captured before the first step.
            steps_completed: Number of steps that succeeded.
            steps_failed: Number of steps that failed.
            error: Exception that aborted the run, or None on success.
        """
        completed_at = datetime.now()
        duration = (completed_at - started_at).total_seconds()

        # Only pass error_message on failure, so the success-path call
        # matches the original call signature exactly.
        extra = {} if error is None else {'error_message': str(error)}
        self.analytics.on_execution_complete(
            execution_id=self.current_execution_id,
            workflow_id=workflow.workflow_id,
            started_at=started_at,
            completed_at=completed_at,
            duration=duration,
            status='success' if error is None else 'failed',
            steps_completed=steps_completed,
            steps_failed=steps_failed,
            **extra
        )

        if error is None:
            print("\n✅ Workflow completed successfully!")
        else:
            print(f"\n❌ Workflow failed: {error}")
        print(f" Duration: {duration:.2f}s")
        print(f" Steps completed: {steps_completed}")
        print(f" Steps failed: {steps_failed}")

    def _execute_step(self, node: MockNode, step_number: int) -> bool:
        """
        Execute a single step with analytics.

        Args:
            node: Node to execute
            step_number: Step number

        Returns:
            True if successful
        """
        print(f"\n Step {step_number}: {node.node_id} ({node.action_type})")

        # Notify step start
        self.analytics.on_step_start(
            execution_id=self.current_execution_id,
            node_id=node.node_id,
            step_number=step_number
        )

        step_start = datetime.now()

        # Simulate execution
        time.sleep(0.3)

        # Determine result (node.should_fail drives simulated failures).
        if node.should_fail:
            success = False
            error_msg = f"Step {node.node_id} failed (simulated)"
            print(f" ❌ Failed: {error_msg}")
        else:
            success = True
            error_msg = None
            print(" ✅ Success")

        # Notify step complete
        step_end = datetime.now()
        self.analytics.on_step_complete(
            execution_id=self.current_execution_id,
            workflow_id=self.current_workflow_id,
            node_id=node.node_id,
            action_type=node.action_type,
            started_at=step_start,
            completed_at=step_end,
            duration=(step_end - step_start).total_seconds(),
            success=success,
            error_message=error_msg
        )

        return success

    def show_workflow_stats(self, workflow_id: str) -> None:
        """Show workflow statistics.

        Args:
            workflow_id: Workflow whose aggregated statistics are printed.
        """
        print(f"\n📊 Workflow Statistics: {workflow_id}")
        print("=" * 60)

        # Restrict the query to the last hour of executions.
        stats = self.analytics.get_workflow_stats(workflow_id, hours=1)

        if not stats:
            print(" No statistics available yet")
            return

        perf = stats['performance']
        success = stats['success_rate']

        print("\nPerformance:")
        print(f" Average Duration: {perf['avg_duration']:.2f}s")
        print(f" Median Duration: {perf['median_duration']:.2f}s")
        print(f" P95 Duration: {perf['p95_duration']:.2f}s")
        print(f" P99 Duration: {perf['p99_duration']:.2f}s")

        print("\nSuccess Rate:")
        print(f" Total Executions: {success['total_executions']}")
        print(f" Successful: {success['successful_executions']}")
        print(f" Failed: {success['failed_executions']}")
        print(f" Success Rate: {success['success_rate']:.1f}%")
        print(f" Reliability Score: {success['reliability_score']:.1f}")

        if success['failure_categories']:
            print("\nFailure Categories:")
            for category, count in success['failure_categories'].items():
                print(f" - {category}: {count}")
|
||||
|
||||
|
||||
def main():
    """Run demo."""
    demo = IntegratedExecutionDemo()

    # Three runs share the same workflow id so statistics accumulate
    # across executions; the last run contains a forced failure.
    node_specs = [
        [("step_1", "click", False),
         ("step_2", "type", False),
         ("step_3", "click", False)],
        [("step_1", "click", False),
         ("step_2", "type", False),
         ("step_3", "click", False),
         ("step_4", "wait", False)],
        [("step_1", "click", False),
         ("step_2", "type", True),   # This will fail
         ("step_3", "click", False)],
    ]
    workflows = [
        MockWorkflow(
            workflow_id="demo_workflow_1",
            nodes=[MockNode(node_id, action, should_fail=fail)
                   for node_id, action, fail in spec],
        )
        for spec in node_specs
    ]

    # Execute workflows
    banner = "=" * 60
    total = len(workflows)
    for run_number, workflow in enumerate(workflows, 1):
        print(f"\n{banner}")
        print(f"Execution {run_number}/{total}")
        print(banner)

        demo.execute_workflow(workflow)
        time.sleep(1)

    # Show statistics
    demo.show_workflow_stats("demo_workflow_1")

    print(f"\n{banner}")
    print("Demo Complete!")
    print(banner)
    print("\nNext Steps:")
    print(" 1. Check the metrics database: data/analytics/metrics.db")
    print(" 2. View analytics: python demo_analytics.py")
    print(" 3. Generate reports: see ANALYTICS_QUICKSTART.md")
    print(" 4. Integrate with your ExecutionLoop: see ANALYTICS_INTEGRATION_GUIDE.md")
|
||||
|
||||
|
||||
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Graceful exit on Ctrl+C.
        print("\n\nDemo interrupted by user")
    except Exception as e:
        # Top-level demo boundary: report and dump the traceback.
        print(f"\n\nError during demo: {e}")
        import traceback
        traceback.print_exc()
|
||||
Reference in New Issue
Block a user