Files
rpa_vision_v3/tests/integration/test_workflow_pipeline_enhanced.py
Dom a27b74cf22 v1.0 - Version stable: multi-PC, détection UI-DETR-1, 3 modes exécution
- Frontend v4 accessible sur réseau local (192.168.1.40)
- Ports ouverts: 3002 (frontend), 5001 (backend), 5004 (dashboard)
- Ollama GPU fonctionnel
- Self-healing interactif
- Dashboard confiance

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-29 11:23:51 +01:00

418 lines
17 KiB
Python

"""
Tests d'intégration pour WorkflowPipeline avec ExecutionResult amélioré
Auteur: Dom, Alice Kiro - 20 décembre 2024
"""
import pytest
import uuid
from datetime import datetime
from unittest.mock import Mock, patch, MagicMock
from pathlib import Path
from core.pipeline.workflow_pipeline import WorkflowPipeline
from core.pipeline.workflow_pipeline_enhanced import WorkflowPipelineEnhanced, migrate_execute_workflow_step
from core.models.screen_state import ScreenState, WindowContext, RawLevel, PerceptionLevel, ContextLevel, EmbeddingRef
from core.models.execution_result import WorkflowExecutionResult, StepExecutionStatus
from core.models.workflow_graph import Workflow, WorkflowNode, WorkflowEdge, Action
from core.execution.action_executor import ExecutionResult, ExecutionStatus
from core.execution.error_handler import RecoveryResult, RecoveryStrategy
class TestWorkflowPipelineEnhanced:
"""Tests d'intégration pour WorkflowPipeline avec ExecutionResult amélioré"""
@pytest.fixture
def mock_screen_state(self):
    """Build a fully populated ScreenState instance for the tests."""
    win_ctx = WindowContext(
        app_name="test_app",
        window_title="Test Window",
        screen_resolution=[1920, 1080],
        workspace="main",
    )
    raw_level = RawLevel(
        screenshot_path="/tmp/test_screenshot.png",
        capture_method="test",
        file_size_bytes=1024,
    )
    perception_level = PerceptionLevel(
        embedding=EmbeddingRef(
            provider="test",
            vector_id="test_vector",
            dimensions=512,
        ),
        detected_text=[],
        text_detection_method="test",
        confidence_avg=0.9,
    )
    context_level = ContextLevel(
        current_workflow_candidate="test_workflow",
        workflow_step=None,
        user_id="test_user",
        tags=[],
        business_variables={},
    )
    return ScreenState(
        screen_state_id="test_state",
        timestamp=datetime.now(),
        session_id="test_session",
        window=win_ctx,
        raw=raw_level,
        perception=perception_level,
        context=context_level,
        ui_elements=[],
    )
@pytest.fixture
def mock_workflow_pipeline(self):
    """Build a mocked WorkflowPipeline exposing the collaborators the tests wire up."""
    pipeline = Mock(spec=WorkflowPipeline)
    # Mocked sub-components used by the enhanced pipeline.
    pipeline.error_handler = Mock()
    pipeline.action_executor = Mock()
    # Mocked pipeline methods the tests configure per scenario.
    for method_name in ("match_current_state", "get_next_action", "load_workflow"):
        setattr(pipeline, method_name, Mock())
    return pipeline
def test_execute_workflow_step_enhanced_success_returns_complete_metadata(self, mock_workflow_pipeline, mock_screen_state):
    """On success, execute_workflow_step_enhanced must return complete metadata."""
    # --- Arrange ---
    workflow_id = "test_workflow"
    # State matching succeeds on node_1.
    mock_workflow_pipeline.match_current_state.return_value = {
        "node_id": "node_1",
        "workflow_id": workflow_id,
        "confidence": 0.92,
    }
    # A click action leads to node_2.
    mock_workflow_pipeline.get_next_action.return_value = {
        "edge_id": "edge_1",
        "action": {"type": "click", "target": "button"},
        "target_node": "node_2",
        "confidence": 0.95,
    }
    # Workflow containing the single edge the step will traverse.
    edge = Mock(spec=WorkflowEdge)
    edge.edge_id = "edge_1"
    edge.from_node = "node_1"
    edge.to_node = "node_2"
    workflow = Mock(spec=Workflow)
    workflow.edges = [edge]
    mock_workflow_pipeline.load_workflow.return_value = workflow
    # Successful low-level execution result.
    exec_result = Mock(spec=ExecutionResult)
    exec_result.status = ExecutionStatus.SUCCESS
    exec_result.message = "Action executed successfully"
    exec_result.duration_ms = 150.0
    exec_result.target_resolved = None
    exec_result.error = None
    mock_workflow_pipeline.action_executor.execute_edge.return_value = exec_result
    # Wire the mocked collaborators onto a fresh enhanced pipeline.
    enhanced = WorkflowPipelineEnhanced()
    enhanced.match_current_state = mock_workflow_pipeline.match_current_state
    enhanced.get_next_action = mock_workflow_pipeline.get_next_action
    enhanced.load_workflow = mock_workflow_pipeline.load_workflow
    enhanced.action_executor = mock_workflow_pipeline.action_executor
    enhanced.error_handler = mock_workflow_pipeline.error_handler
    # --- Act ---
    result = enhanced.execute_workflow_step_enhanced(
        workflow_id=workflow_id,
        current_state=mock_screen_state,
        context={"test_context": "value"},
    )
    # --- Assert ---
    assert isinstance(result, WorkflowExecutionResult)
    assert result.success is True
    assert result.status == StepExecutionStatus.SUCCESS
    assert result.workflow_id == workflow_id
    assert result.execution_id is not None
    assert result.correlation_id is not None
    assert result.correlation_id != result.execution_id  # ids must be distinct
    # Performance metadata
    assert result.performance_metrics is not None
    assert result.performance_metrics.total_execution_time_ms > 0
    assert result.performance_metrics.state_matching_time_ms >= 0
    assert result.performance_metrics.action_execution_time_ms >= 0
    # Execution details
    assert "action_confidence" in result.execution_details
    assert "match_confidence" in result.execution_details
    assert "execution_context" in result.execution_details
    assert result.execution_details["execution_context"]["test_context"] == "value"
    # Executed action
    assert result.action_executed is not None
    assert result.action_executed["type"] == "click"
    assert result.action_executed["execution_status"] == ExecutionStatus.SUCCESS.value
    # Match result
    assert result.match_result is not None
    assert result.match_result["node_id"] == "node_1"
    assert result.match_result["confidence"] == 0.92
def test_execute_workflow_step_enhanced_no_match_returns_recovery_info(self, mock_workflow_pipeline, mock_screen_state):
    """When no node matches, the step must report recovery information."""
    # --- Arrange ---
    workflow_id = "test_workflow"
    # No matching node for the current state.
    mock_workflow_pipeline.match_current_state.return_value = None
    # Empty workflow used by the recovery path.
    workflow = Mock(spec=Workflow)
    workflow.nodes = []
    mock_workflow_pipeline.load_workflow.return_value = workflow
    # Recovery attempt that ultimately fails.
    recovery = Mock(spec=RecoveryResult)
    recovery.strategy_used = RecoveryStrategy.HIERARCHICAL_MATCHING
    recovery.message = "Applied hierarchical matching fallback"
    recovery.success = False
    mock_workflow_pipeline.error_handler.handle_matching_failure.return_value = recovery
    # Wire the mocked collaborators onto a fresh enhanced pipeline.
    enhanced = WorkflowPipelineEnhanced()
    enhanced.match_current_state = mock_workflow_pipeline.match_current_state
    enhanced.load_workflow = mock_workflow_pipeline.load_workflow
    enhanced.error_handler = mock_workflow_pipeline.error_handler
    # --- Act ---
    result = enhanced.execute_workflow_step_enhanced(
        workflow_id=workflow_id,
        current_state=mock_screen_state,
    )
    # --- Assert ---
    assert isinstance(result, WorkflowExecutionResult)
    assert result.success is False
    assert result.status == StepExecutionStatus.NO_MATCH
    assert result.workflow_id == workflow_id
    assert result.execution_id is not None
    assert result.correlation_id is not None
    # Recovery information
    assert result.recovery_applied is not None
    assert result.recovery_applied.strategy == RecoveryStrategy.HIERARCHICAL_MATCHING.value
    assert result.recovery_applied.message == "Applied hierarchical matching fallback"
    assert result.recovery_applied.success is False
    assert result.recovery_applied.attempts == 1
    assert result.recovery_applied.duration_ms >= 0
    # Performance metrics
    assert result.performance_metrics is not None
    assert result.performance_metrics.total_execution_time_ms > 0
    assert result.performance_metrics.state_matching_time_ms >= 0
    assert result.performance_metrics.error_handling_time_ms >= 0
    # The incoming state must be preserved on the result.
    assert result.current_state == mock_screen_state
def test_execute_workflow_step_enhanced_workflow_complete(self, mock_workflow_pipeline, mock_screen_state):
    """Matching the final node with no next action must report WORKFLOW_COMPLETE."""
    # --- Arrange ---
    workflow_id = "test_workflow"
    # State matching lands on the terminal node.
    mock_workflow_pipeline.match_current_state.return_value = {
        "node_id": "final_node",
        "workflow_id": workflow_id,
        "confidence": 0.95,
    }
    # No outgoing action: the workflow is finished.
    mock_workflow_pipeline.get_next_action.return_value = None
    # Wire the mocked collaborators onto a fresh enhanced pipeline.
    enhanced = WorkflowPipelineEnhanced()
    enhanced.match_current_state = mock_workflow_pipeline.match_current_state
    enhanced.get_next_action = mock_workflow_pipeline.get_next_action
    # --- Act ---
    result = enhanced.execute_workflow_step_enhanced(
        workflow_id=workflow_id,
        current_state=mock_screen_state,
    )
    # --- Assert ---
    assert isinstance(result, WorkflowExecutionResult)
    assert result.success is True
    assert result.status == StepExecutionStatus.WORKFLOW_COMPLETE
    assert result.workflow_id == workflow_id
    assert result.current_node == "final_node"
    assert result.execution_id is not None
    assert result.correlation_id is not None
    # Performance metrics
    assert result.performance_metrics is not None
    assert result.performance_metrics.total_execution_time_ms > 0
    assert result.performance_metrics.state_matching_time_ms >= 0
    # Match result
    assert result.match_result is not None
    assert result.match_result["node_id"] == "final_node"
def test_execute_workflow_step_enhanced_exception_handling(self, mock_workflow_pipeline, mock_screen_state):
    """An exception during matching must yield EXECUTION_ERROR with exception details."""
    # --- Arrange ---
    workflow_id = "test_workflow"
    # State matching blows up.
    mock_workflow_pipeline.match_current_state.side_effect = Exception("Test exception")
    # Error handler with an observable history and logger.
    mock_workflow_pipeline.error_handler.error_history = []
    mock_workflow_pipeline.error_handler._log_error = Mock()
    # Wire the mocked collaborators onto a fresh enhanced pipeline.
    enhanced = WorkflowPipelineEnhanced()
    enhanced.match_current_state = mock_workflow_pipeline.match_current_state
    enhanced.error_handler = mock_workflow_pipeline.error_handler
    # --- Act ---
    result = enhanced.execute_workflow_step_enhanced(
        workflow_id=workflow_id,
        current_state=mock_screen_state,
        context={"test": "context"},
    )
    # --- Assert ---
    assert isinstance(result, WorkflowExecutionResult)
    assert result.success is False
    assert result.status == StepExecutionStatus.EXECUTION_ERROR
    assert result.workflow_id == workflow_id
    assert result.error == "Test exception"
    assert result.execution_id is not None
    assert result.correlation_id is not None
    # Exception details
    assert "exception_type" in result.execution_details
    assert result.execution_details["exception_type"] == "Exception"
    assert "execution_context" in result.execution_details
    # The error handler must have recorded and logged the failure.
    assert len(mock_workflow_pipeline.error_handler.error_history) == 1
    mock_workflow_pipeline.error_handler._log_error.assert_called_once()
def test_migrate_execute_workflow_step_replaces_method(self):
    """migrate_execute_workflow_step must replace the method and keep the legacy one.

    Fix: the original assertions used `==` / `!=` where the intent is object
    identity ("same instance", "method replaced"). `Mock.__eq__` happens to
    default to identity, but `is` / `is not` states the contract explicitly
    and does not rely on that implementation detail.
    """
    # Arrange
    mock_pipeline = Mock(spec=WorkflowPipeline)
    original_method = Mock()
    mock_pipeline.execute_workflow_step = original_method
    # Act
    migrated_pipeline = migrate_execute_workflow_step(mock_pipeline)
    # Assert
    assert migrated_pipeline is mock_pipeline  # same instance returned
    assert hasattr(mock_pipeline, '_execute_workflow_step_legacy')
    assert mock_pipeline._execute_workflow_step_legacy is original_method
    assert mock_pipeline.execute_workflow_step is not original_method  # method replaced
def test_serialization_preserves_all_metadata(self, mock_workflow_pipeline, mock_screen_state):
"""Test that serialization (to_dict) preserves all metadata"""
# Arrange
workflow_id = "test_workflow"
# Mock of the state-matching result
mock_workflow_pipeline.match_current_state.return_value = {
"node_id": "node_1",
"workflow_id": workflow_id,
"confidence": 0.92
}
# Mock of the next action
mock_workflow_pipeline.get_next_action.return_value = {
"edge_id": "edge_1",
"action": {"type": "click", "target": "button"},
"target_node": "node_2",
"confidence": 0.95
}
# Mock of the workflow (single edge node_1 -> node_2)
mock_workflow = Mock(spec=Workflow)
mock_edge = Mock(spec=WorkflowEdge)
mock_edge.edge_id = "edge_1"
mock_edge.from_node = "node_1"
mock_edge.to_node = "node_2"
mock_workflow.edges = [mock_edge]
mock_workflow_pipeline.load_workflow.return_value = mock_workflow
# Mock of a successful execution result
mock_execution_result = Mock(spec=ExecutionResult)
mock_execution_result.status = ExecutionStatus.SUCCESS
mock_execution_result.message = "Action executed successfully"
mock_execution_result.duration_ms = 150.0
mock_execution_result.target_resolved = None
mock_execution_result.error = None
mock_workflow_pipeline.action_executor.execute_edge.return_value = mock_execution_result
# Create the enhanced instance
enhanced = WorkflowPipelineEnhanced()
# Bind the mocked pipeline methods
enhanced.match_current_state = mock_workflow_pipeline.match_current_state
enhanced.get_next_action = mock_workflow_pipeline.get_next_action
enhanced.load_workflow = mock_workflow_pipeline.load_workflow
enhanced.action_executor = mock_workflow_pipeline.action_executor
enhanced.error_handler = mock_workflow_pipeline.error_handler
# Act
result = enhanced.execute_workflow_step_enhanced(
workflow_id=workflow_id,
current_state=mock_screen_state,
context={"custom_data": "test_value"}
)
# Serialize
result_dict = result.to_dict()
# Assert - verify that all critical metadata is present
assert "execution_id" in result_dict
assert "workflow_id" in result_dict
assert "correlation_id" in result_dict
assert "success" in result_dict
assert "status" in result_dict
assert "performance_metrics" in result_dict
assert "match_result" in result_dict
assert "action_executed" in result_dict
assert "execution_details" in result_dict
# Verify the performance metrics
perf_metrics = result_dict["performance_metrics"]
assert "total_execution_time_ms" in perf_metrics
assert "state_matching_time_ms" in perf_metrics
assert "action_execution_time_ms" in perf_metrics
# Verify the custom execution details
exec_details = result_dict["execution_details"]
assert "action_confidence" in exec_details
assert "match_confidence" in exec_details
assert "execution_context" in exec_details
assert exec_details["execution_context"]["custom_data"] == "test_value"