#!/usr/bin/env python3
"""
Property-Based Tests for RealtimeValidationService - RPA Vision V3

These tests verify universal properties of the real-time validation service,
using property-based testing with Hypothesis.

Feature: visual-rpa-properties-enhancement
Property 14: Automatic Periodic Validation
Property 15: Intelligent Element Recovery

Author: AI Assistant
Date: 2026-01-07
"""

import pytest
import asyncio
import threading
import time
from datetime import datetime, timedelta
from unittest.mock import Mock, AsyncMock, patch
from hypothesis import given, strategies as st, assume, settings
import numpy as np

from core.visual.realtime_validation_service import (
    RealtimeValidationService,
    ValidationStatus,
    ValidationResult,
    ValidationConfig
)
from core.visual.visual_target_manager import VisualTarget
from core.models import UIElement, BoundingBox


class TestRealtimeValidationServiceProperties:
    """Property-based tests for RealtimeValidationService"""

    @pytest.fixture
    def mock_dependencies(self):
        """Create the mocked dependencies"""
        screen_capturer = Mock()
        screen_capturer.capture_screen = AsyncMock()

        ui_detector = Mock()
        ui_detector.detect_elements = AsyncMock()

        embedding_manager = Mock()
        embedding_manager.find_best_match = AsyncMock()

        target_manager = Mock()
        target_manager.update_target_screenshot = AsyncMock()

        return {
            'screen_capturer': screen_capturer,
            'ui_detector': ui_detector,
            'embedding_manager': embedding_manager,
            'target_manager': target_manager
        }

    @pytest.fixture
    def validation_service(self, mock_dependencies):
        """Create the validation service under test"""
        return RealtimeValidationService(**mock_dependencies)

    @pytest.fixture
    def sample_visual_target(self):
        """Create a sample visual target for the tests"""
        return VisualTarget(
            embedding=np.random.rand(256).astype(np.float32),
            screenshot="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8/5+hHgAHggJ/PchI7wAAAABJRU5ErkJggg==",
            bounding_box=BoundingBox(x=100, y=100, width=50, height=30),
            confidence=0.85,
            signature="test_signature_123",
            metadata=Mock(),
            contextual_info=Mock()
        )
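
    # Note: the fixture above uses deliberately synthetic data (a random 256-dim
    # embedding, a tiny base64 PNG as the screenshot, Mock() placeholders for
    # metadata). The properties below exercise the service's control flow and
    # threading behaviour, not real vision or matching quality.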

    @given(
        validation_interval=st.floats(min_value=0.1, max_value=10.0),
        num_validations=st.integers(min_value=1, max_value=10)
    )
    @settings(max_examples=20, deadline=10000)
    def test_property_14_periodic_automatic_validation(
        self,
        validation_service,
        sample_visual_target,
        validation_interval,
        num_validations
    ):
        """
        Property 14: Automatic Periodic Validation

        For every configured element, the system must periodically check its
        presence and display the appropriate status indicators.

        Validates: Requirements 6.1, 6.2, 6.3
        """
        element_id = "test_element_periodic"
        validation_results = []

        def validation_callback(result):
            validation_results.append(result)

        # Validation configuration
        config = ValidationConfig(
            target=sample_visual_target,
            validation_interval=validation_interval,
            callback=validation_callback
        )

        # Mock the validation responses
        mock_screen_state = Mock()
        mock_screen_state.ui_elements = [Mock()]

        validation_service.screen_capturer.capture_screen.return_value = Mock()
        validation_service.ui_detector.detect_elements.return_value = mock_screen_state

        # Simulate varying validation results
        match_results = []
        for i in range(num_validations):
            match_result = Mock()
            match_result.confidence = 0.8 + (i % 3) * 0.1
            match_result.element = Mock()
            match_results.append(match_result)

        validation_service.embedding_manager.find_best_match.side_effect = match_results

        # Start the validation
        success = validation_service.start_validation(element_id, sample_visual_target, config)
        assert success

        # Wait for several validation cycles
        expected_validations = min(num_validations, 3)  # Keep the test runtime bounded
        wait_time = validation_interval * expected_validations + 0.5

        time.sleep(wait_time)

        # Stop the validation
        validation_service.stop_validation(element_id)

        # Check that validations actually took place
        assert len(validation_results) >= 1

        # Check that every validation result has valid properties
        for result in validation_results:
            assert isinstance(result, ValidationResult)
            assert result.status in [ValidationStatus.VALID, ValidationStatus.WARNING, ValidationStatus.ERROR]
            assert 0.0 <= result.confidence <= 1.0
            assert isinstance(result.timestamp, datetime)

        # Check that validations are spaced correctly
        if len(validation_results) > 1:
            time_diffs = []
            for i in range(1, len(validation_results)):
                diff = (validation_results[i].timestamp - validation_results[i-1].timestamp).total_seconds()
                time_diffs.append(diff)

            # Intervals must stay within a tolerance band around the configured interval (0.5x to 2x)
            for diff in time_diffs:
                assert validation_interval * 0.5 <= diff <= validation_interval * 2.0

    @given(
        confidence_threshold=st.floats(min_value=0.1, max_value=0.9),
        initial_confidence=st.floats(min_value=0.0, max_value=1.0),
        recovery_confidence=st.floats(min_value=0.0, max_value=1.0)
    )
    @settings(max_examples=25, deadline=5000)
    def test_property_15_intelligent_element_recovery(
        self,
        validation_service,
        sample_visual_target,
        confidence_threshold,
        initial_confidence,
        recovery_confidence
    ):
        """
        Property 15: Intelligent Element Recovery

        For every element that changes appearance or disappears, the system
        must propose recovery actions (target update or re-selection).

        Validates: Requirements 6.4, 6.5
        """
        assume(initial_confidence != recovery_confidence)  # Ensure the appearance actually changes

        element_id = "test_element_recovery"
        validation_results = []

        def validation_callback(result):
            validation_results.append(result)

        # Configuration with automatic recovery enabled
        config = ValidationConfig(
            target=sample_visual_target,
            validation_interval=0.5,
            confidence_threshold=confidence_threshold,
            auto_recovery=True,
            callback=validation_callback
        )

        # Mock the dependencies
        mock_screen_state = Mock()
        mock_screen_state.ui_elements = [Mock()]

        validation_service.screen_capturer.capture_screen.return_value = Mock()
        validation_service.ui_detector.detect_elements.return_value = mock_screen_state

        # Simulate a recovery scenario
        match_results = []

        # First result with the initial confidence
        first_match = Mock() if initial_confidence > 0 else None
        if first_match:
            first_match.confidence = initial_confidence
            first_match.element = Mock()
        match_results.append(first_match)

        # Second result with the recovery confidence
        second_match = Mock() if recovery_confidence > 0 else None
        if second_match:
            second_match.confidence = recovery_confidence
            second_match.element = Mock()
        match_results.append(second_match)

        validation_service.embedding_manager.find_best_match.side_effect = match_results

        # Mock the recovery methods
        validation_service.target_manager.update_target_screenshot.return_value = sample_visual_target

        # Start the validation
        success = validation_service.start_validation(element_id, sample_visual_target, config)
        assert success

        # Wait for the validations
        time.sleep(1.5)  # Allow at least 2 validations

        # Stop the validation
        validation_service.stop_validation(element_id)

        # Analyse the results
        assert len(validation_results) >= 1

        # Check the recovery logic
        for result in validation_results:
            if result.confidence < confidence_threshold:
                # When confidence is low, recovery actions must be proposed
                assert len(result.recovery_actions) > 0 or len(result.suggestions) > 0

                # Recovery actions must be one of the supported kinds
                valid_actions = ['re_select', 'update_target', 'expand_search']
                for action in result.recovery_actions:
                    assert action in valid_actions

            # Check that the status is consistent with the confidence
            if result.confidence >= confidence_threshold:
                assert result.status in [ValidationStatus.VALID, ValidationStatus.WARNING]
            else:
                assert result.status == ValidationStatus.ERROR

    @given(
        num_concurrent_validations=st.integers(min_value=1, max_value=5),
        validation_duration=st.floats(min_value=0.5, max_value=2.0)
    )
    @settings(max_examples=15, deadline=8000)
    def test_property_concurrent_validation_safety(
        self,
        validation_service,
        num_concurrent_validations,
        validation_duration
    ):
        """
        Property: Concurrent Validation Safety

        For any set of simultaneous validations, the service must keep its
        data consistent, with no corruption.
        """
        element_ids = [f"element_{i}" for i in range(num_concurrent_validations)]
        all_results = {eid: [] for eid in element_ids}

        # Create a unique target for each element
        targets = []
        for i in range(num_concurrent_validations):
            target = VisualTarget(
                embedding=np.random.rand(256).astype(np.float32),
                screenshot=f"screenshot_{i}",
                bounding_box=BoundingBox(x=i*100, y=i*50, width=50, height=30),
                confidence=0.8,
                signature=f"signature_{i}",
                metadata=Mock(),
                contextual_info=Mock()
            )
            targets.append(target)

        # Callbacks that collect the results per element
        def create_callback(element_id):
            def callback(result):
                all_results[element_id].append(result)
            return callback
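
        # create_callback binds element_id at call time, avoiding the classic
        # late-binding pitfall of defining closures inside the start-up loop below.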

        # Mock the dependencies
        mock_screen_state = Mock()
        mock_screen_state.ui_elements = [Mock() for _ in range(num_concurrent_validations)]

        validation_service.screen_capturer.capture_screen.return_value = Mock()
        validation_service.ui_detector.detect_elements.return_value = mock_screen_state

        # Mock the matching results
        def mock_find_best_match(embedding, candidates):
            # Return a result derived from the embedding
            match = Mock()
            match.confidence = 0.7 + (hash(str(embedding)) % 3) * 0.1
            match.element = Mock()
            return match
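
        # hash(str(embedding)) keeps each target's simulated confidence stable
        # within a run while generally differing between targets.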

        validation_service.embedding_manager.find_best_match.side_effect = mock_find_best_match

        # Start all validations
        started_validations = []
        for i, element_id in enumerate(element_ids):
            config = ValidationConfig(
                target=targets[i],
                validation_interval=0.3,
                callback=create_callback(element_id)
            )

            success = validation_service.start_validation(element_id, targets[i], config)
            if success:
                started_validations.append(element_id)

        # Check that all validations started
        assert len(started_validations) == num_concurrent_validations

        # Wait for the validation duration
        time.sleep(validation_duration)

        # Stop all validations
        for element_id in started_validations:
            validation_service.stop_validation(element_id)

        # Check the integrity of the results
        for element_id in started_validations:
            results = all_results[element_id]

            # Each validation must have produced at least one result
            assert len(results) >= 1

            # Check that every result is valid
            for result in results:
                assert isinstance(result, ValidationResult)
                assert result.status in ValidationStatus
                assert 0.0 <= result.confidence <= 1.0

        # Check that there is no cross-contamination between elements
        all_timestamps = []
        for element_id in started_validations:
            for result in all_results[element_id]:
                all_timestamps.append((element_id, result.timestamp))

        # Timestamps must be consistent (no exact duplicates)
        timestamp_values = [ts for _, ts in all_timestamps]
        assert len(timestamp_values) == len(set(timestamp_values))

    @given(
        max_retries=st.integers(min_value=1, max_value=5),
        failure_rate=st.floats(min_value=0.0, max_value=1.0)
    )
    @settings(max_examples=20, deadline=5000)
    def test_property_retry_mechanism(
        self,
        validation_service,
        sample_visual_target,
        max_retries,
        failure_rate
    ):
        """
        Property: Retry Mechanism

        For every validation failure, the system must respect the configured
        maximum number of retry attempts.
        """
        element_id = "test_element_retry"
        validation_attempts = []

        def validation_callback(result):
            validation_attempts.append(result)

        # Configuration with retries
        config = ValidationConfig(
            target=sample_visual_target,
            validation_interval=0.2,
            max_retries=max_retries,
            callback=validation_callback
        )

        # Mock that fails at the configured rate
        call_count = 0

        def mock_find_best_match(embedding, candidates):
            nonlocal call_count
            call_count += 1

            # Simulate a failure according to the failure rate
            if np.random.random() < failure_rate:
                return None  # Matching failure
            else:
                match = Mock()
                match.confidence = 0.8
                match.element = Mock()
                return match

        # Set up the mocks
        mock_screen_state = Mock()
        mock_screen_state.ui_elements = [Mock()]

        validation_service.screen_capturer.capture_screen.return_value = Mock()
        validation_service.ui_detector.detect_elements.return_value = mock_screen_state
        validation_service.embedding_manager.find_best_match.side_effect = mock_find_best_match

        # Start the validation
        success = validation_service.start_validation(element_id, sample_visual_target, config)
        assert success

        # Wait long enough to allow the retries
        wait_time = config.validation_interval * (max_retries + 2)
        time.sleep(wait_time)

        # Stop the validation
        validation_service.stop_validation(element_id)

        # Analyse the attempts
        assert len(validation_attempts) >= 1

        # Count consecutive failures
        consecutive_failures = 0
        max_consecutive_failures = 0

        for result in validation_attempts:
            if result.status == ValidationStatus.ERROR:
                consecutive_failures += 1
                max_consecutive_failures = max(max_consecutive_failures, consecutive_failures)
            else:
                consecutive_failures = 0

        # The number of consecutive failures must not exceed max_retries
        # (unless the failure rate is very high)
        if failure_rate < 0.9:  # Only check when failures are not near-certain
            assert max_consecutive_failures <= max_retries + 1

    def test_property_validation_result_consistency(self, validation_service, sample_visual_target):
        """
        Property: Validation Result Consistency

        For every validation result, its properties must be internally
        consistent and respect the logical constraints.
        """
        element_id = "test_element_consistency"
        validation_results = []

        def validation_callback(result):
            validation_results.append(result)

        config = ValidationConfig(
            target=sample_visual_target,
            validation_interval=0.3,
            confidence_threshold=0.7,
            callback=validation_callback
        )

        # Mock several scenarios
        scenarios = [
            (0.9, ValidationStatus.VALID),    # High confidence
            (0.75, ValidationStatus.VALID),   # Acceptable confidence
            (0.65, ValidationStatus.ERROR),   # Low confidence
            (0.0, ValidationStatus.ERROR),    # No confidence at all
        ]

        mock_results = []
        for confidence, expected_status in scenarios:
            if confidence > 0:
                match = Mock()
                match.confidence = confidence
                match.element = Mock()
                mock_results.append(match)
            else:
                mock_results.append(None)

        # Set up the mocks
        mock_screen_state = Mock()
        mock_screen_state.ui_elements = [Mock()]

        validation_service.screen_capturer.capture_screen.return_value = Mock()
        validation_service.ui_detector.detect_elements.return_value = mock_screen_state
        validation_service.embedding_manager.find_best_match.side_effect = mock_results

        # Start and wait
        success = validation_service.start_validation(element_id, sample_visual_target, config)
        assert success

        time.sleep(1.5)  # Allow several validations
        validation_service.stop_validation(element_id)

        # Check the consistency of the results
        for result in validation_results:
            # Status/confidence consistency
            if result.confidence >= config.confidence_threshold:
                assert result.status in [ValidationStatus.VALID, ValidationStatus.WARNING]
            else:
                assert result.status == ValidationStatus.ERROR

            # Suggestions/actions consistency
            if result.status == ValidationStatus.ERROR:
                assert len(result.issues) > 0 or len(result.suggestions) > 0

            # Temporal properties
            assert isinstance(result.timestamp, datetime)
            assert result.timestamp <= datetime.now()

            # Numeric properties
            assert 0.0 <= result.confidence <= 1.0


if __name__ == "__main__":
    pytest.main([__file__, "-v", "--tb=short"])