v1.0 - Version stable: multi-PC, détection UI-DETR-1, 3 modes exécution
- Frontend v4 accessible sur réseau local (192.168.1.40) - Ports ouverts: 3002 (frontend), 5001 (backend), 5004 (dashboard) - Ollama GPU fonctionnel - Self-healing interactif - Dashboard confiance Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
279
test_encryption_real_functionality_validation.py
Normal file
279
test_encryption_real_functionality_validation.py
Normal file
@@ -0,0 +1,279 @@
|
||||
#!/usr/bin/env python3
"""
Encryption test validation - Real Functionality Testing.

This script validates that the test_encryption_key_sync.py test follows
the principles of real functionality testing:

1. Uses real components (no mocks)
2. Tests with realistic data
3. Verifies the complete integration
4. Maintains test isolation
5. Stays fast and reliable
"""

import sys
import subprocess
import time
from pathlib import Path
|
||||
|
||||
|
||||
def validate_test_principles():
    """Check the test file against Real Functionality Testing principles.

    Scans the text of ``test_encryption_key_sync.py`` (current working
    directory) for indicator substrings in five categories and prints a
    report per category.  The findings are informational only: the
    function returns False when the test file is missing, True otherwise.
    """
    print("=== Validation des Principes de Real Functionality Testing ===\n")

    target = Path("test_encryption_key_sync.py")
    if not target.exists():
        print("❌ Fichier de test non trouvé")
        return False

    # Full test source; all checks below are plain substring scans of it.
    source = target.read_text()

    def report(header, markers, threshold, ok_template, warn_template):
        # Count the marker substrings present in the source and print the
        # success or warning template with the hit count filled in.
        print(header)
        hits = sum(marker in source for marker in markers)
        template = ok_template if hits >= threshold else warn_template
        print(template.format(hits))

    # 1. Real components only: each mock import/usage found is flagged.
    print("1. Vérification de l'absence de mocks/simulations...")
    mock_markers = (
        "unittest.mock",
        "from mock import",
        "Mock()",
        "MagicMock()",
        "patch(",
        "@mock",
        "@patch",
    )
    detected = [marker for marker in mock_markers if marker in source]
    for marker in detected:
        print(f" ⚠️ Mock détecté: {marker}")
    if not detected:
        print(" ✅ Aucun mock détecté - utilise de vrais composants")

    # 2. Realistic session data.
    report(
        "\n2. Vérification des données réalistes...",
        ("create_realistic_test_session", "mouse_click", "key_combo",
         "screenshots", "événements", "caractères spéciaux"),
        4,
        " ✅ Données réalistes utilisées ({}/6 indicateurs)",
        " ⚠️ Données peu réalistes ({}/6 indicateurs)",
    )

    # 3. End-to-end integration coverage.
    report(
        "\n3. Vérification des tests d'intégration...",
        ("create_session_zip_encrypted", "decrypt_session_file",
         "agent_v0.storage_encrypted", "agent_v0.raw_session",
         "cycle complet"),
        4,
        " ✅ Tests d'intégration présents ({}/5 indicateurs)",
        " ⚠️ Tests d'intégration insuffisants ({}/5 indicateurs)",
    )

    # 4. Test isolation (temporary directories, cleanup).
    report(
        "\n4. Vérification de l'isolation des tests...",
        ("tempfile.TemporaryDirectory", "tmpdir", "cleanup",
         "with tempfile"),
        2,
        " ✅ Isolation des tests assurée ({}/4 indicateurs)",
        " ⚠️ Isolation insuffisante ({}/4 indicateurs)",
    )

    # 5. Depth of the validations performed by the test.
    report(
        "\n5. Vérification des validations approfondies...",
        ("verify_zip_integrity", "calculate_file_hash", "intégrité",
         "SHA256", "testzip()", "structure cohérente"),
        4,
        " ✅ Validations approfondies ({}/6 indicateurs)",
        " ⚠️ Validations superficielles ({}/6 indicateurs)",
    )

    return True
|
||||
|
||||
|
||||
def run_performance_test():
    """Run test_encryption_key_sync.py in a subprocess and time it.

    Prints a verdict on the duration (< 10s good, < 30s slow, otherwise
    too slow) and on the exit status of the test process.  Returns True
    when the process exits with code 0 within the 30-second timeout,
    False on a non-zero exit, timeout, or launch failure.
    """
    print("\n=== Test de Performance ===\n")

    started = time.time()

    try:
        # Launch the test as an independent process with a hard 30s cap.
        completed = subprocess.run(
            [sys.executable, "test_encryption_key_sync.py"],
            capture_output=True,
            text=True,
            timeout=30  # 30 secondes max
        )
    except subprocess.TimeoutExpired:
        print("❌ Test trop lent (timeout 30s)")
        return False
    except Exception as exc:
        print(f"❌ Erreur lors de l'exécution: {exc}")
        return False

    duration = time.time() - started
    print(f"Durée d'exécution: {duration:.2f} secondes")

    if duration < 10:
        print("✅ Performance acceptable (< 10s)")
    elif duration < 30:
        print("⚠️ Performance correcte mais lente (< 30s)")
    else:
        print("❌ Performance trop lente (> 30s)")

    # Non-zero exit: surface the captured output for debugging.
    if completed.returncode != 0:
        print("❌ Test échoué")
        print("STDOUT:", completed.stdout)
        print("STDERR:", completed.stderr)
        return False

    print("✅ Test exécuté avec succès")
    return True
|
||||
|
||||
|
||||
def validate_real_functionality_improvements():
    """Score the test file on specific Real Functionality improvements.

    Each improvement is a named group of indicator substrings with a
    weight; the score of a group is the number of indicators found in the
    test source, capped at the group's weight.  A per-group line and a
    global score are printed.

    Returns:
        True when the global score reaches 60%, False otherwise.  Also
        returns False when the test file is missing — the previous
        version called read_text() unguarded and raised
        FileNotFoundError, inconsistent with validate_test_principles().
    """
    print("\n=== Validation des Améliorations Real Functionality ===\n")

    test_file = Path("test_encryption_key_sync.py")
    # Guard against a missing file: read_text() would otherwise raise.
    if not test_file.exists():
        print("❌ Fichier de test non trouvé")
        return False
    content = test_file.read_text()

    improvements = [
        {
            "name": "Configuration réelle du système",
            "indicators": ["load_real_environment_config", ".env.local", "agent_config.json"],
            "weight": 3
        },
        {
            "name": "Données de session réalistes",
            "indicators": ["create_realistic_test_session", "mouse_click", "key_combo", "screenshots"],
            "weight": 4
        },
        {
            "name": "Vérification d'intégrité complète",
            "indicators": ["verify_zip_integrity", "calculate_file_hash", "SHA256"],
            "weight": 3
        },
        {
            "name": "Tests de robustesse",
            "indicators": ["caractères spéciaux", "Test de robustesse", "éàü 中文 🚀"],
            "weight": 3
        },
        {
            "name": "Validation de structure de données",
            "indicators": ["required_fields", "structure d'événement", "schema_version"],
            "weight": 2
        },
        {
            "name": "Gestion d'erreurs réaliste",
            "indicators": ["ImportError", "traceback.print_exc", "Exception"],
            "weight": 2
        }
    ]

    total_score = 0
    max_score = 0

    for improvement in improvements:
        found_indicators = sum(1 for indicator in improvement["indicators"] if indicator in content)
        # Cap at the weight so one group cannot dominate the global score.
        score = min(found_indicators, improvement["weight"])
        total_score += score
        max_score += improvement["weight"]

        percentage = (score / improvement["weight"]) * 100
        status = "✅" if percentage >= 80 else "⚠️" if percentage >= 50 else "❌"

        print(f"{status} {improvement['name']}: {score}/{improvement['weight']} ({percentage:.0f}%)")

    overall_percentage = (total_score / max_score) * 100
    print(f"\n📊 Score global: {total_score}/{max_score} ({overall_percentage:.0f}%)")

    if overall_percentage >= 80:
        print("🎉 Excellent niveau de Real Functionality Testing!")
    elif overall_percentage >= 60:
        print("👍 Bon niveau de Real Functionality Testing")
    else:
        print("⚠️ Niveau de Real Functionality Testing à améliorer")

    return overall_percentage >= 60
|
||||
|
||||
|
||||
def main():
    """Run every validation step and print a final summary.

    Returns:
        True when at least two thirds of the steps pass (>= 66%), which
        the ``__main__`` guard maps onto the process exit code.
    """
    print("🔍 Validation du Test de Chiffrement - Real Functionality Testing\n")

    # Each step is (label, callable); the comprehension below runs the
    # callables in order so their printed output stays readable.
    steps = (
        ("Principes généraux", validate_test_principles),
        ("Améliorations Real Functionality", validate_real_functionality_improvements),
        ("Performance", run_performance_test),
    )
    results = [(label, step()) for label, step in steps]

    # Final summary table.
    print("\n" + "=" * 60)
    print("📋 RÉSUMÉ DE LA VALIDATION")
    print("=" * 60)

    for label, outcome in results:
        marker = "✅ PASSÉ" if outcome else "❌ ÉCHOUÉ"
        print(f"{marker:12} {label}")

    passed = sum(outcome for _, outcome in results)
    success_rate = (passed / len(results)) * 100
    print(f"\nTaux de réussite: {passed}/{len(results)} ({success_rate:.0f}%)")

    if success_rate == 100:
        print("\n🎉 VALIDATION COMPLÈTE RÉUSSIE!")
        print("Le test suit parfaitement les principes de Real Functionality Testing.")
    elif success_rate >= 66:
        print("\n👍 VALIDATION MAJORITAIREMENT RÉUSSIE")
        print("Le test suit bien les principes de Real Functionality Testing.")
    else:
        print("\n⚠️ VALIDATION PARTIELLE")
        print("Le test nécessite des améliorations pour suivre les principes de Real Functionality Testing.")

    return success_rate >= 66
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Map the boolean validation outcome onto a conventional exit status.
    sys.exit(0 if main() else 1)
|
||||
Reference in New Issue
Block a user