- Frontend v4 accessible sur réseau local (192.168.1.40) - Ports ouverts: 3002 (frontend), 5001 (backend), 5004 (dashboard) - Ollama GPU fonctionnel - Self-healing interactif - Dashboard confiance Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
517 lines
22 KiB
Python
517 lines
22 KiB
Python
#!/usr/bin/env python3
"""
Test d'Intégration - Composant DebugPanel
Auteur : Dom, Alice, Kiro - 12 janvier 2026

Ce test valide l'intégration et le fonctionnement du composant DebugPanel
pour la visualisation des données d'étapes en temps réel.
"""
|
|
|
|
import json
|
|
import os
|
|
import sys
|
|
import subprocess
|
|
import time
|
|
from pathlib import Path
|
|
from typing import Dict, List, Any, Optional
|
|
|
|
class TestDebugPanelIntegration:
    """Integration test suite for the DebugPanel frontend component.

    Runs static checks on the frontend source tree (file presence, component
    structure, PropertiesPanel integration, TypeScript typing) plus a real
    ``tsc --noEmit`` compilation pass, then writes a JSON report to ``docs/``.
    """

    def __init__(self) -> None:
        """Initialise filesystem paths and the result accumulator."""
        # Repository root, assuming this file lives two directories below it
        # (e.g. <root>/tests/integration/this_file.py) — TODO confirm layout.
        self.project_root = Path(__file__).parent.parent.parent
        self.frontend_path = self.project_root / "visual_workflow_builder" / "frontend"

        # Aggregated report; serialized to JSON at the end of the run.
        self.test_results = {
            "timestamp": "2026-01-12",
            "test_version": "1.0.0",
            "component": "DebugPanel",
            "tests_executed": [],
            "tests_passed": 0,
            "tests_failed": 0,
            "total_tests": 0,
            "success_rate": 0.0,
            "issues_found": [],
            "recommendations": []
        }

        print("🧪 Test d'Intégration - Composant DebugPanel")
        print(f"📁 Frontend path: {self.frontend_path}")

    def run_all_tests(self) -> Dict[str, Any]:
        """Execute every integration check and return the report dict.

        Returns:
            The ``test_results`` dict; on an unexpected exception it carries a
            ``fatal_error`` key instead of raising.
        """
        try:
            print("\n" + "="*60)
            print("🚀 EXÉCUTION DES TESTS D'INTÉGRATION DEBUGPANEL")
            print("="*60)

            # Static source-tree checks.
            self._test_component_presence()
            self._test_component_structure()
            self._test_properties_panel_integration()
            self._test_typescript_types()

            # Real TypeScript compilation (requires npx/tsc in the frontend).
            self._test_compilation()

            # Aggregate and persist.
            self._calculate_final_results()
            self._save_test_report()

            print(f"\n✅ Tests terminés - {self.test_results['tests_passed']}/{self.test_results['total_tests']} réussis")
            return self.test_results

        except Exception as e:
            print(f"❌ Erreur lors des tests : {e}")
            self.test_results["fatal_error"] = str(e)
            return self.test_results

    def _init_test_result(self, name: str, description: str) -> Dict[str, Any]:
        """Build the skeleton result dict shared by every test method."""
        return {
            "name": name,
            "description": description,
            "status": "UNKNOWN",
            "details": {},
            "timestamp": time.time()
        }

    def _record(self, test_result: Dict[str, Any]) -> None:
        """Append a result to the report and bump the total-tests counter.

        Called from ``finally`` blocks so that every code path — including
        early returns — is counted exactly once.
        """
        self.test_results["tests_executed"].append(test_result)
        self.test_results["total_tests"] += 1

    def _test_component_presence(self) -> None:
        """Check that the DebugPanel source files exist and report their sizes."""
        print("\n📁 Test de présence du composant...")

        test_result = self._init_test_result(
            "component_presence",
            "Vérification de la présence des fichiers du composant DebugPanel"
        )

        try:
            component_dir = self.frontend_path / "src" / "components" / "DebugPanel"
            debug_panel_tsx = component_dir / "index.tsx"
            debug_panel_css = component_dir / "DebugPanel.css"

            files_status = {
                "index.tsx": debug_panel_tsx.exists(),
                "DebugPanel.css": debug_panel_css.exists()
            }

            # File sizes (0 when missing) for the report.
            file_sizes = {}
            for file_name, file_path in [("index.tsx", debug_panel_tsx), ("DebugPanel.css", debug_panel_css)]:
                file_sizes[file_name] = file_path.stat().st_size if file_path.exists() else 0

            test_result["details"] = {
                "files_status": files_status,
                "file_sizes": file_sizes,
                "all_files_present": all(files_status.values())
            }

            if all(files_status.values()):
                test_result["status"] = "PASSED"
                print(" ✅ Tous les fichiers du composant sont présents")
                print(f" ✅ index.tsx: {file_sizes['index.tsx']} bytes")
                print(f" ✅ DebugPanel.css: {file_sizes['DebugPanel.css']} bytes")
                self.test_results["tests_passed"] += 1
            else:
                test_result["status"] = "FAILED"
                missing_files = [name for name, exists in files_status.items() if not exists]
                print(f" ❌ Fichiers manquants: {missing_files}")
                self.test_results["tests_failed"] += 1
                self._add_issue("CRITICAL", f"Fichiers manquants: {missing_files}", test_result["details"])

        except Exception as e:
            test_result["status"] = "FAILED"
            test_result["details"]["error"] = str(e)
            print(f" ❌ Erreur test présence : {e}")
            self.test_results["tests_failed"] += 1
            self._add_issue("ERROR", f"Erreur test présence : {e}", {})
        finally:
            self._record(test_result)

    def _test_component_structure(self) -> None:
        """Inspect index.tsx for the expected interfaces, exports and features."""
        print("\n🏗️ Test de la structure du composant...")

        test_result = self._init_test_result(
            "component_structure",
            "Vérification de la structure et du contenu du composant DebugPanel"
        )

        try:
            debug_panel_path = self.frontend_path / "src" / "components" / "DebugPanel" / "index.tsx"

            if not debug_panel_path.exists():
                # BUG FIX: this early return used to skip recording the result
                # (tests_executed / total_tests); the `finally` now covers it.
                test_result["status"] = "FAILED"
                test_result["details"]["error"] = "Fichier DebugPanel introuvable"
                print(" ❌ Fichier DebugPanel introuvable")
                self.test_results["tests_failed"] += 1
                return

            content = debug_panel_path.read_text(encoding='utf-8')

            # Markers that must appear verbatim in the component source.
            essential_elements = {
                "interface_DebugPanelProps": "interface DebugPanelProps" in content,
                "interface_StepAnalysis": "interface StepAnalysis" in content,
                "component_export": "const DebugPanel: React.FC<DebugPanelProps>" in content,
                "default_export": "export default DebugPanel" in content,
                "author_attribution": "Auteur : Dom, Alice, Kiro" in content,
                "french_comments": "Composant DebugPanel" in content,
                "material_ui_imports": "from '@mui/material'" in content,
                "vwb_hooks_imports": "useVWBStepIntegration" in content,
                "step_analysis_logic": "stepAnalysis: StepAnalysis" in content,
                "real_time_updates": "autoRefresh" in content
            }

            # Rough complexity metrics (substring counts, intentionally crude).
            lines_count = len(content.split('\n'))
            function_count = content.count('const ') + content.count('function ')
            hook_usage = content.count('use')

            # Feature-level markers (at least 5 of 6 required to pass).
            features = {
                "accordion_sections": "Accordion" in content,
                "detection_methods": "detectionMethods" in content,
                "parameter_analysis": "parametersAnalysis" in content,
                "validation_analysis": "validationAnalysis" in content,
                "vwb_integration": "vwbAnalysis" in content,
                "toggle_visibility": "onToggleVisibility" in content
            }

            test_result["details"] = {
                "essential_elements": essential_elements,
                "features": features,
                "lines_count": lines_count,
                "function_count": function_count,
                "hook_usage": hook_usage,
                "completeness_score": sum(essential_elements.values()) / len(essential_elements) * 100,
                "features_score": sum(features.values()) / len(features) * 100
            }

            if all(essential_elements.values()) and sum(features.values()) >= 5:
                test_result["status"] = "PASSED"
                print(" ✅ Structure du composant complète")
                print(f" ✅ {lines_count} lignes de code")
                print(f" ✅ {sum(features.values())}/6 fonctionnalités implémentées")
                self.test_results["tests_passed"] += 1
            else:
                test_result["status"] = "FAILED"
                missing_elements = [name for name, present in essential_elements.items() if not present]
                missing_features = [name for name, present in features.items() if not present]
                print(f" ❌ Éléments manquants: {missing_elements}")
                print(f" ❌ Fonctionnalités manquantes: {missing_features}")
                self.test_results["tests_failed"] += 1
                self._add_issue("HIGH", f"Structure incomplète: {missing_elements + missing_features}", test_result["details"])

        except Exception as e:
            test_result["status"] = "FAILED"
            test_result["details"]["error"] = str(e)
            print(f" ❌ Erreur test structure : {e}")
            self.test_results["tests_failed"] += 1
            self._add_issue("ERROR", f"Erreur test structure : {e}", {})
        finally:
            self._record(test_result)

    def _test_properties_panel_integration(self) -> None:
        """Check that PropertiesPanel imports, renders and wires the DebugPanel."""
        print("\n🔗 Test de l'intégration avec PropertiesPanel...")

        test_result = self._init_test_result(
            "properties_panel_integration",
            "Vérification de l'intégration du DebugPanel dans PropertiesPanel"
        )

        try:
            properties_panel_path = self.frontend_path / "src" / "components" / "PropertiesPanel" / "index.tsx"

            if not properties_panel_path.exists():
                # BUG FIX: early return now recorded via `finally` (see above).
                test_result["status"] = "FAILED"
                test_result["details"]["error"] = "Fichier PropertiesPanel introuvable"
                print(" ❌ Fichier PropertiesPanel introuvable")
                self.test_results["tests_failed"] += 1
                return

            content = properties_panel_path.read_text(encoding='utf-8')

            # All integration markers must be present to pass.
            integration_elements = {
                "debug_panel_import": "import DebugPanel from '../DebugPanel'" in content,
                "debug_panel_state": "isDebugPanelVisible" in content,
                "debug_panel_component": "<DebugPanel" in content,
                "development_mode_check": "process.env.NODE_ENV === 'development'" in content,
                "props_passing": "selectedStep={selectedStep}" in content and "variables={variables" in content
            }

            test_result["details"] = {
                "integration_elements": integration_elements,
                "integration_score": sum(integration_elements.values()) / len(integration_elements) * 100
            }

            if all(integration_elements.values()):
                test_result["status"] = "PASSED"
                print(" ✅ Intégration complète avec PropertiesPanel")
                print(" ✅ Import du composant présent")
                print(" ✅ État de visibilité géré")
                print(" ✅ Rendu conditionnel en mode développement")
                self.test_results["tests_passed"] += 1
            else:
                test_result["status"] = "FAILED"
                missing_elements = [name for name, present in integration_elements.items() if not present]
                print(f" ❌ Éléments d'intégration manquants: {missing_elements}")
                self.test_results["tests_failed"] += 1
                self._add_issue("HIGH", f"Intégration incomplète: {missing_elements}", test_result["details"])

        except Exception as e:
            test_result["status"] = "FAILED"
            test_result["details"]["error"] = str(e)
            print(f" ❌ Erreur test intégration : {e}")
            self.test_results["tests_failed"] += 1
            self._add_issue("ERROR", f"Erreur test intégration : {e}", {})
        finally:
            self._record(test_result)

    def _test_typescript_types(self) -> None:
        """Check the TypeScript interface and prop typings in the component."""
        print("\n🔧 Test des types TypeScript...")

        test_result = self._init_test_result(
            "typescript_types",
            "Vérification des types TypeScript du composant DebugPanel"
        )

        try:
            debug_panel_path = self.frontend_path / "src" / "components" / "DebugPanel" / "index.tsx"
            # A missing file raises here and is handled by the except clause.
            content = debug_panel_path.read_text(encoding='utf-8')

            # Interface / generic-usage markers.
            type_definitions = {
                "DebugPanelProps": "interface DebugPanelProps" in content,
                "StepAnalysis": "interface StepAnalysis" in content,
                "typed_props": "React.FC<DebugPanelProps>" in content,
                "typed_state": "useState<" in content,
                "typed_imports": "import { Step, StepType, Variable }" in content
            }

            # Individual prop typings.
            prop_types = {
                "selectedStep_typed": "selectedStep: Step | null" in content,
                "variables_typed": "variables: Variable[]" in content,
                "isVisible_typed": "isVisible?: boolean" in content,
                "onToggleVisibility_typed": "onToggleVisibility?: (visible: boolean) => void" in content
            }

            test_result["details"] = {
                "type_definitions": type_definitions,
                "prop_types": prop_types,
                "type_definitions_score": sum(type_definitions.values()) / len(type_definitions) * 100,
                "prop_types_score": sum(prop_types.values()) / len(prop_types) * 100
            }

            if all(type_definitions.values()) and all(prop_types.values()):
                test_result["status"] = "PASSED"
                print(" ✅ Tous les types TypeScript sont définis")
                print(" ✅ Interfaces complètes")
                print(" ✅ Props typées correctement")
                self.test_results["tests_passed"] += 1
            else:
                test_result["status"] = "FAILED"
                missing_types = [name for name, present in type_definitions.items() if not present]
                missing_props = [name for name, present in prop_types.items() if not present]
                print(f" ❌ Types manquants: {missing_types}")
                print(f" ❌ Props non typées: {missing_props}")
                self.test_results["tests_failed"] += 1
                self._add_issue("MEDIUM", f"Types TypeScript incomplets: {missing_types + missing_props}", test_result["details"])

        except Exception as e:
            test_result["status"] = "FAILED"
            test_result["details"]["error"] = str(e)
            print(f" ❌ Erreur test types : {e}")
            self.test_results["tests_failed"] += 1
            self._add_issue("ERROR", f"Erreur test types : {e}", {})
        finally:
            self._record(test_result)

    def _test_compilation(self) -> None:
        """Run ``npx tsc --noEmit`` in the frontend and check the exit code."""
        print("\n🏗️ Test de compilation TypeScript...")

        test_result = self._init_test_result(
            "typescript_compilation",
            "Vérification de la compilation TypeScript avec le nouveau composant"
        )

        try:
            # List form (shell=False) avoids shell-injection concerns; 60 s cap.
            result = subprocess.run(
                ["npx", "tsc", "--noEmit"],
                cwd=self.frontend_path,
                capture_output=True,
                text=True,
                timeout=60
            )

            test_result["details"] = {
                "exit_code": result.returncode,
                "stdout": result.stdout,
                "stderr": result.stderr,
                "compilation_success": result.returncode == 0
            }

            if result.returncode == 0:
                test_result["status"] = "PASSED"
                print(" ✅ Compilation TypeScript réussie")
                self.test_results["tests_passed"] += 1
            else:
                test_result["status"] = "FAILED"
                print(" ❌ Erreurs de compilation TypeScript")
                print(f" Stderr: {result.stderr[:200]}...")
                self.test_results["tests_failed"] += 1
                self._add_issue("CRITICAL", "Erreurs de compilation TypeScript", {
                    "stderr": result.stderr
                })

        except subprocess.TimeoutExpired:
            test_result["status"] = "FAILED"
            test_result["details"]["error"] = "Timeout de compilation"
            print(" ❌ Timeout lors de la compilation TypeScript")
            self.test_results["tests_failed"] += 1
            self._add_issue("ERROR", "Timeout de compilation TypeScript", {})

        except Exception as e:
            test_result["status"] = "FAILED"
            test_result["details"]["error"] = str(e)
            print(f" ❌ Erreur compilation : {e}")
            self.test_results["tests_failed"] += 1
            self._add_issue("ERROR", f"Erreur compilation : {e}", {})
        finally:
            self._record(test_result)

    def _calculate_final_results(self) -> None:
        """Compute the success rate and derive recommendations if needed."""
        total = self.test_results["total_tests"]
        passed = self.test_results["tests_passed"]

        # Guard against division by zero when no test was recorded.
        self.test_results["success_rate"] = (passed / total) * 100 if total > 0 else 0.0

        if self.test_results["success_rate"] < 100:
            self._generate_recommendations()

    def _generate_recommendations(self) -> None:
        """Append recommendations derived from failed tests and the success rate."""
        failed_tests = [test for test in self.test_results["tests_executed"] if test["status"] == "FAILED"]

        if failed_tests:
            self.test_results["recommendations"].append({
                "priority": "HIGH",
                "title": "Corriger les tests échoués",
                "description": f"{len(failed_tests)} test(s) ont échoué",
                "failed_tests": [test["name"] for test in failed_tests]
            })

        # Below 80% the component is considered not validated (see main()).
        if self.test_results["success_rate"] < 80:
            self.test_results["recommendations"].append({
                "priority": "CRITICAL",
                "title": "Taux de succès trop bas",
                "description": f"Taux de succès: {self.test_results['success_rate']:.1f}%",
                "action": "Réviser l'implémentation du DebugPanel"
            })

    def _add_issue(self, severity: str, description: str, details: Dict[str, Any]) -> None:
        """Record an identified issue in the report."""
        self.test_results["issues_found"].append({
            "severity": severity,
            "description": description,
            "details": details,
            "timestamp": time.time()
        })

    def _save_test_report(self) -> None:
        """Write the JSON report under ``docs/``; log (don't raise) on failure."""
        report_path = self.project_root / "docs" / "TEST_DEBUG_PANEL_INTEGRATION_12JAN2026.json"

        try:
            # parents=True so a missing ancestor directory is not fatal.
            report_path.parent.mkdir(parents=True, exist_ok=True)

            with open(report_path, 'w', encoding='utf-8') as f:
                json.dump(self.test_results, f, indent=2, ensure_ascii=False)

            print(f"\n📄 Rapport de test sauvegardé : {report_path}")

        except Exception as e:
            print(f"❌ Erreur sauvegarde rapport : {e}")
|
|
|
|
def main():
    """Script entry point: run the DebugPanel integration suite and report.

    Returns 0 when the success rate reaches the 80% threshold, 1 otherwise.
    """
    print("🧪 Test d'Intégration - Composant DebugPanel")

    results = TestDebugPanelIntegration().run_all_tests()

    # Final summary banner.
    separator = "=" * 60
    print("\n" + separator)
    print("📊 RÉSUMÉ DES TESTS D'INTÉGRATION DEBUGPANEL")
    print(separator)

    print(f"✅ Tests exécutés : {results['total_tests']}")
    print(f"✅ Tests réussis : {results['tests_passed']}")
    print(f"❌ Tests échoués : {results['tests_failed']}")
    print(f"📈 Taux de succès : {results['success_rate']:.1f}%")

    issues = results['issues_found']
    if issues:
        print(f"\n🚨 Problèmes identifiés : {len(issues)}")
        for issue in issues:
            print(f" - {issue['severity']}: {issue['description']}")

    recommendations = results['recommendations']
    if recommendations:
        print(f"\n💡 Recommandations : {len(recommendations)}")
        for rec in recommendations:
            print(f" - {rec['priority']}: {rec['title']}")

    print("\n📄 Rapport détaillé disponible dans docs/")

    # Exit code mirrors the 80% validation threshold.
    if results['success_rate'] >= 80:
        print("🎉 Composant DebugPanel validé avec succès !")
        return 0
    print("⚠️ Composant DebugPanel nécessite des améliorations")
    return 1


if __name__ == "__main__":
    sys.exit(main())