v1.0 - Stable version: multi-PC, UI-DETR-1 detection, 3 execution modes

- Frontend v4 reachable on the local network (192.168.1.40)
- Open ports: 3002 (frontend), 5001 (backend), 5004 (dashboard)
- Ollama GPU working
- Interactive self-healing
- Confidence dashboard

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
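Once the stack is up, the services listed above can be probed from another machine on the LAN. A minimal sketch, assuming the frontend, backend, and dashboard all answer a plain HTTP GET on their root path (the exact health endpoints are not specified in this commit):

    import requests

    # Host and ports taken from the commit message; "/" paths are an assumption.
    SERVICES = {
        "frontend (v4)": "http://192.168.1.40:3002/",
        "backend": "http://192.168.1.40:5001/",
        "dashboard": "http://192.168.1.40:5004/",
    }

    for name, url in SERVICES.items():
        try:
            r = requests.get(url, timeout=5)
            print(f"{name}: HTTP {r.status_code}")
        except requests.RequestException as e:
            print(f"{name}: unreachable ({e})")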
test_systeme_complet_final.py (new executable file, 337 lines)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Full System Test - RPA Vision V3
Authors: Dom, Alice, Kiro - January 8, 2026

This script tests the entire launch chain of the RPA Vision V3 system.
It verifies that every component can be launched and is functional.
"""

import os
import subprocess
import sys
from typing import Dict


# ANSI colors for terminal output
class Colors:
    RED = '\033[0;31m'
    GREEN = '\033[0;32m'
    YELLOW = '\033[1;33m'
    BLUE = '\033[0;34m'
    PURPLE = '\033[0;35m'
    CYAN = '\033[0;36m'
    BOLD = '\033[1m'
    NC = '\033[0m'  # No Color

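# Usage note: every colored segment must be closed with Colors.NC so the
# terminal attributes reset, e.g. print(f"{Colors.GREEN}ok{Colors.NC}").
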
def print_header():
    """Print the system-test banner."""
    print(f"{Colors.PURPLE}{Colors.BOLD}")
    print("╔════════════════════════════════════════════════════════════╗")
    print("║       🧪 Full System Test - RPA Vision V3                  ║")
    print("║       Launch-chain verification                            ║")
    print("║       Authors: Dom, Alice, Kiro - January 8, 2026          ║")
    print("╚════════════════════════════════════════════════════════════╝")
    print(f"{Colors.NC}")

def check_file_exists(filepath: str, description: str) -> bool:
    """Check that a file exists."""
    if os.path.exists(filepath):
        print(f"{Colors.GREEN}✓ {description}: {filepath}{Colors.NC}")
        return True
    else:
        print(f"{Colors.RED}✗ {description} missing: {filepath}{Colors.NC}")
        return False


def check_directory_exists(dirpath: str, description: str) -> bool:
    """Check that a directory exists."""
    if os.path.isdir(dirpath):
        print(f"{Colors.GREEN}✓ {description}: {dirpath}{Colors.NC}")
        return True
    else:
        print(f"{Colors.RED}✗ {description} missing: {dirpath}{Colors.NC}")
        return False


def check_script_executable(script_path: str) -> bool:
    """Check that a script is executable; try to fix the permissions if not."""
    if os.access(script_path, os.X_OK):
        print(f"{Colors.GREEN}✓ Executable script: {script_path}{Colors.NC}")
        return True
    else:
        print(f"{Colors.YELLOW}⚠ Script not executable: {script_path}{Colors.NC}")
        # Try to make it executable
        try:
            os.chmod(script_path, 0o755)
            print(f"{Colors.GREEN}✓ Permissions fixed: {script_path}{Colors.NC}")
            return True
        except Exception as e:
            print(f"{Colors.RED}✗ Could not fix permissions: {e}{Colors.NC}")
            return False

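# The three helpers above all return a bool, so the test functions below can
# fold individual checks into a single result with `success &= check_...(...)`.
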
def test_environment_setup() -> bool:
    """Test 1: Check the base environment."""
    print(f"\n{Colors.BLUE}[1/7] Base environment test{Colors.NC}")
    print("=" * 50)

    success = True

    # Check Python
    try:
        python_version = subprocess.check_output(
            [sys.executable, "--version"], text=True).strip()
        print(f"{Colors.GREEN}✓ Python: {python_version}{Colors.NC}")
    except Exception as e:
        print(f"{Colors.RED}✗ Python error: {e}{Colors.NC}")
        success = False

    # Check the virtual environment
    success &= check_directory_exists("venv_v3", "Virtual environment")

    # Check the configuration files
    success &= check_file_exists("requirements.txt", "Python dependencies")
    success &= check_file_exists(".env.example", "Configuration example")

    return success

def test_scripts_availability() -> bool:
    """Test 2: Check the launch scripts."""
    print(f"\n{Colors.BLUE}[2/7] Launch scripts test{Colors.NC}")
    print("=" * 50)

    success = True

    # Main scripts
    scripts = [
        ("run.sh", "Main launch script"),
        ("launch_all.sh", "Full launch script"),
        ("visual_workflow_builder/run.sh", "VWB script"),
        ("visual_workflow_builder/start_full.sh", "Full VWB script")
    ]

    for script_path, description in scripts:
        if check_file_exists(script_path, description):
            success &= check_script_executable(script_path)
        else:
            success = False

    return success

def test_project_structure() -> bool:
    """Test 3: Check the project structure."""
    print(f"\n{Colors.BLUE}[3/7] Project structure test{Colors.NC}")
    print("=" * 50)

    success = True

    # Main directories
    directories = [
        ("core", "Core system modules"),
        ("agent_v0", "Capture agent"),
        ("server", "API server"),
        ("web_dashboard", "Web dashboard"),
        ("visual_workflow_builder", "Workflow builder"),
        ("gui", "Graphical interface"),
        ("tests", "System tests"),
        ("docs", "Documentation")
    ]

    for dir_path, description in directories:
        success &= check_directory_exists(dir_path, description)

    # Check the VWB structure
    vwb_dirs = [
        ("visual_workflow_builder/backend", "VWB backend"),
        ("visual_workflow_builder/frontend", "VWB frontend")
    ]

    for dir_path, description in vwb_dirs:
        success &= check_directory_exists(dir_path, description)

    return success

def test_dependencies() -> bool:
    """Test 4: Check the dependencies."""
    print(f"\n{Colors.BLUE}[4/7] Dependencies test{Colors.NC}")
    print("=" * 50)

    success = True

    # Check Node.js
    try:
        node_version = subprocess.check_output(
            ["node", "--version"], text=True).strip()
        print(f"{Colors.GREEN}✓ Node.js: {node_version}{Colors.NC}")
    except Exception as e:
        print(f"{Colors.RED}✗ Node.js not found: {e}{Colors.NC}")
        success = False

    # Check npm
    try:
        npm_version = subprocess.check_output(
            ["npm", "--version"], text=True).strip()
        print(f"{Colors.GREEN}✓ npm: {npm_version}{Colors.NC}")
    except Exception as e:
        print(f"{Colors.RED}✗ npm not found: {e}{Colors.NC}")
        success = False

    # Check the VWB frontend dependencies
    if os.path.exists("visual_workflow_builder/frontend/package.json"):
        print(f"{Colors.GREEN}✓ VWB npm configuration found{Colors.NC}")
    else:
        print(f"{Colors.RED}✗ VWB npm configuration missing{Colors.NC}")
        success = False

    return success

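# Note: package.json alone does not prove the frontend dependencies are
# installed. A stricter variant of the check above (a sketch, not part of the
# original 7-test suite) could also require the node_modules directory:
#
#     if not os.path.isdir("visual_workflow_builder/frontend/node_modules"):
#         print("⚠ VWB frontend dependencies not installed (run npm install)")
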
def test_configuration_files() -> bool:
    """Test 5: Check the configuration files."""
    print(f"\n{Colors.BLUE}[5/7] Configuration files test{Colors.NC}")
    print("=" * 50)

    success = True

    # Main configuration files
    config_files = [
        ("core/config.py", "Central configuration"),
        ("visual_workflow_builder/backend/app.py", "VWB backend application"),
        ("visual_workflow_builder/frontend/package.json", "VWB frontend configuration"),
        ("server/api_upload.py", "Upload API"),
        ("web_dashboard/app.py", "Dashboard application")
    ]

    for file_path, description in config_files:
        success &= check_file_exists(file_path, description)

    return success

def test_launch_simulation() -> bool:
    """Test 6: Launch simulation (without actually starting anything)."""
    print(f"\n{Colors.BLUE}[6/7] Launch simulation test{Colors.NC}")
    print("=" * 50)

    success = True

    # Test the main script with --check
    try:
        print(f"{Colors.CYAN}🔍 Testing the main script with --check...{Colors.NC}")
        result = subprocess.run(["./run.sh", "--check"],
                                capture_output=True, text=True, timeout=30)

        if result.returncode == 0:
            print(f"{Colors.GREEN}✓ Main script works correctly{Colors.NC}")
        else:
            print(f"{Colors.YELLOW}⚠ Main script finished with warnings{Colors.NC}")
            print(f"Output: {result.stdout}")
            if result.stderr:
                print(f"Errors: {result.stderr}")

    except subprocess.TimeoutExpired:
        print(f"{Colors.YELLOW}⚠ Launch test timed out (this is expected){Colors.NC}")
    except Exception as e:
        print(f"{Colors.RED}✗ Error during the launch test: {e}{Colors.NC}")
        success = False

    return success

def test_documentation_compliance() -> bool:
    """Test 7: Check documentation compliance."""
    print(f"\n{Colors.BLUE}[7/7] Documentation compliance test{Colors.NC}")
    print("=" * 50)

    success = True

    # Check the documentation files
    doc_files = [
        ("README.md", "Main documentation"),
        ("docs", "Documentation directory"),
        ("visual_workflow_builder/README_DEMONSTRATION_REELLE.md", "VWB demonstration guide")
    ]

    for file_path, description in doc_files:
        if os.path.isfile(file_path) or os.path.isdir(file_path):
            print(f"{Colors.GREEN}✓ {description} present{Colors.NC}")
        else:
            print(f"{Colors.YELLOW}⚠ {description} missing: {file_path}{Colors.NC}")

    # Check that the launch scripts carry an attribution
    scripts_to_check = ["run.sh", "launch_all.sh"]

    for script in scripts_to_check:
        if os.path.exists(script):
            try:
                with open(script, 'r', encoding='utf-8') as f:
                    content = f.read()
                if "Dom, Alice" in content or "Kiro" in content:
                    print(f"{Colors.GREEN}✓ Attribution found in {script}{Colors.NC}")
                else:
                    print(f"{Colors.YELLOW}⚠ Attribution missing in {script}{Colors.NC}")
            except Exception as e:
                print(f"{Colors.RED}✗ Error reading {script}: {e}{Colors.NC}")

    return success

def generate_test_report(results: Dict[str, bool]) -> None:
    """Generate the test report."""
    print(f"\n{Colors.PURPLE}{Colors.BOLD}📊 SYSTEM TEST REPORT{Colors.NC}")
    print("=" * 60)

    total_tests = len(results)
    passed_tests = sum(results.values())

    for test_name, result in results.items():
        status = f"{Colors.GREEN}✓ PASSED{Colors.NC}" if result else f"{Colors.RED}✗ FAILED{Colors.NC}"
        print(f"  {test_name:<40} {status}")

    print("\n" + "=" * 60)
    print(f"Total tests:  {total_tests}")
    print(f"Tests passed: {Colors.GREEN}{passed_tests}{Colors.NC}")
    print(f"Tests failed: {Colors.RED}{total_tests - passed_tests}{Colors.NC}")

    success_rate = (passed_tests / total_tests) * 100
    if success_rate >= 90:
        print(f"Success rate: {Colors.GREEN}{success_rate:.1f}%{Colors.NC}")
        print(f"\n{Colors.GREEN}{Colors.BOLD}🎉 SYSTEM READY FOR TESTING!{Colors.NC}")
    elif success_rate >= 70:
        print(f"Success rate: {Colors.YELLOW}{success_rate:.1f}%{Colors.NC}")
        print(f"\n{Colors.YELLOW}{Colors.BOLD}⚠ SYSTEM PARTIALLY READY{Colors.NC}")
    else:
        print(f"Success rate: {Colors.RED}{success_rate:.1f}%{Colors.NC}")
        print(f"\n{Colors.RED}{Colors.BOLD}❌ SYSTEM NOT READY{Colors.NC}")

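# Worked example: with the 7 tests above, 6 passes give a success rate of
# 6 / 7 * 100 ≈ 85.7%, which lands in the 70-90% band ("SYSTEM PARTIALLY
# READY"); 5/7 ≈ 71.4% stays in that band, while 4/7 ≈ 57.1% drops to
# "SYSTEM NOT READY".
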
def main():
    """Entry point of the system test."""
    print_header()

    # Collect the result of each test
    test_results = {}

    # Run every test
    test_results["Base environment"] = test_environment_setup()
    test_results["Launch scripts"] = test_scripts_availability()
    test_results["Project structure"] = test_project_structure()
    test_results["Dependencies"] = test_dependencies()
    test_results["Configuration files"] = test_configuration_files()
    test_results["Launch simulation"] = test_launch_simulation()
    test_results["Documentation compliance"] = test_documentation_compliance()

    # Generate the report
    generate_test_report(test_results)

    # Final instructions
    print(f"\n{Colors.CYAN}{Colors.BOLD}📋 LAUNCH INSTRUCTIONS{Colors.NC}")
    print("=" * 60)
    print(f"{Colors.GREEN}To launch the full system:{Colors.NC}")
    print("  ./run.sh --full")
    print(f"\n{Colors.GREEN}To launch only the Visual Workflow Builder:{Colors.NC}")
    print("  cd visual_workflow_builder && ./start_full.sh")
    print(f"\n{Colors.GREEN}To launch with the full script:{Colors.NC}")
    print("  ./launch_all.sh")
    print(f"\n{Colors.YELLOW}For quick tests:{Colors.NC}")
    print("  ./run.sh --test-quick")

    # Exit non-zero when any test failed, so CI or shell scripts can react
    sys.exit(0 if all(test_results.values()) else 1)


if __name__ == "__main__":
    main()
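Typical invocation (the file ships with a shebang and the executable bit set, so both forms work):

    ./test_systeme_complet_final.py
    python3 test_systeme_complet_final.py

With the exit code added in main() above, the script can gate a CI step or a shell chain, e.g. `./test_systeme_complet_final.py && ./run.sh --full`.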