v1.0 - Stable version: multi-PC, UI-DETR-1 detection, 3 execution modes

- Frontend v4 reachable on the local network (192.168.1.40)
- Open ports: 3002 (frontend), 5001 (backend), 5004 (dashboard)
- Ollama GPU working
- Interactive self-healing
- Confidence dashboard

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
401
visual_workflow_builder/test_performance.py
Normal file
@@ -0,0 +1,401 @@
#!/usr/bin/env python3
"""
Performance Test Script - Visual Workflow Builder

This script exercises the Visual Workflow Builder under different
workloads to validate the performance optimizations.
"""

import time
import json
import requests
import random
import statistics
from typing import Dict, Any
from concurrent.futures import ThreadPoolExecutor
import numpy as np

# Test configuration
BASE_URL = "http://localhost:5000"
TEST_CONFIGS = [
    {"nodes": 10, "name": "Small Workflow"},
    {"nodes": 50, "name": "Medium Workflow"},
    {"nodes": 100, "name": "Large Workflow"},
    {"nodes": 200, "name": "Extra Large Workflow"},
    {"nodes": 500, "name": "Stress Test"},
]
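# Note: configs above 100 nodes run the creation and validation tests only;
# the concurrency test is skipped for them (see run_performance_tests).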

class PerformanceTester:
    def __init__(self):
        self.results = {}
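        # A shared requests.Session reuses HTTP connections (keep-alive),
        # so timings below are not inflated by per-request TCP handshakes.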
        self.session = requests.Session()

    def create_test_workflow(self, node_count: int) -> Dict[str, Any]:
        """Create a test workflow with the specified number of nodes."""

        nodes = []
        edges = []

        # Lay the nodes out on a near-square grid
        grid_size = int(np.ceil(np.sqrt(node_count)))

        for i in range(node_count):
            row = i // grid_size
            col = i % grid_size

            node = {
                "id": f"node-{i}",
                "type": random.choice(["click", "type", "wait", "navigate"]),
                "position": {
                    "x": col * 200 + random.randint(-20, 20),
                    "y": row * 150 + random.randint(-20, 20)
                },
                "data": {
                    "label": f"Node {i}",
                    "parameters": {
                        "target": f"#element-{i}",
                        "timeout": 5000
                    }
                }
            }
            nodes.append(node)

            # Create connections (not between every pair, to keep complexity bounded)
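            # Each edge points from a randomly chosen earlier node to the
            # current node, so the generated graph stays acyclic.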
            if i > 0 and random.random() < 0.7:  # 70% chance of a connection
                source_id = f"node-{random.randint(0, i-1)}"
                edge = {
                    "id": f"edge-{len(edges)}",
                    "source": source_id,
                    "target": f"node-{i}",
                    "type": "default"
                }
                edges.append(edge)

        return {
            "id": f"test-workflow-{node_count}",
            "name": f"Test Workflow {node_count} nodes",
            "description": f"Test workflow with {node_count} nodes",
            "nodes": nodes,
            "edges": edges,
            "variables": [
                {"name": "test_var", "value": "test_value", "type": "string"}
            ]
        }

    def test_workflow_creation(self, node_count: int, iterations: int = 5) -> Dict[str, float]:
        """Test workflow creation."""

        times = []

        for _ in range(iterations):
            workflow = self.create_test_workflow(node_count)

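            # The payload is built before the timer starts, so only the
            # HTTP round-trip is measured.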
            start_time = time.time()

            try:
                response = self.session.post(
                    f"{BASE_URL}/api/workflows",
                    json=workflow,
                    timeout=30
                )

                if response.status_code == 201:
                    end_time = time.time()
                    times.append((end_time - start_time) * 1000)  # in ms
                else:
                    print(f"Workflow creation error: {response.status_code}")

            except Exception as e:
                print(f"Error during creation: {e}")

        if times:
            return {
                "avg_time": statistics.mean(times),
                "min_time": min(times),
                "max_time": max(times),
                "std_dev": statistics.stdev(times) if len(times) > 1 else 0,
                "success_rate": len(times) / iterations * 100
            }
        else:
            return {"avg_time": 0, "min_time": 0, "max_time": 0, "std_dev": 0, "success_rate": 0}

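    # Note: this loading test is not wired into run_performance_tests();
    # it can be called manually with the id of a saved workflow.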
    def test_workflow_loading(self, workflow_id: str, iterations: int = 10) -> Dict[str, float]:
        """Test workflow loading."""

        times = []

        for _ in range(iterations):
            start_time = time.time()

            try:
                response = self.session.get(
                    f"{BASE_URL}/api/workflows/{workflow_id}",
                    timeout=10
                )

                if response.status_code == 200:
                    end_time = time.time()
                    times.append((end_time - start_time) * 1000)  # in ms
                else:
                    print(f"Workflow loading error: {response.status_code}")

            except Exception as e:
                print(f"Error during loading: {e}")

        if times:
            return {
                "avg_time": statistics.mean(times),
                "min_time": min(times),
                "max_time": max(times),
                "std_dev": statistics.stdev(times) if len(times) > 1 else 0,
                "success_rate": len(times) / iterations * 100
            }
        else:
            return {"avg_time": 0, "min_time": 0, "max_time": 0, "std_dev": 0, "success_rate": 0}

    def test_workflow_validation(self, workflow: Dict[str, Any], iterations: int = 5) -> Dict[str, float]:
        """Test workflow validation."""

        times = []

        for _ in range(iterations):
            start_time = time.time()

            try:
                response = self.session.post(
                    f"{BASE_URL}/api/workflows/validate",
                    json=workflow,
                    timeout=15
                )

                if response.status_code == 200:
                    end_time = time.time()
                    times.append((end_time - start_time) * 1000)  # in ms
                else:
                    print(f"Workflow validation error: {response.status_code}")

            except Exception as e:
                print(f"Error during validation: {e}")

        if times:
            return {
                "avg_time": statistics.mean(times),
                "min_time": min(times),
                "max_time": max(times),
                "std_dev": statistics.stdev(times) if len(times) > 1 else 0,
                "success_rate": len(times) / iterations * 100
            }
        else:
            return {"avg_time": 0, "min_time": 0, "max_time": 0, "std_dev": 0, "success_rate": 0}

    def test_concurrent_operations(self, node_count: int, concurrent_users: int = 5) -> Dict[str, Any]:
        """Test concurrent operations."""

        def create_workflow():
            workflow = self.create_test_workflow(node_count)
            start_time = time.time()

            try:
                response = self.session.post(
                    f"{BASE_URL}/api/workflows",
                    json=workflow,
                    timeout=30
                )
                end_time = time.time()

                return {
                    "success": response.status_code == 201,
                    "time": (end_time - start_time) * 1000,
                    "status_code": response.status_code
                }
            except Exception as e:
                return {
                    "success": False,
                    "time": 0,
                    "error": str(e)
                }

        # Run the operations in parallel
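        # Caveat: all workers share self.session, and requests.Session is not
        # documented as thread-safe; treat these numbers as indicative.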
        with ThreadPoolExecutor(max_workers=concurrent_users) as executor:
            futures = [executor.submit(create_workflow) for _ in range(concurrent_users)]
            results = [future.result() for future in futures]

        successful_results = [r for r in results if r["success"]]

        if successful_results:
            times = [r["time"] for r in successful_results]
            return {
                "success_rate": len(successful_results) / len(results) * 100,
                "avg_time": statistics.mean(times),
                "max_time": max(times),
                "min_time": min(times),
                "concurrent_users": concurrent_users,
                "total_operations": len(results)
            }
        else:
            return {
                "success_rate": 0,
                "avg_time": 0,
                "max_time": 0,
                "min_time": 0,
                "concurrent_users": concurrent_users,
                "total_operations": len(results)
            }

    def run_performance_tests(self):
        """Run all the performance tests."""

        print("🚀 Starting performance tests...")
        print("=" * 60)

        for config in TEST_CONFIGS:
            node_count = config["nodes"]
            name = config["name"]

            print(f"\n📊 Test: {name} ({node_count} nodes)")
            print("-" * 40)

            # Creation test
            print("  • Creation test...")
            creation_results = self.test_workflow_creation(node_count)

            # Create a workflow for the remaining tests
            workflow = self.create_test_workflow(node_count)

            # Validation test
            print("  • Validation test...")
            validation_results = self.test_workflow_validation(workflow)

            # Concurrency test (only for the smaller workflows)
            concurrent_results = None
            if node_count <= 100:
                print("  • Concurrency test...")
                concurrent_results = self.test_concurrent_operations(node_count)

            # Store the results
            self.results[name] = {
                "node_count": node_count,
                "creation": creation_results,
                "validation": validation_results,
                "concurrent": concurrent_results
            }

            # Display the results
            print(f"  Creation: {creation_results['avg_time']:.1f}ms (±{creation_results['std_dev']:.1f})")
            print(f"  Validation: {validation_results['avg_time']:.1f}ms (±{validation_results['std_dev']:.1f})")
            if concurrent_results:
                print(f"  Concurrent: {concurrent_results['avg_time']:.1f}ms ({concurrent_results['success_rate']:.0f}% success)")

    def generate_report(self):
        """Generate a performance report."""

        print("\n" + "=" * 60)
        print("📈 PERFORMANCE REPORT")
        print("=" * 60)

        # Results table
        print(f"{'Test':<20} {'Nodes':<8} {'Creation':<12} {'Validation':<12} {'Concurrent':<12}")
        print("-" * 70)

        for name, results in self.results.items():
            creation_time = f"{results['creation']['avg_time']:.1f}ms"
            validation_time = f"{results['validation']['avg_time']:.1f}ms"
            concurrent_time = f"{results['concurrent']['avg_time']:.1f}ms" if results['concurrent'] else "N/A"

            print(f"{name:<20} {results['node_count']:<8} {creation_time:<12} {validation_time:<12} {concurrent_time:<12}")

        # Performance analysis
        print("\n🎯 PERFORMANCE ANALYSIS")
        print("-" * 30)

        # Performance targets
        objectives = {
            "creation_100_nodes": 100,  # ms
            "validation_100_nodes": 50,  # ms
            "concurrent_success_rate": 95,  # %
        }

        # Check the targets
        for name, results in self.results.items():
            if results["node_count"] == 100:
                creation_time = results["creation"]["avg_time"]
                validation_time = results["validation"]["avg_time"]

                creation_status = "✅" if creation_time <= objectives["creation_100_nodes"] else "❌"
                validation_status = "✅" if validation_time <= objectives["validation_100_nodes"] else "❌"

                print(f"Creation, 100 nodes: {creation_time:.1f}ms {creation_status} (target: {objectives['creation_100_nodes']}ms)")
                print(f"Validation, 100 nodes: {validation_time:.1f}ms {validation_status} (target: {objectives['validation_100_nodes']}ms)")

                if results["concurrent"]:
                    concurrent_rate = results["concurrent"]["success_rate"]
                    concurrent_status = "✅" if concurrent_rate >= objectives["concurrent_success_rate"] else "❌"
                    print(f"Concurrent success rate: {concurrent_rate:.1f}% {concurrent_status} (target: {objectives['concurrent_success_rate']}%)")

        # Recommendations
        print("\n💡 RECOMMENDATIONS")
        print("-" * 20)

        max_creation_time = max(r["creation"]["avg_time"] for r in self.results.values())
        max_validation_time = max(r["validation"]["avg_time"] for r in self.results.values())

        if max_creation_time > 200:
            print("• Optimize workflow creation (times > 200ms detected)")

        if max_validation_time > 100:
            print("• Optimize workflow validation (times > 100ms detected)")

        # Check scalability
        creation_times = [(r["node_count"], r["creation"]["avg_time"]) for r in self.results.values()]
        creation_times.sort()

        if len(creation_times) >= 2:
            # Compute the growth between the smallest and largest workflow
            small_time = creation_times[0][1]
            large_time = creation_times[-1][1]
            growth_factor = large_time / small_time if small_time > 0 else 0

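            # The largest config has 50x the nodes of the smallest (500 vs 10),
            # so a growth factor below 10x indicates sub-linear time scaling.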
            if growth_factor > 10:
                print(f"• Limited scalability detected (growth factor: {growth_factor:.1f}x)")
            else:
                print(f"• Good scalability (growth factor: {growth_factor:.1f}x)")

    def save_results(self, filename: str = "performance_results.json"):
        """Save the results to a JSON file."""

        with open(filename, 'w') as f:
            json.dump(self.results, f, indent=2)

        print(f"\n💾 Results saved to {filename}")

def main():
    """Main entry point."""

    print("🎯 Visual Workflow Builder - Performance Tests")
    print("=" * 60)

    # Check that the server is reachable
    try:
        response = requests.get(f"{BASE_URL}/api/health", timeout=5)
        if response.status_code != 200:
            print(f"❌ Server not reachable: {BASE_URL}")
            print("   Make sure the backend has been started with: ./start.sh")
            return
    except Exception as e:
        print(f"❌ Unable to reach the server: {e}")
        print("   Make sure the backend has been started with: ./start.sh")
        return

    print(f"✅ Server reachable: {BASE_URL}")

    # Run the tests
    tester = PerformanceTester()
    tester.run_performance_tests()
    tester.generate_report()
    tester.save_results()

    print("\n🎉 Performance tests finished!")

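# Usage sketch (assumes the backend from ./start.sh is serving on
# localhost:5000, matching BASE_URL above):
#   python3 test_performance.py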
if __name__ == "__main__":
    main()