- Frontend v4 accessible sur réseau local (192.168.1.40) - Ports ouverts: 3002 (frontend), 5001 (backend), 5004 (dashboard) - Ollama GPU fonctionnel - Self-healing interactif - Dashboard confiance Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
381 lines · 14 KiB · Python
"""
|
|
Tests de Performance Complets
|
|
|
|
Valide les contraintes de temps pour toutes les opérations critiques:
|
|
- Property 19: Performance Constraint - State Embedding (<100ms)
|
|
- Property 20: Performance Constraint - End-to-End (<500ms)
|
|
|
|
Benchmarks:
|
|
- Fusion d'embeddings
|
|
- Recherche FAISS
|
|
- Détection UI
|
|
- Construction de workflow
|
|
- Matching de nodes
|
|
- Exécution d'actions
|
|
"""
|
|
|
|
import pytest
|
|
import numpy as np
|
|
import time
|
|
from pathlib import Path
|
|
import tempfile
|
|
import shutil
|
|
from typing import List, Dict, Any
|
|
|
|
from core.embedding.fusion_engine import FusionEngine
|
|
from core.embedding.faiss_manager import FAISSManager
|
|
from core.embedding.embedding_cache import EmbeddingCache
|
|
from core.detection.roi_optimizer import ROIOptimizer
|
|
|
|
|
|
class TestPerformanceBenchmarks:
    """Performance tests validating the project's timing constraints.

    Covers:
    - Property 19: state-embedding fusion must average under 100ms.
    - Property 20: the full pipeline (fusion + FAISS search) under 500ms.
    - Supporting benchmarks: FAISS search (Flat and IVF), embedding cache,
      ROI optimization, batch processing, and memory footprint.

    All timings use ``time.perf_counter`` and are reported in milliseconds.
    """

    def setup_method(self):
        """Create a scratch directory and fresh components before each test."""
        self.temp_dir = Path(tempfile.mkdtemp())

        # Components under test (project-local implementations).
        self.fusion_engine = FusionEngine()
        self.faiss_manager = FAISSManager(dimensions=512, index_type="Flat")
        self.embedding_cache = EmbeddingCache(max_size=100)
        self.roi_optimizer = ROIOptimizer()

    def teardown_method(self):
        """Remove the scratch directory created by setup_method."""
        if self.temp_dir.exists():
            shutil.rmtree(self.temp_dir)

    def _measure_time(self, func, *args, **kwargs) -> float:
        """Return the wall-clock duration of one call to *func*, in ms."""
        start = time.perf_counter()
        func(*args, **kwargs)
        end = time.perf_counter()
        return (end - start) * 1000  # convert seconds -> ms

    def _collect_times(self, func, iterations: int) -> List[float]:
        """Run *func* ``iterations`` times; return per-call durations in ms.

        Consolidates the timing loop that was previously duplicated in
        every benchmark method.
        """
        return [self._measure_time(func) for _ in range(iterations)]

    @staticmethod
    def _random_embedding(dim: int = 512) -> np.ndarray:
        """Return a random float32 vector of dimensionality *dim*."""
        return np.random.randn(dim).astype(np.float32)

    def test_property_19_fusion_performance(self):
        """
        Property 19: Performance Constraint - State Embedding

        Embedding fusion must take less than 100ms.
        """
        # Fixed test inputs; fusion is timed, input creation is not.
        inputs = {
            "image": self._random_embedding(),
            "text": self._random_embedding(),
            "title": self._random_embedding(),
            "ui": self._random_embedding(),
        }

        # 10 iterations to average out scheduler noise.
        times = self._collect_times(lambda: self.fusion_engine.fuse(inputs), 10)

        avg_time = sum(times) / len(times)
        max_time = max(times)

        print(f"\n Fusion Performance:")
        print(f" Average: {avg_time:.2f}ms")
        print(f" Max: {max_time:.2f}ms")
        print(f" Target: <100ms")

        # Validate the constraint (max gets 50% headroom over the average).
        assert avg_time < 100, f"Fusion too slow: {avg_time:.2f}ms (target: <100ms)"
        assert max_time < 150, f"Fusion max too slow: {max_time:.2f}ms (target: <150ms)"

    def test_faiss_search_performance_small(self):
        """FAISS search on a small Flat index (<10k vectors) must average <50ms."""
        # Populate the index with 1000 random embeddings.
        for i in range(1000):
            self.faiss_manager.add_embedding(f"emb_{i}", self._random_embedding())

        query = self._random_embedding()
        times = self._collect_times(
            lambda: self.faiss_manager.search_similar(query, k=10), 10
        )

        avg_time = sum(times) / len(times)

        print(f"\n FAISS Search (1k vectors):")
        print(f" Average: {avg_time:.2f}ms")
        print(f" Target: <50ms")

        assert avg_time < 50, f"FAISS search too slow: {avg_time:.2f}ms"

    def test_faiss_search_performance_medium(self):
        """FAISS search on a medium IVF index (10k vectors) must average <50ms."""
        # IVF index: 100 clusters, probing 8 per query — an accuracy/speed
        # trade-off appropriate for ~10k vectors.
        faiss_ivf = FAISSManager(
            dimensions=512,
            index_type="IVF",
            nlist=100,
            nprobe=8
        )

        print("\n Adding 10k embeddings...")
        for i in range(10000):
            faiss_ivf.add_embedding(f"emb_{i}", self._random_embedding())

        query = self._random_embedding()
        times = self._collect_times(
            lambda: faiss_ivf.search_similar(query, k=10), 10
        )

        avg_time = sum(times) / len(times)

        print(f" FAISS Search (10k vectors, IVF):")
        print(f" Average: {avg_time:.2f}ms")
        print(f" Target: <50ms")

        assert avg_time < 50, f"FAISS IVF search too slow: {avg_time:.2f}ms"

    def test_embedding_cache_performance(self):
        """Cache lookups — both hits and misses — must stay under 1ms."""
        # Fill the cache with 100 embeddings (keys emb_0..emb_99).
        for i in range(100):
            self.embedding_cache.put(f"emb_{i}", self._random_embedding())

        # Cache hits: keys 0..49 are present.
        times_hit = [
            self._measure_time(self.embedding_cache.get, f"emb_{i}")
            for i in range(50)
        ]
        avg_hit = sum(times_hit) / len(times_hit)

        # Cache misses: keys 100..149 were never inserted.
        times_miss = [
            self._measure_time(self.embedding_cache.get, f"emb_{i}")
            for i in range(100, 150)
        ]
        avg_miss = sum(times_miss) / len(times_miss)

        print(f"\n Cache Performance:")
        print(f" Hit: {avg_hit:.4f}ms")
        print(f" Miss: {avg_miss:.4f}ms")
        print(f" Target: <1ms")

        assert avg_hit < 1.0, f"Cache hit too slow: {avg_hit:.4f}ms"
        assert avg_miss < 1.0, f"Cache miss too slow: {avg_miss:.4f}ms"

    def test_roi_optimization_performance(self):
        """ROI optimization on a 2560x1440 frame must average <100ms."""
        import cv2

        # White canvas with two colored rectangles to give the optimizer
        # something to detect.  NOTE(review): np.ones((2560, 1440, 3)) is
        # 2560 rows x 1440 cols (portrait); kept as-is to preserve behavior.
        image = np.ones((2560, 1440, 3), dtype=np.uint8) * 255
        cv2.rectangle(image, (100, 100), (300, 200), (0, 0, 255), -1)
        cv2.rectangle(image, (500, 100), (700, 200), (0, 255, 0), -1)

        # The optimizer reads from disk, so persist the frame first.
        image_path = self.temp_dir / "test_2560x1440.png"
        cv2.imwrite(str(image_path), image)

        times = self._collect_times(
            lambda: self.roi_optimizer.optimize_frame(str(image_path)), 5
        )

        avg_time = sum(times) / len(times)

        print(f"\n ROI Optimization (2560x1440):")
        print(f" Average: {avg_time:.2f}ms")
        print(f" Target: <100ms")

        assert avg_time < 100, f"ROI optimization too slow: {avg_time:.2f}ms"

    def test_property_20_end_to_end_performance(self):
        """
        Property 20: Performance Constraint - End-to-End

        The full pipeline (fusion + search) must take less than 500ms.
        """
        # Pre-populate the FAISS index with 1000 embeddings.
        for i in range(1000):
            self.faiss_manager.add_embedding(f"emb_{i}", self._random_embedding())

        def pipeline():
            # 1. Embedding fusion (input creation is intentionally timed,
            #    matching the original end-to-end measurement).
            fused = self.fusion_engine.fuse({
                "image": self._random_embedding(),
                "text": self._random_embedding(),
                "title": self._random_embedding(),
                "ui": self._random_embedding(),
            })
            # 2. FAISS similarity search over the fused state.
            self.faiss_manager.search_similar(fused, k=10)

        times = self._collect_times(pipeline, 10)

        avg_time = sum(times) / len(times)
        max_time = max(times)

        print(f"\n End-to-End Performance:")
        print(f" Average: {avg_time:.2f}ms")
        print(f" Max: {max_time:.2f}ms")
        print(f" Target: <500ms")

        # Validate the constraint (max gets 50% headroom over the average).
        assert avg_time < 500, f"End-to-end too slow: {avg_time:.2f}ms (target: <500ms)"
        assert max_time < 750, f"End-to-end max too slow: {max_time:.2f}ms (target: <750ms)"

    def test_batch_processing_performance(self):
        """Sequential batch fusion must stay under 100ms per item."""
        embeddings = [self._random_embedding() for _ in range(10)]

        def run_batch():
            # Same vector reused for all four modalities — only fusion
            # cost matters here, not input variety.
            for emb in embeddings:
                self.fusion_engine.fuse({
                    "image": emb,
                    "text": emb,
                    "title": emb,
                    "ui": emb
                })

        sequential_time = self._measure_time(run_batch)

        print(f"\n Batch Processing (10 embeddings):")
        print(f" Sequential: {sequential_time:.2f}ms")
        print(f" Per item: {sequential_time/10:.2f}ms")
        print(f" Target per item: <100ms")

        assert sequential_time / 10 < 100, f"Batch processing too slow: {sequential_time/10:.2f}ms per item"

    def test_memory_usage(self):
        """Indexing 10k embeddings must cost a reasonable amount of RSS."""
        import psutil
        import os

        process = psutil.Process(os.getpid())

        # RSS snapshot before indexing, in MB.
        mem_before = process.memory_info().rss / 1024 / 1024

        for i in range(10000):
            self.faiss_manager.add_embedding(f"emb_{i}", self._random_embedding())

        # RSS snapshot after indexing, in MB.
        mem_after = process.memory_info().rss / 1024 / 1024
        mem_used = mem_after - mem_before

        print(f"\n Memory Usage (10k embeddings):")
        print(f" Before: {mem_before:.1f} MB")
        print(f" After: {mem_after:.1f} MB")
        print(f" Used: {mem_used:.1f} MB")
        print(f" Per embedding: {mem_used/10000*1024:.2f} KB")

        # Expected cost per embedding:
        #   512 dims * 4 bytes (float32) = 2 KB raw
        #   + FAISS overhead + metadata  => roughly 3-5 KB.
        # The threshold below is in MB per embedding — generous headroom.
        assert mem_used / 10000 < 10, f"Memory usage too high: {mem_used/10000:.2f} MB per embedding"
|
|
|
|
|
|
class TestPerformanceRegression:
    """Regression guards for the two hot paths: fusion and FAISS search.

    Each test runs its operation 100 times and checks both the average
    and the 95th-percentile latency against a fixed baseline.
    """

    @staticmethod
    def _timed_runs(func, iterations: int = 100):
        """Run *func* ``iterations`` times; return ``(avg_ms, p95_ms)``.

        Consolidates the timing/percentile boilerplate previously
        duplicated in both regression tests.
        """
        times = []
        for _ in range(iterations):
            start = time.perf_counter()
            func()
            times.append((time.perf_counter() - start) * 1000)
        avg_ms = sum(times) / len(times)
        # Index-based p95: with 100 samples this picks the 96th fastest.
        p95_ms = sorted(times)[int(len(times) * 0.95)]
        return avg_ms, p95_ms

    def test_no_performance_regression_fusion(self):
        """Fusion baseline: avg <100ms, p95 <150ms."""
        fusion_engine = FusionEngine()

        # Two-modality fusion with fixed inputs.
        image_emb = np.random.randn(512).astype(np.float32)
        text_emb = np.random.randn(512).astype(np.float32)

        avg_time, p95_time = self._timed_runs(
            lambda: fusion_engine.fuse({
                "image": image_emb,
                "text": text_emb
            })
        )

        print(f"\n Fusion Regression Test:")
        print(f" Average: {avg_time:.2f}ms")
        print(f" P95: {p95_time:.2f}ms")

        assert avg_time < 100, f"Performance regression detected: {avg_time:.2f}ms"
        assert p95_time < 150, f"P95 regression detected: {p95_time:.2f}ms"

    def test_no_performance_regression_faiss(self):
        """FAISS baseline: avg <50ms, p95 <75ms on a 1k Flat index."""
        faiss_manager = FAISSManager(dimensions=512, index_type="Flat")

        # Populate the index with 1000 random embeddings.
        for i in range(1000):
            vector = np.random.randn(512).astype(np.float32)
            faiss_manager.add_embedding(f"emb_{i}", vector)

        query = np.random.randn(512).astype(np.float32)

        avg_time, p95_time = self._timed_runs(
            lambda: faiss_manager.search_similar(query, k=10)
        )

        print(f"\n FAISS Regression Test:")
        print(f" Average: {avg_time:.2f}ms")
        print(f" P95: {p95_time:.2f}ms")

        assert avg_time < 50, f"Performance regression detected: {avg_time:.2f}ms"
        assert p95_time < 75, f"P95 regression detected: {p95_time:.2f}ms"
|
|
|
|
|
|
if __name__ == "__main__":
|
|
pytest.main([__file__, "-v", "-s"])
|