feat: Phase 1 - Quality evaluation system
- Selection and copying of 27 representative documents (10 simple, 12 medium, 5 complex)
- Full-featured CLI annotation tool (tools/annotation_tool.py)
- Detailed annotation guide (docs/annotation_guide.md)
- Quality evaluator (evaluation/quality_evaluator.py, metrics sketched below)
  * Precision, Recall, and F1-score computation
  * Identification of false positives/negatives
  * Per-PII-type metrics
  * JSON export and text reports
- Leak scanner (evaluation/leak_scanner.py)
  * Residual PII detection (CRITICAL)
  * New PII detection (HIGH)
  * PDF metadata scan (MEDIUM)
- Performance benchmark (evaluation/benchmark.py)
  * Processing time measurement
  * CPU/RAM measurement
  * JSON/CSV export
- Complete unit tests for all components
- Full documentation of the evaluation module

Completed tasks:
- 1.1.1 Selection of 27 documents (instead of 30)
- 1.1.2 CLI annotation tool
- 1.2.1 Quality evaluator
- 1.2.2 Leak scanner
- 1.2.3 Performance benchmark

Next steps:
- 1.1.3 Annotation of the 27 documents (manual)
- 1.1.4 Enrichment of medical stopwords
- 1.3 Baseline measurement
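As a minimal sketch of the Precision/Recall/F1 computation that evaluation/quality_evaluator.py presumably performs, consider the function below. The function name and the span-set representation are illustrative assumptions, not the module's actual API:

# Hedged sketch: compare annotated ("ground truth") PII spans with detected
# ones. Spans are modeled here as hashable tuples, e.g. (pii_type, start, end);
# the real evaluator's representation may differ.
def prf1(ground_truth: set, detected: set) -> dict:
    tp = len(ground_truth & detected)  # true positives: correctly redacted
    fp = len(detected - ground_truth)  # false positives: over-redaction
    fn = len(ground_truth - detected)  # false negatives: leaked PII
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return {"precision": precision, "recall": recall, "f1": f1,
            "false_positives": fp, "false_negatives": fn}

Per-PII-type metrics then follow by filtering both sets to a single type before calling prf1.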
tests/unit/test_benchmark.py (new file, +79 lines)
@@ -0,0 +1,79 @@
#!/usr/bin/env python3
"""
Unit tests for the benchmark.
"""
import pytest
from pathlib import Path
from evaluation.benchmark import Benchmark, BenchmarkResult


class TestBenchmark:
    """Tests for Benchmark."""

    def test_get_system_info(self):
        """Test retrieval of system information."""
        benchmark = Benchmark(Path("tests/ground_truth"))

        system_info = benchmark.get_system_info()

        assert "os" in system_info
        assert "cpu" in system_info
        assert "ram_gb" in system_info
        assert "python_version" in system_info
        assert system_info["ram_gb"] > 0

    def test_calculate_summary(self):
        """Test summary calculation."""
        benchmark = Benchmark(Path("tests/ground_truth"))

        results = [
            BenchmarkResult(
                pdf_path="test1.pdf",
                processing_time_s=10.0,
                cpu_usage_percent=50.0,
                ram_usage_mb=100.0,
                pii_detected=10
            ),
            BenchmarkResult(
                pdf_path="test2.pdf",
                processing_time_s=20.0,
                cpu_usage_percent=60.0,
                ram_usage_mb=200.0,
                pii_detected=20
            )
        ]

        summary = benchmark.calculate_summary(results)

        assert summary["documents_count"] == 2
        assert summary["avg_time_per_doc"] == 15.0
        assert summary["min_time"] == 10.0
        assert summary["max_time"] == 20.0
        assert summary["avg_cpu_percent"] == 55.0
        assert summary["avg_ram_mb"] == 150.0
        assert summary["total_pii_detected"] == 30
        assert summary["avg_pii_per_doc"] == 15.0

    def test_benchmark_result_to_dict(self):
        """Test conversion to a dictionary."""
        result = BenchmarkResult(
            pdf_path="test.pdf",
            processing_time_s=12.345,
            time_per_page_s=4.115,
            cpu_usage_percent=67.89,
            ram_usage_mb=123.45,
            pii_detected=15
        )

        data = result.to_dict()

        assert data["pdf_path"] == "test.pdf"
        assert data["processing_time_s"] == 12.35  # rounded to 2 decimal places
        assert data["time_per_page_s"] == 4.12
        assert data["cpu_usage_percent"] == 67.89
        assert data["ram_usage_mb"] == 123.45
        assert data["pii_detected"] == 15


if __name__ == "__main__":
    pytest.main([__file__, "-v"])
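For reference, here is a minimal sketch of the Benchmark / BenchmarkResult API these tests exercise, reconstructed from the assertions alone. The use of psutil and platform, the dataclass layout, the default for time_per_page_s, and the rounding strategy in to_dict() are assumptions, not the actual evaluation/benchmark.py implementation:

# Hedged sketch of the API assumed by tests/unit/test_benchmark.py.
import platform
import statistics
from dataclasses import dataclass, asdict
from pathlib import Path

import psutil  # assumed dependency for RAM measurement


@dataclass
class BenchmarkResult:
    pdf_path: str
    processing_time_s: float
    cpu_usage_percent: float
    ram_usage_mb: float
    pii_detected: int
    time_per_page_s: float = 0.0  # default assumed: not passed in every test

    def to_dict(self) -> dict:
        # Round float fields to 2 decimal places for readable reports,
        # as test_benchmark_result_to_dict expects.
        return {k: round(v, 2) if isinstance(v, float) else v
                for k, v in asdict(self).items()}


class Benchmark:
    def __init__(self, corpus_dir: Path):
        self.corpus_dir = corpus_dir  # directory of annotated test PDFs

    def get_system_info(self) -> dict:
        # Hardware/software context recorded alongside each benchmark run.
        return {
            "os": platform.system(),
            "cpu": platform.processor(),
            "ram_gb": psutil.virtual_memory().total / 1024 ** 3,
            "python_version": platform.python_version(),
        }

    def calculate_summary(self, results: list) -> dict:
        # Aggregate per-document results into corpus-level statistics.
        times = [r.processing_time_s for r in results]
        return {
            "documents_count": len(results),
            "avg_time_per_doc": statistics.mean(times),
            "min_time": min(times),
            "max_time": max(times),
            "avg_cpu_percent": statistics.mean(r.cpu_usage_percent for r in results),
            "avg_ram_mb": statistics.mean(r.ram_usage_mb for r in results),
            "total_pii_detected": sum(r.pii_detected for r in results),
            "avg_pii_per_doc": statistics.mean(r.pii_detected for r in results),
        }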