feat: Phase 1 - Quality evaluation system

- Selected and copied 27 representative documents (10 simple, 12 medium, 5 complex)
- Complete CLI annotation tool (tools/annotation_tool.py)
- Detailed annotation guide (docs/annotation_guide.md)
- Quality evaluator (evaluation/quality_evaluator.py)
  * Precision, Recall and F1-score computation (sketched after this list)
  * Identification of false positives/negatives
  * Per-PII-type metrics
  * JSON export and text reports
- Leak scanner (evaluation/leak_scanner.py)
  * Residual PII detection (CRITIQUE)
  * New PII detection (HAUTE)
  * PDF metadata scan (MOYENNE)
- Performance benchmark (evaluation/benchmark.py)
  * Processing time measurement
  * CPU/RAM usage measurement
  * JSON/CSV export
- Complete unit tests for all components
- Complete documentation of the evaluation module
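For reference, a minimal sketch of the precision/recall/F1 computation the evaluator tests below exercise, assuming the standard formulas (the actual implementation lives in evaluation/quality_evaluator.py):

def calculate_metrics(tp: int, fp: int, fn: int) -> tuple[float, float, float]:
    """Standard precision/recall/F1 from TP, FP and FN counts."""
    precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
    recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
    f1 = (2 * precision * recall / (precision + recall)
          if (precision + recall) > 0 else 0.0)
    return precision, recall, f1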

Completed tasks:
- 1.1.1 Selection of 27 documents (instead of 30)
- 1.1.2 CLI annotation tool
- 1.2.1 Quality evaluator
- 1.2.2 Leak scanner
- 1.2.3 Performance benchmark

Next steps:
- 1.1.3 Annotation of the 27 documents (manual)
- 1.1.4 Medical stopword enrichment
- 1.3 Baseline measurement
2026-03-02 10:07:41 +01:00
parent 0067738df6
commit 340348b820
86 changed files with 35587 additions and 40 deletions


@@ -0,0 +1,79 @@
#!/usr/bin/env python3
"""
Unit tests for the benchmark.
"""
import pytest
from pathlib import Path

from evaluation.benchmark import Benchmark, BenchmarkResult


class TestBenchmark:
    """Tests for Benchmark."""

    def test_get_system_info(self):
        """Test system information retrieval."""
        benchmark = Benchmark(Path("tests/ground_truth"))
        system_info = benchmark.get_system_info()

        assert "os" in system_info
        assert "cpu" in system_info
        assert "ram_gb" in system_info
        assert "python_version" in system_info
        assert system_info["ram_gb"] > 0

    def test_calculate_summary(self):
        """Test summary computation."""
        benchmark = Benchmark(Path("tests/ground_truth"))
        results = [
            BenchmarkResult(
                pdf_path="test1.pdf",
                processing_time_s=10.0,
                cpu_usage_percent=50.0,
                ram_usage_mb=100.0,
                pii_detected=10
            ),
            BenchmarkResult(
                pdf_path="test2.pdf",
                processing_time_s=20.0,
                cpu_usage_percent=60.0,
                ram_usage_mb=200.0,
                pii_detected=20
            )
        ]

        summary = benchmark.calculate_summary(results)

        assert summary["documents_count"] == 2
        assert summary["avg_time_per_doc"] == 15.0
        assert summary["min_time"] == 10.0
        assert summary["max_time"] == 20.0
        assert summary["avg_cpu_percent"] == 55.0
        assert summary["avg_ram_mb"] == 150.0
        assert summary["total_pii_detected"] == 30
        assert summary["avg_pii_per_doc"] == 15.0

    def test_benchmark_result_to_dict(self):
        """Test conversion to a dictionary."""
        result = BenchmarkResult(
            pdf_path="test.pdf",
            processing_time_s=12.345,
            time_per_page_s=4.115,
            cpu_usage_percent=67.89,
            ram_usage_mb=123.45,
            pii_detected=15
        )

        data = result.to_dict()

        assert data["pdf_path"] == "test.pdf"
        assert data["processing_time_s"] == 12.35  # rounded to 2 decimal places
        assert data["time_per_page_s"] == 4.12
        assert data["cpu_usage_percent"] == 67.89
        assert data["ram_usage_mb"] == 123.45
        assert data["pii_detected"] == 15


if __name__ == "__main__":
    pytest.main([__file__, "-v"])
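For context, an assumed sketch of the result record and aggregation these tests rely on; the real definitions live in evaluation/benchmark.py, the default for time_per_page_s is a guess (test_calculate_summary omits it), and calculate_summary is written here as a free function rather than the Benchmark method:

from dataclasses import dataclass, asdict

@dataclass
class BenchmarkResult:
    pdf_path: str
    processing_time_s: float
    cpu_usage_percent: float
    ram_usage_mb: float
    pii_detected: int
    time_per_page_s: float = 0.0  # assumed default

    def to_dict(self) -> dict:
        # Round float fields to 2 decimal places, as the tests expect
        return {k: round(v, 2) if isinstance(v, float) else v
                for k, v in asdict(self).items()}

def calculate_summary(results: list[BenchmarkResult]) -> dict:
    # Aggregate per-document measurements into the summary the tests check
    times = [r.processing_time_s for r in results]
    n = len(results)
    return {
        "documents_count": n,
        "avg_time_per_doc": sum(times) / n,
        "min_time": min(times),
        "max_time": max(times),
        "avg_cpu_percent": sum(r.cpu_usage_percent for r in results) / n,
        "avg_ram_mb": sum(r.ram_usage_mb for r in results) / n,
        "total_pii_detected": sum(r.pii_detected for r in results),
        "avg_pii_per_doc": sum(r.pii_detected for r in results) / n,
    }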


@@ -0,0 +1,110 @@
#!/usr/bin/env python3
"""
Unit tests for the leak scanner.
"""
import pytest
from pathlib import Path

from evaluation.leak_scanner import LeakScanner, LeakReport


class TestLeakScanner:
    """Tests for LeakScanner."""

    def test_scan_text_no_leak(self):
        """Test with no leak."""
        scanner = LeakScanner()
        text = "Le patient a été examiné par le Dr. [NOM] le [DATE]."
        original_pii = [
            {"kind": "NOM", "original": "DUPONT"},
            {"kind": "DATE", "original": "15/01/2024"}
        ]

        leaks = scanner.scan_text(text, original_pii)

        assert len(leaks) == 0

    def test_scan_text_original_pii_present(self):
        """Test with an original PII still present."""
        scanner = LeakScanner()
        text = "Le patient DUPONT a été examiné le 15/01/2024."
        original_pii = [
            {"kind": "NOM", "original": "DUPONT"},
            {"kind": "DATE", "original": "15/01/2024"}
        ]

        leaks = scanner.scan_text(text, original_pii)

        assert len(leaks) == 2
        assert all(leak["severity"] == "CRITIQUE" for leak in leaks)
        assert all(leak["type"] == "original_pii_present" for leak in leaks)

    def test_scan_text_new_pii_detected(self):
        """Test with a newly detected PII."""
        scanner = LeakScanner()
        text = "Contact: jean.dupont@example.com ou 01 23 45 67 89"
        original_pii = []

        leaks = scanner.scan_text(text, original_pii)

        # Should detect the email and the phone number
        assert len(leaks) >= 2

        email_leak = next((l for l in leaks if l["pii_type"] == "EMAIL"), None)
        assert email_leak is not None
        assert email_leak["severity"] == "HAUTE"

        tel_leak = next((l for l in leaks if l["pii_type"] == "TEL"), None)
        assert tel_leak is not None
        assert tel_leak["severity"] == "HAUTE"

    def test_leak_report_is_safe(self):
        """Test a safe report."""
        report = LeakReport(
            is_safe=True,
            leak_count=0,
            leaks=[],
            severity_counts={}
        )

        assert report.is_safe
        assert report.leak_count == 0

    def test_leak_report_not_safe(self):
        """Test an unsafe report."""
        report = LeakReport(
            is_safe=False,
            leak_count=2,
            leaks=[
                {"severity": "CRITIQUE", "type": "original_pii_present"},
                {"severity": "HAUTE", "type": "new_pii_detected"}
            ],
            severity_counts={"CRITIQUE": 1, "HAUTE": 1}
        )

        assert not report.is_safe
        assert report.leak_count == 2
        assert report.severity_counts["CRITIQUE"] == 1
        assert report.severity_counts["HAUTE"] == 1

    def test_leak_report_to_dict(self):
        """Test conversion to a dictionary."""
        report = LeakReport(
            is_safe=False,
            leak_count=1,
            leaks=[{"severity": "CRITIQUE"}],
            severity_counts={"CRITIQUE": 1}
        )

        data = report.to_dict()

        assert data["is_safe"] is False
        assert data["leak_count"] == 1
        assert len(data["leaks"]) == 1
        assert data["severity_counts"]["CRITIQUE"] == 1


if __name__ == "__main__":
    pytest.main([__file__, "-v"])
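A rough sketch of the two scan stages these tests describe, with simplified regexes that are assumptions for illustration; the real scanner in evaluation/leak_scanner.py covers more PII patterns plus the PDF metadata scan:

import re

# Simplified, assumed patterns; the real pattern set is larger
EMAIL_RE = re.compile(r"[\w.+-]+@[\w-]+\.[\w.]+")
TEL_RE = re.compile(r"\b0\d(?:[ .-]?\d{2}){4}\b")  # French phone numbers

def scan_text(text: str, original_pii: list[dict]) -> list[dict]:
    leaks = []
    # Stage 1 (CRITIQUE): an original PII string survives verbatim in the output
    for pii in original_pii:
        if pii["original"] in text:
            leaks.append({"type": "original_pii_present", "severity": "CRITIQUE",
                          "pii_type": pii["kind"], "value": pii["original"]})
    # Stage 2 (HAUTE): new PII surfaces that was never annotated
    for pii_type, pattern in (("EMAIL", EMAIL_RE), ("TEL", TEL_RE)):
        for match in pattern.finditer(text):
            leaks.append({"type": "new_pii_detected", "severity": "HAUTE",
                          "pii_type": pii_type, "value": match.group()})
    return leaks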


@@ -0,0 +1,145 @@
#!/usr/bin/env python3
"""
Unit tests for the quality evaluator.
"""
import pytest
from pathlib import Path

from evaluation.quality_evaluator import QualityEvaluator, EvaluationResult


class TestQualityEvaluator:
    """Tests for QualityEvaluator."""

    def test_normalize_text(self):
        """Test text normalization."""
        evaluator = QualityEvaluator(Path("tests/ground_truth"))

        assert evaluator.normalize_text("DUPONT") == "dupont"
        assert evaluator.normalize_text(" DUPONT ") == "dupont"
        assert evaluator.normalize_text("DUPONT\n\nMARTIN") == "dupont martin"
        assert evaluator.normalize_text("Jean-Pierre") == "jean-pierre"

    def test_types_match(self):
        """Test type matching."""
        evaluator = QualityEvaluator(Path("tests/ground_truth"))

        # Direct matches
        assert evaluator.types_match("NOM", "NOM")
        assert evaluator.types_match("NOM", "NOM_GLOBAL")
        assert evaluator.types_match("TEL", "TEL_GLOBAL")

        # Cross matches
        assert evaluator.types_match("NOM", "PRENOM")
        assert evaluator.types_match("PRENOM", "NOM")

        # Non-matches
        assert not evaluator.types_match("NOM", "TEL")
        assert not evaluator.types_match("EMAIL", "ADRESSE")

    def test_calculate_metrics(self):
        """Test metric computation."""
        evaluator = QualityEvaluator(Path("tests/ground_truth"))

        # Perfect case
        precision, recall, f1 = evaluator.calculate_metrics(10, 0, 0)
        assert precision == 1.0
        assert recall == 1.0
        assert f1 == 1.0

        # Case with errors
        precision, recall, f1 = evaluator.calculate_metrics(8, 2, 2)
        assert precision == 0.8  # 8 / (8 + 2)
        assert recall == 0.8  # 8 / (8 + 2)
        assert f1 == 0.8

        # All-zero case
        precision, recall, f1 = evaluator.calculate_metrics(0, 0, 0)
        assert precision == 0.0
        assert recall == 0.0
        assert f1 == 0.0

    def test_compare_simple(self):
        """Test a simple comparison."""
        evaluator = QualityEvaluator(Path("tests/ground_truth"))
        annotations = [
            {"page": 0, "type": "NOM", "text": "DUPONT", "context": "Dr. DUPONT"},
            {"page": 0, "type": "TEL", "text": "01 23 45 67 89", "context": "Tel: 01 23 45 67 89"}
        ]
        detections = [
            {"page": 0, "kind": "NOM", "original": "DUPONT"},
            {"page": 0, "kind": "TEL", "original": "01 23 45 67 89"}
        ]

        tp, fn, fp = evaluator.compare(annotations, detections)

        assert len(tp) == 2
        assert len(fn) == 0
        assert len(fp) == 0

    def test_compare_with_false_negative(self):
        """Test with a false negative."""
        evaluator = QualityEvaluator(Path("tests/ground_truth"))
        annotations = [
            {"page": 0, "type": "NOM", "text": "DUPONT", "context": "Dr. DUPONT"},
            {"page": 0, "type": "TEL", "text": "01 23 45 67 89", "context": "Tel: 01 23 45 67 89"}
        ]
        detections = [
            {"page": 0, "kind": "NOM", "original": "DUPONT"}
            # TEL missing
        ]

        tp, fn, fp = evaluator.compare(annotations, detections)

        assert len(tp) == 1
        assert len(fn) == 1
        assert len(fp) == 0
        assert fn[0]["type"] == "TEL"
        assert fn[0]["reason"] == "not_detected"

    def test_compare_with_false_positive(self):
        """Test with a false positive."""
        evaluator = QualityEvaluator(Path("tests/ground_truth"))
        annotations = [
            {"page": 0, "type": "NOM", "text": "DUPONT", "context": "Dr. DUPONT"}
        ]
        detections = [
            {"page": 0, "kind": "NOM", "original": "DUPONT"},
            {"page": 0, "kind": "NOM", "original": "MARTIN"}  # false positive
        ]

        tp, fn, fp = evaluator.compare(annotations, detections)

        assert len(tp) == 1
        assert len(fn) == 0
        assert len(fp) == 1
        assert fp[0]["text"] == "MARTIN"

    def test_evaluation_result_to_dict(self):
        """Test conversion to a dictionary."""
        result = EvaluationResult(
            pdf_path="test.pdf",
            true_positives=10,
            false_positives=2,
            false_negatives=1,
            precision=0.8333,
            recall=0.9091,
            f1_score=0.8696
        )

        data = result.to_dict()

        assert data["pdf_path"] == "test.pdf"
        assert data["true_positives"] == 10
        assert data["precision"] == 0.8333
        assert data["recall"] == 0.9091
        assert data["f1_score"] == 0.8696


if __name__ == "__main__":
    pytest.main([__file__, "-v"])
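Finally, a minimal sketch of the type-matching rule test_types_match encodes: *_GLOBAL variants collapse to their base type, and NOM/PRENOM count as a cross match. The equivalence table is an assumption; the real one lives in evaluation/quality_evaluator.py:

# Assumed equivalence groups for cross matches
EQUIVALENT_TYPES = [{"NOM", "PRENOM"}]

def types_match(annotated: str, detected: str) -> bool:
    base_a = annotated.removesuffix("_GLOBAL")  # NOM_GLOBAL -> NOM, etc.
    base_d = detected.removesuffix("_GLOBAL")
    if base_a == base_d:
        return True
    return any(base_a in group and base_d in group
               for group in EQUIVALENT_TYPES)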