#!/usr/bin/env python3
"""Unit tests for the benchmark module."""

import pytest
from pathlib import Path

from evaluation.benchmark import Benchmark, BenchmarkResult


class TestBenchmark:
    """Test suite for Benchmark."""

    def test_get_system_info(self):
        """The system-info dict exposes OS, CPU, RAM and Python fields."""
        info = Benchmark(Path("tests/ground_truth")).get_system_info()

        for key in ("os", "cpu", "ram_gb", "python_version"):
            assert key in info
        assert info["ram_gb"] > 0

    def test_calculate_summary(self):
        """The summary aggregates times, resource usage and PII counts."""
        bench = Benchmark(Path("tests/ground_truth"))
        sample_results = [
            BenchmarkResult(
                pdf_path="test1.pdf",
                processing_time_s=10.0,
                cpu_usage_percent=50.0,
                ram_usage_mb=100.0,
                pii_detected=10,
            ),
            BenchmarkResult(
                pdf_path="test2.pdf",
                processing_time_s=20.0,
                cpu_usage_percent=60.0,
                ram_usage_mb=200.0,
                pii_detected=20,
            ),
        ]

        summary = bench.calculate_summary(sample_results)

        # Expected aggregates for the two sample results above.
        expected = {
            "documents_count": 2,
            "avg_time_per_doc": 15.0,
            "min_time": 10.0,
            "max_time": 20.0,
            "avg_cpu_percent": 55.0,
            "avg_ram_mb": 150.0,
            "total_pii_detected": 30,
            "avg_pii_per_doc": 15.0,
        }
        for key, value in expected.items():
            assert summary[key] == value

    def test_benchmark_result_to_dict(self):
        """Conversion to a dict keeps identity fields and formats metrics."""
        data = BenchmarkResult(
            pdf_path="test.pdf",
            processing_time_s=12.345,
            time_per_page_s=4.115,
            cpu_usage_percent=67.89,
            ram_usage_mb=123.45,
            pii_detected=15,
        ).to_dict()

        assert data["pdf_path"] == "test.pdf"
        assert data["processing_time_s"] == 12.35  # Rounded to 2 decimals
        assert data["time_per_page_s"] == 4.12
        assert data["cpu_usage_percent"] == 67.89
        assert data["ram_usage_mb"] == 123.45
        assert data["pii_detected"] == 15


if __name__ == "__main__":
    pytest.main([__file__, "-v"])