# Files
# Geniusia_v2/test_summary_dashboard.py
# 2026-03-05 00:20:25 +01:00
#
# 143 lines
# 4.5 KiB
# Python
#!/usr/bin/env python3
"""
Test simple du tableau de bord résumé
"""
import sys
from datetime import datetime
from PyQt5.QtWidgets import QApplication
# Ajouter le chemin du module
sys.path.insert(0, 'geniusia2')
from gui.dialogs.summary_dashboard import SummaryDashboard
def test_dashboard_basic():
    """Smoke-test the summary dashboard.

    Populates the dashboard with five sample task-metric records, checks
    the table row count, exercises the mode filter and the search box,
    then shows the window and runs the Qt event loop until the user
    closes it.

    Returns:
        int: the exit code of ``app.exec_()``.
    """
    print("Test du tableau de bord résumé")
    print("=" * 50)

    app = QApplication(sys.argv)

    dashboard = SummaryDashboard()
    print("✓ Tableau de bord créé")

    def _record(task_id, task_name, mode, score, observations,
                concordance, corrections, correction_rate):
        # Build one metrics dict; the timestamp is "now" so the
        # dashboard displays fresh data.
        return {
            "task_id": task_id,
            "task_name": task_name,
            "mode": mode,
            "confidence_score": score,
            "observation_count": observations,
            "concordance_rate": concordance,
            "correction_count": corrections,
            "correction_rate": correction_rate,
            "last_execution": datetime.now().isoformat(),
        }

    test_tasks = [
        _record("ouvrir_facture_001", "Ouvrir Facture", "auto",
                0.97, 45, 0.98, 1, 0.022),
        _record("valider_commande_002", "Valider Commande", "assist",
                0.89, 12, 0.92, 2, 0.167),
        _record("saisie_client_003", "Saisie Données Client", "shadow",
                0.65, 3, 0.67, 0, 0.0),
        _record("export_rapport_004", "Export Rapport Mensuel", "auto",
                0.96, 32, 0.97, 1, 0.031),
        _record("envoi_email_005", "Envoi Email Confirmation", "assist",
                0.88, 8, 0.88, 1, 0.125),
    ]
    print(f"✓ Ajout de {len(test_tasks)} tâches de test")

    for record in test_tasks:
        dashboard.update_metrics(record["task_id"], record)
    print("✓ Métriques mises à jour")

    # The table must hold exactly one row per injected task.
    assert dashboard.tasks_table.rowCount() == len(test_tasks), \
        f"Nombre de lignes incorrect: {dashboard.tasks_table.rowCount()} != {len(test_tasks)}"
    print(f"✓ Tableau contient {dashboard.tasks_table.rowCount()} lignes")

    # Global statistics labels, printed in the dashboard's own order.
    print("\nStatistiques globales:")
    for stats_label in (dashboard.total_tasks_label,
                        dashboard.shadow_tasks_label,
                        dashboard.assist_tasks_label,
                        dashboard.auto_tasks_label):
        print(f" - {stats_label.text()}")

    # Exercise each mode filter, then restore the unfiltered view.
    print("\nTest du filtrage par mode...")
    for mode_name in ("Autopilot", "Assisté", "Shadow"):
        dashboard.mode_filter.setCurrentText(mode_name)
        print(f"✓ Filtre {mode_name}: {dashboard.tasks_table.rowCount()} tâches")
    dashboard.mode_filter.setCurrentText("Tous")

    # Exercise the free-text search, then clear it.
    print("\nTest de la recherche...")
    dashboard.search_input.setText("facture")
    print(f"✓ Recherche 'facture': {dashboard.tasks_table.rowCount()} tâche(s)")
    dashboard.search_input.clear()

    print("\n" + "=" * 50)
    print("✓ Tous les tests basiques réussis!")
    print("\nAffichage du tableau de bord...")
    print("(Fermez la fenêtre pour terminer le test)")
    dashboard.show()
    return app.exec_()
# Script entry point: run the dashboard smoke test and use whatever
# app.exec_() returns as the process exit code.
if __name__ == "__main__":
    sys.exit(test_dashboard_basic())