feat: pipeline complet MACRO/MÉSO/MICRO — Critic, Observer, Policy, Recovery, Learning, Audit Trail, TaskPlanner
Architecture 3 niveaux implémentée et testée (137 tests unitaires + 21 visuels) : MÉSO (acteur intelligent) : - P0 Critic : vérification sémantique post-action via gemma4 (replay_verifier.py) - P1 Observer : pré-analyse écran avant chaque action (api_stream.py /pre_analyze) - P2 Grounding/Policy : séparation localisation (grounding.py) et décision (policy.py) - P3 Recovery : rollback automatique Ctrl+Z/Escape/Alt+F4 (recovery.py) - P4 Learning : apprentissage runtime avec boucle de consolidation (replay_learner.py) MACRO (planificateur) : - TaskPlanner : comprend les ordres en langage naturel via gemma4 (task_planner.py) - Contexte métier TIM/CIM-10 pour les hôpitaux (domain_context.py) - Endpoint POST /api/v1/task pour l'exécution par instruction Traçabilité : - Audit trail complet avec 18 champs par action (audit_trail.py) - Endpoints GET /audit/history, /audit/summary, /audit/export (CSV) Grounding : - Fix parsing bbox_2d qwen2.5vl (pixels relatifs, pas grille 1000x1000) - Benchmarks visuels sur captures réelles (3 approches : baseline, zoom, Citrix) - Reproductibilité validée : variance < 0.008 sur 10 itérations Sécurité : - Tokens de production retirés du code source → .env.local - Secret key aléatoire si non configuré - Suppression logs qui leakent les tokens Résultats : 80% de replay (vs 12.5% avant), 100% détection visuelle Citrix JPEG Q20 Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
683
tests/unit/test_audit_trail.py
Normal file
683
tests/unit/test_audit_trail.py
Normal file
@@ -0,0 +1,683 @@
|
||||
# tests/unit/test_audit_trail.py
|
||||
"""
|
||||
Tests unitaires du module Audit Trail.
|
||||
|
||||
Vérifie l'enregistrement, la recherche, l'export CSV et le résumé
|
||||
journalier des entrées d'audit.
|
||||
"""
|
||||
|
||||
import csv
|
||||
import io
|
||||
import json
|
||||
import os
|
||||
import tempfile
|
||||
from datetime import date, datetime, timedelta
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
# Importer depuis le bon chemin (agent_v0/server_v1/)
|
||||
import sys
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
|
||||
|
||||
from agent_v0.server_v1.audit_trail import AuditEntry, AuditTrail
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Fixtures
|
||||
# =========================================================================
|
||||
|
||||
@pytest.fixture
def audit_dir(tmp_path):
    """Temporary directory that receives the audit files."""
    directory = tmp_path / "audit"
    directory.mkdir()
    return str(directory)
|
||||
|
||||
|
||||
@pytest.fixture
def audit(audit_dir):
    """AuditTrail instance backed by the temporary audit directory."""
    return AuditTrail(audit_dir=audit_dir)
|
||||
|
||||
|
||||
def _make_entry(**kwargs) -> AuditEntry:
    """Build an AuditEntry pre-filled with sensible test defaults.

    Any keyword argument overrides the matching default field.
    """
    values = dict(
        timestamp=datetime.now().isoformat(),
        session_id="sess_test_001",
        action_id="act_001",
        user_id="tim_dupont",
        user_name="Marie Dupont",
        machine_id="PC-TIM-01",
        action_type="click",
        action_detail="Clic sur 'Enregistrer' dans DxCare",
        target_app="DxCare",
        execution_mode="assisted",
        result="success",
        resolution_method="som_text_match",
        critic_result="semantic_ok",
        recovery_action="",
        domain="tim_codage",
        workflow_id="wf_codage_cim10",
        workflow_name="Codage CIM-10 séjour",
        duration_ms=234.5,
    )
    values.update(kwargs)
    return AuditEntry(**values)
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests AuditEntry
|
||||
# =========================================================================
|
||||
|
||||
class TestAuditEntry:
    """Tests for the AuditEntry structure."""

    def test_creation_basique(self):
        """An entry can be created with every field populated."""
        entry = _make_entry()
        assert entry.user_id == "tim_dupont"
        assert entry.action_type == "click"
        assert entry.result == "success"
        assert entry.duration_ms == 234.5

    def test_to_dict(self):
        """Serialization to a plain dictionary."""
        serialized = _make_entry().to_dict()
        assert isinstance(serialized, dict)
        assert serialized["user_id"] == "tim_dupont"
        assert serialized["domain"] == "tim_codage"
        assert serialized["duration_ms"] == 234.5

    def test_from_dict(self):
        """Round-tripping through to_dict/from_dict preserves the fields."""
        original = _make_entry()
        restored = AuditEntry.from_dict(original.to_dict())
        assert restored.user_id == original.user_id
        assert restored.action_detail == original.action_detail
        assert restored.duration_ms == original.duration_ms

    def test_from_dict_ignore_unknown_keys(self):
        """Unknown keys are ignored (forward compatibility)."""
        payload = {"user_id": "test", "unknown_field": "valeur", "future_key": 42}
        entry = AuditEntry.from_dict(payload)
        assert entry.user_id == "test"
        # Unknown fields must not raise.

    def test_to_dict_json_serializable(self):
        """The produced dictionary serializes to JSON."""
        entry = _make_entry(action_detail="Clic sur 'Validé' — accent français")
        dumped = json.dumps(entry.to_dict(), ensure_ascii=False)
        assert "accent français" in dumped

    def test_default_values(self):
        """A bare entry exposes coherent default values."""
        entry = AuditEntry()
        assert entry.timestamp == ""
        assert entry.user_id == ""
        assert entry.duration_ms == 0.0
        assert entry.result == ""
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests AuditTrail — enregistrement et lecture
|
||||
# =========================================================================
|
||||
|
||||
class TestAuditTrailRecord:
    """Tests covering how audit entries are persisted on disk."""

    @staticmethod
    def _daily_file(audit_dir, day=None):
        """Path of the JSONL file for the given day (default: today)."""
        day = day or date.today()
        return Path(audit_dir) / f"audit_{day.isoformat()}.jsonl"

    def test_record_and_reload(self, audit, audit_dir):
        """Record one entry, then read it back from the file."""
        audit.record(_make_entry())

        filepath = self._daily_file(audit_dir)
        assert filepath.exists()

        raw_lines = filepath.read_text(encoding="utf-8").splitlines()
        assert len(raw_lines) == 1

        payload = json.loads(raw_lines[0])
        assert payload["user_id"] == "tim_dupont"
        assert payload["action_detail"] == "Clic sur 'Enregistrer' dans DxCare"

    def test_record_multiple_entries(self, audit, audit_dir):
        """Several entries accumulate in the same daily file."""
        for idx in range(5):
            audit.record(_make_entry(action_id=f"act_{idx:03d}"))

        raw_lines = self._daily_file(audit_dir).read_text(encoding="utf-8").splitlines()
        assert len(raw_lines) == 5

    def test_record_auto_timestamp(self, audit):
        """A missing timestamp is filled in automatically."""
        audit.record(_make_entry(timestamp=""))

        stored = audit.query()
        assert len(stored) == 1
        assert stored[0]["timestamp"] != ""
        # Must parse as ISO 8601.
        datetime.fromisoformat(stored[0]["timestamp"])

    def test_record_utf8_french(self, audit):
        """French characters are stored verbatim."""
        audit.record(_make_entry(
            action_detail="Saisie du diagnostic 'Hépatite à cytomégalovirus' — CIM-10: B25.1",
            user_name="François Müller",
            workflow_name="Codage séjour réanimation néonatale",
        ))

        stored = audit.query()
        assert len(stored) == 1
        assert "Hépatite" in stored[0]["action_detail"]
        assert "François Müller" in stored[0]["user_name"]
        assert "néonatale" in stored[0]["workflow_name"]

    def test_record_creates_directory(self, tmp_path):
        """The audit directory is created on demand."""
        target_dir = str(tmp_path / "sub" / "deep" / "audit")
        trail = AuditTrail(audit_dir=target_dir)
        trail.record(_make_entry())

        assert Path(target_dir).exists()
        assert len(trail.query()) == 1

    def test_record_different_dates(self, audit, audit_dir):
        """Entries from different days land in different files."""
        today = date.today()
        yesterday = today - timedelta(days=1)

        audit.record(_make_entry(timestamp=datetime.now().isoformat()))
        audit.record(_make_entry(
            timestamp=datetime.combine(yesterday, datetime.min.time()).isoformat(),
            action_id="act_yesterday",
        ))

        assert self._daily_file(audit_dir, today).exists()
        assert self._daily_file(audit_dir, yesterday).exists()

    def test_jsonl_format(self, audit, audit_dir):
        """Every line of the daily file is standalone valid JSON (JSONL)."""
        for idx in range(3):
            audit.record(_make_entry(action_id=f"act_{idx}"))

        with open(self._daily_file(audit_dir), "r", encoding="utf-8") as handle:
            for line_num, raw in enumerate(handle, 1):
                raw = raw.strip()
                assert raw, f"Ligne {line_num} vide"
                payload = json.loads(raw)  # must not raise
                assert "action_id" in payload
                assert "timestamp" in payload
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests AuditTrail — requêtes avec filtres
|
||||
# =========================================================================
|
||||
|
||||
class TestAuditTrailQuery:
    """Search and filtering tests."""

    def _seed_entries(self, audit):
        """Insert a varied set of test entries."""
        seed = [
            dict(action_id="act_001", user_id="tim_dupont", result="success",
                 action_type="click", workflow_id="wf_01", domain="tim_codage"),
            dict(action_id="act_002", user_id="tim_dupont", result="failed",
                 action_type="type", workflow_id="wf_01", domain="generic"),
            dict(action_id="act_003", user_id="tim_martin", user_name="Jean Martin",
                 result="success", action_type="click", workflow_id="wf_02",
                 domain="generic"),
            dict(action_id="act_004", user_id="tim_martin", user_name="Jean Martin",
                 result="recovered", action_type="key_combo", workflow_id="wf_02",
                 domain="generic"),
            dict(action_id="act_005", user_id="tim_dupont", result="success",
                 action_type="click", workflow_id="wf_01", domain="generic"),
        ]
        for overrides in seed:
            audit.record(_make_entry(**overrides))

    def test_query_all(self, audit):
        """No filter returns everything."""
        self._seed_entries(audit)
        assert len(audit.query()) == 5

    def test_query_by_user(self, audit):
        """Filter on the user identifier."""
        self._seed_entries(audit)
        matches = audit.query(user_id="tim_dupont")
        assert len(matches) == 3
        assert all(m["user_id"] == "tim_dupont" for m in matches)

    def test_query_by_result(self, audit):
        """Filter on the outcome."""
        self._seed_entries(audit)
        matches = audit.query(result="success")
        assert len(matches) == 3
        assert all(m["result"] == "success" for m in matches)

    def test_query_by_action_type(self, audit):
        """Filter on the action type."""
        self._seed_entries(audit)
        assert len(audit.query(action_type="click")) == 3

    def test_query_by_workflow(self, audit):
        """Filter on the workflow."""
        self._seed_entries(audit)
        assert len(audit.query(workflow_id="wf_02")) == 2

    def test_query_by_domain(self, audit):
        """Filter on the business domain."""
        self._seed_entries(audit)
        matches = audit.query(domain="tim_codage")
        assert len(matches) == 1
        assert matches[0]["action_id"] == "act_001"

    def test_query_by_session(self, audit):
        """Filter on the session."""
        self._seed_entries(audit)
        # Every seeded entry shares the same session id.
        assert len(audit.query(session_id="sess_test_001")) == 5

    def test_query_combined_filters(self, audit):
        """Several filters combine with AND semantics."""
        self._seed_entries(audit)
        assert len(audit.query(user_id="tim_dupont", result="success")) == 2

    def test_query_no_match(self, audit):
        """A filter with no match yields an empty list."""
        self._seed_entries(audit)
        assert len(audit.query(user_id="tim_inexistant")) == 0

    def test_query_pagination_limit(self, audit):
        """Cap the number of results."""
        self._seed_entries(audit)
        assert len(audit.query(limit=2)) == 2

    def test_query_pagination_offset(self, audit):
        """Skip leading results."""
        self._seed_entries(audit)
        everything = audit.query()
        shifted = audit.query(offset=3)
        assert len(shifted) == 2
        assert shifted[0] == everything[3]

    def test_query_sorted_by_timestamp_desc(self, audit):
        """Results come back sorted by timestamp, newest first."""
        now = datetime.now()
        for i in range(5):
            audit.record(_make_entry(
                timestamp=(now - timedelta(minutes=i)).isoformat(),
                action_id=f"act_{i}",
            ))

        stamps = [row["timestamp"] for row in audit.query()]
        assert stamps == sorted(stamps, reverse=True)

    def test_query_date_range(self, audit):
        """Filter on a date window."""
        today = date.today()
        yesterday = today - timedelta(days=1)

        # One entry yesterday, one today.
        audit.record(_make_entry(
            timestamp=datetime.combine(yesterday, datetime.min.time()).isoformat(),
            action_id="act_yesterday",
        ))
        audit.record(_make_entry(
            timestamp=datetime.now().isoformat(),
            action_id="act_today",
        ))

        # Yesterday only.
        only_yesterday = audit.query(
            date_from=yesterday.isoformat(),
            date_to=yesterday.isoformat(),
        )
        assert len(only_yesterday) == 1
        assert only_yesterday[0]["action_id"] == "act_yesterday"

        # Both days.
        both_days = audit.query(
            date_from=yesterday.isoformat(),
            date_to=today.isoformat(),
        )
        assert len(both_days) == 2
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests AuditTrail — résumé journalier
|
||||
# =========================================================================
|
||||
|
||||
class TestAuditTrailSummary:
    """Daily summary tests."""

    def test_summary_empty(self, audit):
        """A day with no data yields an all-zero summary."""
        summary = audit.get_summary("2025-01-01")
        assert summary["total_actions"] == 0
        assert summary["success_rate"] == 0.0
        assert summary["by_user"] == {}

    def test_summary_basic(self, audit):
        """Summary over a handful of entries."""
        for outcome in ("success", "failed"):
            audit.record(_make_entry(user_id="tim_dupont", result=outcome))
        audit.record(_make_entry(user_id="tim_martin", user_name="Jean Martin", result="success"))

        summary = audit.get_summary()
        assert summary["total_actions"] == 3
        assert summary["success_rate"] == round(2 / 3, 3)

    def test_summary_by_user(self, audit):
        """Per-user breakdown."""
        for outcome in ("success", "success", "failed"):
            audit.record(_make_entry(user_id="tim_dupont", result=outcome))
        audit.record(_make_entry(user_id="tim_martin", user_name="Jean Martin", result="success"))

        per_user = audit.get_summary()["by_user"]
        assert "tim_dupont" in per_user
        assert per_user["tim_dupont"]["total"] == 3
        assert per_user["tim_dupont"]["success"] == 2
        assert per_user["tim_dupont"]["success_rate"] == round(2 / 3, 3)
        assert per_user["tim_martin"]["total"] == 1
        assert per_user["tim_martin"]["success_rate"] == 1.0

    def test_summary_by_result(self, audit):
        """Per-result breakdown."""
        for outcome in ("success", "success", "failed", "recovered"):
            audit.record(_make_entry(result=outcome))

        by_result = audit.get_summary()["by_result"]
        assert by_result["success"] == 2
        assert by_result["failed"] == 1
        assert by_result["recovered"] == 1

    def test_summary_by_action_type(self, audit):
        """Per-action-type breakdown."""
        for kind in ("click", "click", "type"):
            audit.record(_make_entry(action_type=kind))

        by_type = audit.get_summary()["by_action_type"]
        assert by_type["click"] == 2
        assert by_type["type"] == 1

    def test_summary_by_workflow(self, audit):
        """Per-workflow breakdown."""
        for wf in ("wf_01", "wf_01", "wf_02"):
            audit.record(_make_entry(workflow_id=wf))

        by_workflow = audit.get_summary()["by_workflow"]
        assert by_workflow["wf_01"] == 2
        assert by_workflow["wf_02"] == 1

    def test_summary_by_execution_mode(self, audit):
        """Per-execution-mode breakdown."""
        for mode in ("autonomous", "assisted", "assisted"):
            audit.record(_make_entry(execution_mode=mode))

        by_mode = audit.get_summary()["by_execution_mode"]
        assert by_mode["autonomous"] == 1
        assert by_mode["assisted"] == 2

    def test_summary_date_field(self, audit):
        """The summary echoes the requested date."""
        today = date.today().isoformat()
        assert audit.get_summary(today)["date"] == today
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests AuditTrail — export CSV
|
||||
# =========================================================================
|
||||
|
||||
class TestAuditTrailExportCSV:
    """CSV export tests."""

    @staticmethod
    def _rows(csv_data):
        """Parse an exported CSV string into a list of dict rows."""
        return list(csv.DictReader(io.StringIO(csv_data)))

    def test_export_csv_empty(self, audit):
        """No data exports as an empty string."""
        assert audit.export_csv(date_from="2025-01-01") == ""

    def test_export_csv_basic(self, audit):
        """Export with a couple of entries."""
        audit.record(_make_entry(action_id="act_001"))
        audit.record(_make_entry(action_id="act_002"))

        csv_data = audit.export_csv()
        assert csv_data
        assert "act_001" in csv_data
        assert "act_002" in csv_data

    def test_export_csv_header(self, audit):
        """The CSV header carries the dataclass fields."""
        audit.record(_make_entry())

        header = csv.DictReader(io.StringIO(audit.export_csv())).fieldnames
        for column in ("timestamp", "user_id", "action_detail", "domain", "duration_ms"):
            assert column in header

    def test_export_csv_parseable(self, audit):
        """The export survives embedded quotes and commas."""
        for i in range(5):
            audit.record(_make_entry(
                action_id=f"act_{i}",
                action_detail=f"Action {i} — avec des 'guillemets' et des, virgules",
            ))

        rows = self._rows(audit.export_csv())
        assert len(rows) == 5

        # Values are intact despite the special characters.
        for row in rows:
            assert "virgules" in row["action_detail"]

    def test_export_csv_filter_by_user(self, audit):
        """Export restricted to a single user."""
        audit.record(_make_entry(user_id="tim_dupont", action_id="act_001"))
        audit.record(_make_entry(user_id="tim_martin", action_id="act_002"))

        rows = self._rows(audit.export_csv(user_id="tim_dupont"))
        assert len(rows) == 1
        assert rows[0]["user_id"] == "tim_dupont"

    def test_export_csv_utf8(self, audit):
        """French UTF-8 text round-trips through the export."""
        audit.record(_make_entry(
            action_detail="Saisie 'Hépatite à cytomégalovirus' — réanimation néonatale",
            user_name="François Müller",
        ))

        csv_data = audit.export_csv()
        assert "Hépatite" in csv_data
        assert "François Müller" in csv_data
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests de robustesse
|
||||
# =========================================================================
|
||||
|
||||
class TestAuditTrailRobustness:
    """Robustness and edge-case tests."""

    def test_directory_auto_creation(self, tmp_path):
        """A missing directory is created by the constructor."""
        target = str(tmp_path / "nonexistent" / "deep" / "audit")
        assert not Path(target).exists()

        AuditTrail(audit_dir=target)
        assert Path(target).exists()

    def test_corrupted_jsonl_line(self, audit, audit_dir):
        """A corrupted JSONL line must not break reads."""
        # Normal entries first.
        audit.record(_make_entry(action_id="act_001"))
        audit.record(_make_entry(action_id="act_002"))

        # Inject a broken line into today's file.
        filepath = Path(audit_dir) / f"audit_{date.today().isoformat()}.jsonl"
        with open(filepath, "a", encoding="utf-8") as handle:
            handle.write("{invalid json line\n")

        # One more valid entry after the corruption.
        audit.record(_make_entry(action_id="act_003"))

        # Reading must work and skip the broken line.
        assert len(audit.query()) == 3  # 2 valid before + 1 valid after

    def test_empty_file(self, audit, audit_dir):
        """An empty daily file must not break reads."""
        filepath = Path(audit_dir) / f"audit_{date.today().isoformat()}.jsonl"
        filepath.touch()  # zero-byte file

        assert len(audit.query()) == 0

    def test_concurrent_writes(self, audit):
        """Concurrent writers are serialized by the internal lock."""
        import threading

        errors = []

        def writer(start):
            try:
                for i in range(20):
                    audit.record(_make_entry(action_id=f"act_{start}_{i}"))
            except Exception as exc:
                errors.append(str(exc))

        workers = [threading.Thread(target=writer, args=(idx,)) for idx in range(5)]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()

        assert not errors, f"Erreurs concurrentes: {errors}"
        assert len(audit.query(limit=200)) == 100  # 5 threads x 20 entries

    def test_query_invalid_date(self, audit):
        """Invalid dates must not raise in query()."""
        results = audit.query(date_from="not-a-date")
        assert isinstance(results, list)

    def test_summary_invalid_date(self, audit):
        """Invalid dates must not raise in get_summary()."""
        assert audit.get_summary("not-a-date")["total_actions"] == 0

    def test_entry_all_fields_present_in_export(self, audit):
        """The CSV export exposes exactly the dataclass fields."""
        from dataclasses import fields as dc_fields

        audit.record(_make_entry())

        reader = csv.DictReader(io.StringIO(audit.export_csv()))
        first_row = next(reader)

        expected_fields = {f.name for f in dc_fields(AuditEntry)}
        assert expected_fields == set(first_row.keys())

    def test_date_range_reversed(self, audit):
        """A reversed range (date_to < date_from) still works."""
        today = date.today()
        yesterday = today - timedelta(days=1)

        audit.record(_make_entry(
            timestamp=datetime.combine(yesterday, datetime.min.time()).isoformat(),
        ))

        # date_from > date_to: the implementation swaps the bounds itself.
        results = audit.query(
            date_from=today.isoformat(),
            date_to=yesterday.isoformat(),
        )
        assert isinstance(results, list)
|
||||
530
tests/unit/test_policy_grounding_recovery_learning.py
Normal file
530
tests/unit/test_policy_grounding_recovery_learning.py
Normal file
@@ -0,0 +1,530 @@
|
||||
"""
|
||||
Tests fonctionnels pour P2 (Policy/Grounding), P3 (Recovery), P4 (Learning).
|
||||
|
||||
Vérifie que chaque module fait bien son travail :
|
||||
- Grounding : localise ou retourne NOT_FOUND (pas de décision)
|
||||
- Policy : décide RETRY/SKIP/ABORT/SUPERVISE (pas de localisation)
|
||||
- Recovery : exécute Ctrl+Z / Escape / Alt+F4 selon le contexte
|
||||
- Learning : enregistre et requête les résultats structurés
|
||||
"""
|
||||
|
||||
import json
|
||||
import shutil
|
||||
import sys
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from unittest.mock import MagicMock, patch, PropertyMock
|
||||
|
||||
import pytest
|
||||
|
||||
_ROOT = str(Path(__file__).resolve().parents[2])
|
||||
if _ROOT not in sys.path:
|
||||
sys.path.insert(0, _ROOT)
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# P2 : Grounding — localisation pure
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestGroundingEngine:
    """Grounding does localization only — it never makes decisions."""

    def _make_engine(self):
        """Return (engine, mocked executor) with a stubbed screenshot."""
        from agent_v0.agent_v1.core.grounding import GroundingEngine
        executor = MagicMock()
        executor._capture_screenshot_b64.return_value = "fake_b64_data"
        return GroundingEngine(executor), executor

    def test_server_found_retourne_coordonnees(self):
        """When the server resolves the element, its coordinates come back."""
        engine, executor = self._make_engine()
        executor._server_resolve_target.return_value = {
            "resolved": True, "x_pct": 0.5, "y_pct": 0.3,
            "method": "som_text", "score": 0.95,
            "matched_element": {"label": "Enregistrer"},
        }

        outcome = engine.locate("http://server", {"by_text": "Enregistrer"}, 0.5, 0.3, 1920, 1080)

        assert outcome.found is True
        assert outcome.x_pct == 0.5
        assert outcome.y_pct == 0.3
        assert outcome.method == "som_text"

    def test_server_not_found_cascade_template(self):
        """On server failure the engine cascades to template matching."""
        engine, executor = self._make_engine()
        executor._server_resolve_target.return_value = None
        executor._template_match_anchor.return_value = {
            "resolved": True, "x_pct": 0.4, "y_pct": 0.6,
            "score": 0.85,
        }

        outcome = engine.locate(
            "http://server",
            {"by_text": "OK", "anchor_image_base64": "abc123"},
            0.5, 0.3, 1920, 1080,
        )

        assert outcome.found is True
        assert outcome.method == "anchor_template"

    def test_toutes_strategies_echouent_retourne_not_found(self):
        """If every strategy fails, NOT_FOUND is returned."""
        engine, executor = self._make_engine()
        executor._server_resolve_target.return_value = None
        executor._template_match_anchor.return_value = None
        executor._hybrid_vlm_resolve.return_value = None

        outcome = engine.locate(
            "http://server",
            {"by_text": "Inexistant", "anchor_image_base64": "abc", "vlm_description": "bouton"},
            0.5, 0.3, 1920, 1080,
        )

        assert outcome.found is False
        assert "échoué" in outcome.detail

    def test_screenshot_echoue_retourne_not_found(self):
        """A failed screenshot capture short-circuits to NOT_FOUND."""
        engine, executor = self._make_engine()
        executor._capture_screenshot_b64.return_value = None

        outcome = engine.locate("http://server", {"by_text": "OK"}, 0.5, 0.3, 1920, 1080)

        assert outcome.found is False
        assert "screenshot" in outcome.detail.lower()

    def test_strategies_custom(self):
        """Callers can restrict which strategies run."""
        engine, executor = self._make_engine()
        executor._template_match_anchor.return_value = {
            "resolved": True, "x_pct": 0.2, "y_pct": 0.8, "score": 0.9,
        }

        # Template only — no server round-trip.
        outcome = engine.locate(
            "", {"anchor_image_base64": "abc"}, 0.5, 0.3, 1920, 1080,
            strategies=["template"],
        )

        assert outcome.found is True
        # The server strategy was never touched.
        executor._server_resolve_target.assert_not_called()

    def test_grounding_result_to_dict(self):
        """GroundingResult serializes cleanly."""
        from agent_v0.agent_v1.core.grounding import GroundingResult

        result = GroundingResult(found=True, x_pct=0.5, y_pct=0.3, method="som", score=0.9)
        payload = result.to_dict()
        assert payload["found"] is True
        assert payload["x_pct"] == 0.5
        assert payload["method"] == "som"
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# P2 : Policy — décisions quand grounding échoue
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestPolicyEngine:
    """P2 Policy: what to decide once grounding has failed to locate a target."""

    def _make_engine(self):
        """Build a PolicyEngine wired to a fully mocked executor."""
        from agent_v0.agent_v1.core.policy import PolicyEngine
        mock_executor = MagicMock()
        return PolicyEngine(mock_executor), mock_executor

    def test_premier_essai_popup_fermee_retry(self):
        """First failure while a popup got dismissed -> RETRY."""
        from agent_v0.agent_v1.core.policy import Decision
        engine, mock_executor = self._make_engine()
        mock_executor._handle_popup_vlm.return_value = True  # a popup was closed

        verdict = engine.decide(
            action={"type": "click"},
            target_spec={"by_text": "OK"},
            retry_count=0,
        )
        assert verdict.decision == Decision.RETRY
        assert "popup" in verdict.reason.lower()

    def test_premier_essai_pas_de_popup_retry(self):
        """First failure without any popup -> still RETRY while retries remain."""
        from agent_v0.agent_v1.core.policy import Decision
        engine, mock_executor = self._make_engine()
        mock_executor._handle_popup_vlm.return_value = False

        verdict = engine.decide(
            action={"type": "click"},
            target_spec={"by_text": "OK"},
            retry_count=0,
            max_retries=2,
        )
        assert verdict.decision == Decision.RETRY

    def test_max_retries_acteur_passer_skip(self):
        """Retries exhausted and the actor answers PASSER -> SKIP."""
        from agent_v0.agent_v1.core.policy import Decision
        engine, mock_executor = self._make_engine()
        mock_executor._actor_decide.return_value = "PASSER"

        verdict = engine.decide(
            action={"type": "click"},
            target_spec={"by_text": "Onglet"},
            retry_count=1,
            max_retries=1,
        )
        assert verdict.decision == Decision.SKIP

    def test_max_retries_acteur_stopper_abort(self):
        """Retries exhausted and the actor answers STOPPER -> ABORT."""
        from agent_v0.agent_v1.core.policy import Decision
        engine, mock_executor = self._make_engine()
        mock_executor._actor_decide.return_value = "STOPPER"

        verdict = engine.decide(
            action={"type": "click"},
            target_spec={"by_text": "X"},
            retry_count=1,
            max_retries=1,
        )
        assert verdict.decision == Decision.ABORT

    def test_max_retries_acteur_executer_supervise(self):
        """Retries exhausted and the actor answers EXECUTER -> SUPERVISE (hand back control)."""
        from agent_v0.agent_v1.core.policy import Decision
        engine, mock_executor = self._make_engine()
        mock_executor._actor_decide.return_value = "EXECUTER"

        verdict = engine.decide(
            action={"type": "click"},
            target_spec={"by_text": "X"},
            retry_count=1,
            max_retries=1,
        )
        assert verdict.decision == Decision.SUPERVISE

    def test_policy_decision_to_dict(self):
        """PolicyDecision serializes into a plain dict."""
        from agent_v0.agent_v1.core.policy import PolicyDecision, Decision
        serialized = PolicyDecision(decision=Decision.SKIP, reason="État atteint").to_dict()
        assert serialized["decision"] == "skip"
        assert serialized["reason"] == "État atteint"
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# P3 : Recovery — rollback après échec
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestRecoveryEngine:
    """P3 Recovery: automatic rollback (Ctrl+Z / Escape / Alt+F4) after a failure."""

    def _make_engine(self):
        """Build a RecoveryEngine on top of a mocked executor (keyboard + screen)."""
        from agent_v0.agent_v1.core.recovery import RecoveryEngine
        mock_executor = MagicMock()
        mock_executor.keyboard = MagicMock()
        mock_executor.sct = MagicMock()
        mock_executor.sct.monitors = [{}, {"width": 1920, "height": 1080}]
        mock_executor._click = MagicMock()
        return RecoveryEngine(mock_executor), mock_executor

    def test_popup_detectee_escape(self):
        """Critic mentions a popup -> Recovery presses Escape."""
        from agent_v0.agent_v1.core.recovery import RecoveryAction
        engine, mock_executor = self._make_engine()
        outcome = engine.attempt(
            failed_action={"type": "click"},
            critic_detail="Une popup d'erreur est apparue",
        )
        assert outcome.action_taken == RecoveryAction.ESCAPE
        assert outcome.success is True
        # Escape must actually have been sent to the keyboard.
        mock_executor.keyboard.press.assert_called()

    def test_frappe_incorrecte_undo(self):
        """Text typed in the wrong place -> Recovery issues Ctrl+Z."""
        from agent_v0.agent_v1.core.recovery import RecoveryAction
        engine, _ = self._make_engine()
        outcome = engine.attempt(
            failed_action={"type": "type"},
            critic_detail="Le texte a été tapé au mauvais endroit",
        )
        assert outcome.action_taken == RecoveryAction.UNDO
        assert outcome.success is True

    def test_mauvaise_fenetre_close(self):
        """Wrong window in the foreground -> Recovery issues Alt+F4."""
        from agent_v0.agent_v1.core.recovery import RecoveryAction
        engine, _ = self._make_engine()
        outcome = engine.attempt(
            failed_action={"type": "click"},
            critic_detail="Mauvaise fenêtre ouverte au lieu du bloc-notes",
        )
        assert outcome.action_taken == RecoveryAction.CLOSE_WINDOW
        assert outcome.success is True

    def test_menu_ouvert_escape(self):
        """A dropdown menu opened unexpectedly -> Recovery presses Escape."""
        from agent_v0.agent_v1.core.recovery import RecoveryAction
        engine, _ = self._make_engine()
        outcome = engine.attempt(
            failed_action={"type": "click"},
            critic_detail="Un menu déroulant s'est ouvert",
        )
        assert outcome.action_taken == RecoveryAction.ESCAPE
        assert outcome.success is True

    def test_aucune_strategie_applicable(self):
        """No recognized failure pattern -> NONE, no success claimed."""
        from agent_v0.agent_v1.core.recovery import RecoveryAction
        engine, _ = self._make_engine()
        outcome = engine.attempt(
            failed_action={"type": "wait"},
            critic_detail="Quelque chose d'inattendu",
        )
        assert outcome.action_taken == RecoveryAction.NONE
        assert outcome.success is False

    def test_recovery_result_to_dict(self):
        """RecoveryResult serializes into a plain dict."""
        from agent_v0.agent_v1.core.recovery import RecoveryResult, RecoveryAction
        serialized = RecoveryResult(
            action_taken=RecoveryAction.UNDO, success=True, detail="Ctrl+Z"
        ).to_dict()
        assert serialized["action_taken"] == "undo"
        assert serialized["success"] is True
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# P4 : Learning — apprentissage runtime
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestReplayLearner:
    """P4 Learning: recording action outcomes and querying them back.

    Covers record/load round-trips, conversion from replay results,
    similarity queries, global stats, and the JSONL on-disk format.
    """

    @pytest.fixture
    def learner(self):
        """Yield a ReplayLearner backed by a throwaway directory.

        Uses tempfile.TemporaryDirectory instead of the previous manual
        mkdtemp()/rmtree() pair: cleanup is guaranteed by the context
        manager even if fixture finalization is interrupted.
        """
        from agent_v0.server_v1.replay_learner import ReplayLearner
        with tempfile.TemporaryDirectory(prefix="test_learning_") as tmpdir:
            yield ReplayLearner(learning_dir=tmpdir)

    def test_record_et_load_session(self, learner):
        """An outcome written with record() can be read back via load_session()."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        outcome = ActionOutcome(
            session_id="test_session",
            action_id="act_001",
            action_type="click",
            target_description="Bouton Enregistrer",
            resolution_method="som_text",
            resolution_score=0.95,
            success=True,
        )
        learner.record(outcome)

        # Read it back from the session file.
        loaded = learner.load_session("test_session")
        assert len(loaded) == 1
        assert loaded[0].action_id == "act_001"
        assert loaded[0].success is True
        assert loaded[0].resolution_method == "som_text"

    def test_record_from_replay_result(self, learner):
        """The replay result format is converted into an ActionOutcome."""
        learner.record_from_replay_result(
            session_id="s1",
            action={"action_id": "a1", "type": "click", "target_spec": {"by_text": "OK", "window_title": "App"}},
            result={"success": True, "resolution_method": "template", "resolution_score": 0.9},
            verification={"verified": True, "semantic_verified": True, "semantic_detail": "OK"},
        )
        loaded = learner.load_session("s1")
        assert len(loaded) == 1
        # target_description is taken from target_spec["by_text"].
        assert loaded[0].target_description == "OK"
        assert loaded[0].semantic_verified is True

    def test_query_similar(self, learner):
        """Similar outcomes are retrieved by target description."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        # Record a mix of targets and methods.
        for i, (desc, method, success) in enumerate([
            ("Bouton Enregistrer", "som_text", True),
            ("Bouton Annuler", "template", True),
            ("Bouton Enregistrer", "vlm_direct", False),
            ("Menu Fichier", "som_text", True),
        ]):
            learner.record(ActionOutcome(
                session_id="s1", action_id=f"a{i}",
                action_type="click", target_description=desc,
                resolution_method=method, success=success,
            ))

        # Search for "Enregistrer": exactly the two matching entries come back.
        results = learner.query_similar(target_description="Enregistrer")
        assert len(results) == 2
        for r in results:
            assert "enregistrer" in r["outcome"]["target_description"].lower()

    def test_get_stats(self, learner):
        """Global statistics aggregate totals and per-method success rates."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        for success, method in [(True, "som"), (True, "som"), (False, "template"), (True, "vlm")]:
            learner.record(ActionOutcome(
                session_id="s1", action_id="a",
                action_type="click", success=success,
                resolution_method=method,
            ))

        stats = learner.get_stats()
        assert stats["total"] == 4
        assert stats["success_rate"] == 0.75
        assert stats["methods"]["som"]["success_rate"] == 1.0
        assert stats["methods"]["template"]["success_rate"] == 0.0

    def test_gemma4_indisponible_pas_de_crash(self, learner):
        """Learning keeps working with no VLM available: plain record, no crash."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        learner.record(ActionOutcome(
            session_id="s1", action_id="a1", action_type="click",
            success=False, error="target_not_found",
        ))
        stats = learner.get_stats()
        assert stats["total"] == 1
        assert stats["success_rate"] == 0.0

    def test_fichier_jsonl_format(self, learner):
        """The session file is JSONL: one valid JSON object per line."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        learner.record(ActionOutcome(
            session_id="s1", action_id="a1", action_type="click", success=True,
        ))
        learner.record(ActionOutcome(
            session_id="s1", action_id="a2", action_type="type", success=False,
        ))

        jsonl_file = learner.learning_dir / "s1.jsonl"
        assert jsonl_file.is_file()

        with open(jsonl_file) as f:
            lines = f.readlines()
        assert len(lines) == 2
        for line in lines:
            data = json.loads(line)  # each line must parse on its own
            assert "action_id" in data
            assert "success" in data
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Boucle d'apprentissage : consolidation cross-workflow
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestLearningLoop:
    """Learning loop: past replay outcomes improve subsequent replays.

    Exercises best_strategy_for(), cross-workflow consolidation, and the
    GroundingEngine honoring a learned strategy hint.
    """

    @pytest.fixture
    def learner(self):
        """Yield a ReplayLearner backed by a throwaway directory.

        Uses tempfile.TemporaryDirectory instead of the previous manual
        mkdtemp()/rmtree() pair: cleanup is guaranteed by the context
        manager even if fixture finalization is interrupted.
        """
        from agent_v0.server_v1.replay_learner import ReplayLearner
        with tempfile.TemporaryDirectory(prefix="test_learning_loop_") as tmpdir:
            yield ReplayLearner(learning_dir=tmpdir)

    def test_best_strategy_apprend_du_succes(self, learner):
        """The best strategy is the one with the most recorded successes."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        # anchor_template fails 3 times on "Enregistrer".
        for i in range(3):
            learner.record(ActionOutcome(
                session_id=f"s{i}", action_id=f"a{i}", action_type="click",
                target_description="Enregistrer", resolution_method="anchor_template",
                success=False,
            ))
        # som_text_match succeeds twice on "Enregistrer".
        for i in range(2):
            learner.record(ActionOutcome(
                session_id=f"s{10+i}", action_id=f"a{10+i}", action_type="click",
                target_description="Enregistrer", resolution_method="som_text_match",
                success=True,
            ))

        best = learner.best_strategy_for("Enregistrer")
        assert best == "som_text_match"

    def test_best_strategy_minimum_2_essais(self, learner):
        """A strategy needs at least 2 attempts before being recommended."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        # A single success is not enough evidence.
        learner.record(ActionOutcome(
            session_id="s1", action_id="a1", action_type="click",
            target_description="OK", resolution_method="vlm_direct",
            success=True,
        ))
        best = learner.best_strategy_for("OK")
        assert best is None

    def test_best_strategy_rien_si_historique_vide(self, learner):
        """No history -> no recommendation."""
        best = learner.best_strategy_for("Inexistant")
        assert best is None

    def test_consolidate_workflow_enrichit_les_actions(self, learner):
        """Consolidation injects _learned_strategy into matching target_specs."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        # History: som_text_match works for "Fichier".
        for i in range(3):
            learner.record(ActionOutcome(
                session_id=f"s{i}", action_id=f"a{i}", action_type="click",
                target_description="Fichier", resolution_method="som_text_match",
                success=True,
            ))

        # Workflow containing one "Fichier" action, one keystroke, one unknown target.
        actions = [
            {"type": "click", "target_spec": {"by_text": "Fichier", "window_title": "Bloc-notes"}},
            {"type": "type", "text": "bonjour"},
            {"type": "click", "target_spec": {"by_text": "Inconnu"}},
        ]

        enriched = learner.consolidate_workflow(actions)
        assert enriched == 1  # only "Fichier" has history
        assert actions[0]["target_spec"]["_learned_strategy"] == "som_text_match"
        assert "_learned_strategy" not in actions[2].get("target_spec", {})

    def test_consolidation_cross_workflow(self, learner):
        """A success recorded in workflow A improves workflow B."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        # Workflow A: "Enregistrer" succeeds with grounding_vlm.
        for i in range(3):
            learner.record(ActionOutcome(
                session_id="workflow_A", action_id=f"a{i}", action_type="click",
                target_description="Enregistrer",
                window_title="Bloc-notes",
                resolution_method="grounding_vlm", success=True,
            ))

        # Workflow B also targets "Enregistrer".
        workflow_b = [
            {"type": "click", "target_spec": {"by_text": "Enregistrer", "window_title": "Bloc-notes"}},
        ]
        enriched = learner.consolidate_workflow(workflow_b, "workflow_B")
        assert enriched == 1
        assert workflow_b[0]["target_spec"]["_learned_strategy"] == "grounding_vlm"

    def test_grounding_reordonne_strategies(self):
        """GroundingEngine reorders its strategies according to _learned_strategy."""
        from agent_v0.agent_v1.core.grounding import GroundingEngine
        executor = MagicMock()
        executor._capture_screenshot_b64.return_value = "fake"
        # Only template matching resolves; server and VLM return nothing.
        executor._server_resolve_target.return_value = None
        executor._template_match_anchor.return_value = {
            "resolved": True, "x_pct": 0.5, "y_pct": 0.5, "score": 0.9,
        }
        executor._hybrid_vlm_resolve.return_value = None

        engine = GroundingEngine(executor)

        # With _learned_strategy = anchor_template, template runs first.
        result = engine.locate(
            "http://server",
            {"by_text": "OK", "anchor_image_base64": "abc", "_learned_strategy": "anchor_template"},
            0.5, 0.3, 1920, 1080,
        )
        assert result.found is True
        assert result.method == "anchor_template"
        # The server was never contacted because template won first.
        executor._server_resolve_target.assert_not_called()
|
||||
441
tests/unit/test_replay_critic.py
Normal file
441
tests/unit/test_replay_critic.py
Normal file
@@ -0,0 +1,441 @@
|
||||
"""
|
||||
Tests unitaires pour le Critic (ReplayVerifier.verify_with_critic)
|
||||
et l'enrichissement des actions avec intentions.
|
||||
|
||||
Vérifie les FONCTIONNALITÉS, pas juste la non-régression :
|
||||
1. Le Critic fusionne correctement pixel + sémantique
|
||||
2. La matrice de décision (4 cas) est correcte
|
||||
3. L'enrichissement intentions parse bien les réponses gemma4
|
||||
4. Les fallbacks fonctionnent quand le VLM est indisponible
|
||||
"""
|
||||
|
||||
import base64
|
||||
import io
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from unittest.mock import MagicMock, patch, Mock
|
||||
|
||||
import pytest
|
||||
|
||||
_ROOT = str(Path(__file__).resolve().parents[2])
|
||||
if _ROOT not in sys.path:
|
||||
sys.path.insert(0, _ROOT)
|
||||
|
||||
from agent_v0.server_v1.replay_verifier import ReplayVerifier, VerificationResult
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Fixtures
|
||||
# =========================================================================
|
||||
|
||||
|
||||
def _make_screenshot_b64(width=100, height=100, color=(128, 128, 128)):
    """Build a fake base64-encoded JPEG screenshot of the given size and color."""
    from PIL import Image
    buffer = io.BytesIO()
    Image.new("RGB", (width, height), color).save(buffer, format="JPEG", quality=50)
    return base64.b64encode(buffer.getvalue()).decode()
|
||||
|
||||
|
||||
@pytest.fixture
def verifier():
    """Provide a fresh ReplayVerifier for each test."""
    instance = ReplayVerifier()
    return instance
|
||||
|
||||
|
||||
@pytest.fixture
def screenshot_gray():
    """A uniform mid-gray 100x100 screenshot, base64-encoded."""
    return _make_screenshot_b64(100, 100, (128, 128, 128))
|
||||
|
||||
|
||||
@pytest.fixture
def screenshot_white():
    """A uniform white 100x100 screenshot, base64-encoded."""
    return _make_screenshot_b64(100, 100, (255, 255, 255))
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests VerificationResult — nouveaux champs sémantiques
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestVerificationResult:
    """Serialization of VerificationResult, with and without semantic fields."""

    def test_to_dict_sans_semantique(self):
        """Without semantic verification, the semantic_* keys are absent from the dict."""
        serialized = VerificationResult(
            verified=True, confidence=0.8, changes_detected=True,
            change_area_pct=5.0, suggestion="continue", detail="test",
        ).to_dict()
        assert "semantic_verified" not in serialized
        assert serialized["verified"] is True
        assert serialized["confidence"] == 0.8

    def test_to_dict_avec_semantique(self):
        """With semantic verification, the semantic_* keys are present."""
        serialized = VerificationResult(
            verified=True, confidence=0.9, changes_detected=True,
            change_area_pct=5.0, suggestion="continue", detail="test",
            semantic_verified=True, semantic_detail="Bouton visible",
            semantic_elapsed_ms=1500.0,
        ).to_dict()
        assert serialized["semantic_verified"] is True
        assert serialized["semantic_detail"] == "Bouton visible"
        assert serialized["semantic_elapsed_ms"] == 1500.0

    def test_to_dict_semantique_false(self):
        """semantic_verified=False must still appear in the dict (not dropped as falsy)."""
        serialized = VerificationResult(
            verified=False, confidence=0.7, changes_detected=True,
            change_area_pct=5.0, suggestion="retry",
            semantic_verified=False, semantic_detail="Mauvais écran",
            semantic_elapsed_ms=2000.0,
        ).to_dict()
        assert serialized["semantic_verified"] is False
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests verify_with_critic — matrice de décision
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestVerifyWithCritic:
    """Decision matrix of verify_with_critic (pixel diff x semantic VLM check)."""

    def test_sans_expected_result_retourne_pixel_seul(self, verifier, screenshot_gray):
        """With no expected_result, verify_with_critic degrades to pixel-only verify_action."""
        outcome = verifier.verify_with_critic(
            action={"type": "click", "action_id": "test"},
            result={"success": True},
            screenshot_before=screenshot_gray,
            screenshot_after=screenshot_gray,
            expected_result="",  # nothing to check semantically
        )
        # Pixel-only path: no semantic field populated.
        assert outcome.semantic_verified is None

    def test_sans_screenshots_pas_de_semantique(self, verifier):
        """No screenshots -> semantic verification is impossible."""
        outcome = verifier.verify_with_critic(
            action={"type": "click", "action_id": "test"},
            result={"success": True},
            screenshot_before=None,
            screenshot_after=None,
            expected_result="Le fichier est ouvert",
        )
        # Falls back to pixel-only with low confidence.
        assert outcome.verified is True
        assert outcome.confidence < 0.5

    def test_pixel_pas_change_et_expected_result_skip_vlm(
        self, verifier, screenshot_gray,
    ):
        """Identical screenshots + expected_result -> VLM skipped (no change means retry)."""
        outcome = verifier.verify_with_critic(
            action={"type": "click", "action_id": "test", "x_pct": 0.5, "y_pct": 0.5},
            result={"success": True},
            screenshot_before=screenshot_gray,
            screenshot_after=screenshot_gray,  # same image: nothing changed
            expected_result="Le menu s'est ouvert",
        )
        # No pixel change -> retry, and the VLM was never consulted.
        assert outcome.verified is False
        assert outcome.suggestion == "retry"
        assert outcome.semantic_verified is None

    @patch("agent_v0.server_v1.replay_verifier.ReplayVerifier._verify_semantic")
    def test_pixel_ok_semantic_ok(
        self, mock_semantic, verifier, screenshot_gray, screenshot_white,
    ):
        """Pixel OK + semantic OK -> verified with high confidence."""
        mock_semantic.return_value = {
            "verified": True,
            "detail": "Le menu est bien ouvert",
            "elapsed_ms": 2000.0,
        }
        outcome = verifier.verify_with_critic(
            action={"type": "click", "action_id": "test"},
            result={"success": True},
            screenshot_before=screenshot_gray,
            screenshot_after=screenshot_white,  # different image: change detected
            expected_result="Le menu s'est ouvert",
        )
        assert outcome.verified is True
        assert outcome.semantic_verified is True
        assert outcome.confidence >= 0.7
        assert "Critic OK" in outcome.detail

    @patch("agent_v0.server_v1.replay_verifier.ReplayVerifier._verify_semantic")
    def test_pixel_ok_semantic_non(
        self, mock_semantic, verifier, screenshot_gray, screenshot_white,
    ):
        """Pixel OK + semantic NO -> unexpected change, so retry."""
        mock_semantic.return_value = {
            "verified": False,
            "detail": "Une erreur est apparue au lieu du menu",
            "elapsed_ms": 2500.0,
        }
        outcome = verifier.verify_with_critic(
            action={"type": "click", "action_id": "test"},
            result={"success": True},
            screenshot_before=screenshot_gray,
            screenshot_after=screenshot_white,
            expected_result="Le menu s'est ouvert",
        )
        assert outcome.verified is False
        assert outcome.semantic_verified is False
        assert outcome.suggestion == "retry"
        assert "Critic NON" in outcome.detail

    @patch("agent_v0.server_v1.replay_verifier.ReplayVerifier._verify_semantic")
    def test_vlm_indisponible_fallback_pixel(
        self, mock_semantic, verifier, screenshot_gray, screenshot_white,
    ):
        """VLM unavailable -> graceful fallback to the pixel-only result."""
        mock_semantic.return_value = None  # VLM down
        outcome = verifier.verify_with_critic(
            action={"type": "click", "action_id": "test"},
            result={"success": True},
            screenshot_before=screenshot_gray,
            screenshot_after=screenshot_white,
            expected_result="Le menu s'est ouvert",
        )
        # Pixel-only fallback still detects the change.
        assert outcome.verified is True
        assert outcome.semantic_verified is None  # no VLM involved
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests _verify_semantic — parsing de la réponse VLM
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestVerifySemantic:
    """Parsing of the VLM response inside _verify_semantic."""

    @patch("requests.post")
    def test_parse_verdict_oui(self, mock_post, verifier, screenshot_white):
        """VERDICT: OUI is parsed as verified=True with the reason as detail."""
        fake_response = MagicMock()
        fake_response.ok = True
        fake_response.json.return_value = {
            "message": {"content": "VERDICT: OUI\nRAISON: Le fichier est bien ouvert"}
        }
        mock_post.return_value = fake_response

        outcome = verifier._verify_semantic(
            screenshot_before=screenshot_white,
            screenshot_after=screenshot_white,
            expected_result="Le fichier est ouvert",
        )
        assert outcome is not None
        assert outcome["verified"] is True
        assert "ouvert" in outcome["detail"]

    @patch("requests.post")
    def test_parse_verdict_non(self, mock_post, verifier, screenshot_white):
        """VERDICT: NON is parsed as verified=False."""
        fake_response = MagicMock()
        fake_response.ok = True
        fake_response.json.return_value = {
            "message": {"content": "VERDICT: NON\nRAISON: L'écran n'a pas changé"}
        }
        mock_post.return_value = fake_response

        outcome = verifier._verify_semantic(
            screenshot_before=screenshot_white,
            screenshot_after=screenshot_white,
            expected_result="Le menu s'est ouvert",
        )
        assert outcome is not None
        assert outcome["verified"] is False

    @patch("requests.post")
    def test_vlm_timeout_retourne_none(self, mock_post, verifier, screenshot_white):
        """A VLM timeout yields None so callers can fall back gracefully."""
        import requests as requests_module
        mock_post.side_effect = requests_module.Timeout("timeout")

        outcome = verifier._verify_semantic(
            screenshot_before=screenshot_white,
            screenshot_after=screenshot_white,
            expected_result="Le fichier est ouvert",
        )
        assert outcome is None

    def test_sans_screenshot_after_retourne_none(self, verifier):
        """Without screenshot_after there is nothing to verify -> None."""
        outcome = verifier._verify_semantic(
            screenshot_before=None,
            screenshot_after=None,
            expected_result="Le fichier est ouvert",
        )
        assert outcome is None
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests _merge_results — matrice pixel x sémantique
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestMergeResults:
    """_merge_results decision matrix: pixel verdict x semantic verdict."""

    def test_pixel_ok_sem_ok(self, verifier):
        """Both agree on success -> verified, high confidence."""
        pixel_result = VerificationResult(
            verified=True, confidence=0.7, changes_detected=True,
            change_area_pct=5.0, suggestion="continue",
        )
        semantic_result = {"verified": True, "detail": "OK", "elapsed_ms": 1000}
        merged = verifier._merge_results(pixel_result, semantic_result)
        assert merged.verified is True
        assert merged.semantic_verified is True
        assert merged.confidence >= 0.7

    def test_pixel_ok_sem_non(self, verifier):
        """Pixel changed but semantics disagree -> unexpected change, retry."""
        pixel_result = VerificationResult(
            verified=True, confidence=0.7, changes_detected=True,
            change_area_pct=5.0, suggestion="continue",
        )
        semantic_result = {"verified": False, "detail": "Erreur popup", "elapsed_ms": 2000}
        merged = verifier._merge_results(pixel_result, semantic_result)
        assert merged.verified is False
        assert merged.semantic_verified is False
        assert merged.suggestion == "retry"

    def test_pixel_non_sem_ok(self, verifier):
        """No pixel change but semantics confirm the state -> subtle success, continue."""
        pixel_result = VerificationResult(
            verified=False, confidence=0.5, changes_detected=False,
            change_area_pct=0.1, suggestion="retry",
        )
        semantic_result = {"verified": True, "detail": "Onglet déjà actif", "elapsed_ms": 1500}
        merged = verifier._merge_results(pixel_result, semantic_result)
        assert merged.verified is True
        assert merged.semantic_verified is True
        assert merged.suggestion == "continue"

    def test_pixel_non_sem_non(self, verifier):
        """Both agree nothing happened -> definite failure, retry."""
        pixel_result = VerificationResult(
            verified=False, confidence=0.5, changes_detected=False,
            change_area_pct=0.0, suggestion="retry",
        )
        semantic_result = {"verified": False, "detail": "Rien ne s'est passé", "elapsed_ms": 3000}
        merged = verifier._merge_results(pixel_result, semantic_result)
        assert merged.verified is False
        assert merged.semantic_verified is False
        assert merged.confidence >= 0.7  # high confidence in the failure itself
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests enrichissement intentions (stream_processor)
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestEnrichActionsWithIntentions:
|
||||
|
||||
@patch("requests.post")
|
||||
@patch("requests.get")
|
||||
def test_enrichissement_parse_reponse_gemma4(self, mock_get, mock_post):
|
||||
"""La réponse gemma4 est correctement parsée en intention/avant/après."""
|
||||
from agent_v0.server_v1.stream_processor import _enrich_actions_with_intentions
|
||||
import tempfile, shutil
|
||||
|
||||
# Mock gemma4 disponible
|
||||
mock_tags_resp = MagicMock()
|
||||
mock_tags_resp.ok = True
|
||||
mock_get.return_value = mock_tags_resp
|
||||
|
||||
mock_chat_resp = MagicMock()
|
||||
mock_chat_resp.ok = True
|
||||
mock_chat_resp.json.return_value = {
|
||||
"message": {
|
||||
"content": (
|
||||
"INTENTION: Ouvrir le fichier client dans le logiciel\n"
|
||||
"AVANT: Le logiciel est ouvert sur la page d'accueil\n"
|
||||
"APRÈS: Le fichier client est affiché dans la fenêtre"
|
||||
)
|
||||
}
|
||||
}
|
||||
mock_post.return_value = mock_chat_resp
|
||||
|
||||
actions = [
|
||||
{
|
||||
"type": "click",
|
||||
"action_id": "act_001",
|
||||
"target_spec": {"by_text": "Ouvrir", "window_title": "Logiciel"},
|
||||
},
|
||||
{
|
||||
"type": "wait",
|
||||
"action_id": "act_002",
|
||||
"duration_ms": 1000,
|
||||
},
|
||||
]
|
||||
|
||||
tmpdir = Path(tempfile.mkdtemp())
|
||||
try:
|
||||
(tmpdir / "shots").mkdir()
|
||||
_enrich_actions_with_intentions(actions, tmpdir)
|
||||
|
||||
# L'action click doit être enrichie
|
||||
assert actions[0].get("intention") == "Ouvrir le fichier client dans le logiciel"
|
||||
assert actions[0].get("expected_state") == "Le logiciel est ouvert sur la page d'accueil"
|
||||
assert actions[0].get("expected_result") == "Le fichier client est affiché dans la fenêtre"
|
||||
# expected_state doit aussi être dans target_spec (pour l'Observer)
|
||||
assert actions[0]["target_spec"]["expected_state"] == "Le logiciel est ouvert sur la page d'accueil"
|
||||
|
||||
# L'action wait ne doit PAS être enrichie
|
||||
assert "intention" not in actions[1]
|
||||
finally:
|
||||
shutil.rmtree(tmpdir)
|
||||
|
||||
    @patch("requests.get")
    def test_gemma4_indisponible_pas_de_crash(self, mock_get):
        """If gemma4 is down, enrichment is silently disabled (no crash)."""
        from agent_v0.server_v1.stream_processor import _enrich_actions_with_intentions
        import tempfile, shutil

        # The availability probe raises: gemma4 is unreachable.
        mock_get.side_effect = ConnectionError("gemma4 down")

        actions = [
            {"type": "click", "action_id": "act_001", "target_spec": {"by_text": "OK"}},
        ]

        tmpdir = Path(tempfile.mkdtemp())
        try:
            (tmpdir / "shots").mkdir()
            _enrich_actions_with_intentions(actions, tmpdir)
            # No crash, and no intention added.
            assert "intention" not in actions[0]
        finally:
            shutil.rmtree(tmpdir)
|
||||
|
||||
    @patch("requests.post")
    @patch("requests.get")
    def test_reponse_gemma4_malformee(self, mock_get, mock_post):
        """If gemma4 returns unstructured text, there is no crash."""
        from agent_v0.server_v1.stream_processor import _enrich_actions_with_intentions
        import tempfile, shutil

        # gemma4 is reachable...
        mock_tags = MagicMock()
        mock_tags.ok = True
        mock_get.return_value = mock_tags

        # ...but replies with free text instead of INTENTION/AVANT/APRÈS lines.
        mock_resp = MagicMock()
        mock_resp.ok = True
        mock_resp.json.return_value = {
            "message": {"content": "Je ne comprends pas cette demande."}
        }
        mock_post.return_value = mock_resp

        actions = [
            {"type": "click", "action_id": "act_001", "target_spec": {"by_text": "OK"}},
        ]

        tmpdir = Path(tempfile.mkdtemp())
        try:
            (tmpdir / "shots").mkdir()
            _enrich_actions_with_intentions(actions, tmpdir)
            # No crash, but no intention either.
            assert "intention" not in actions[0]
        finally:
            shutil.rmtree(tmpdir)
|
||||
762
tests/unit/test_task_planner.py
Normal file
762
tests/unit/test_task_planner.py
Normal file
@@ -0,0 +1,762 @@
|
||||
# tests/unit/test_task_planner.py
|
||||
"""
|
||||
Tests unitaires du TaskPlanner (planificateur MACRO).
|
||||
|
||||
Vérifie :
|
||||
1. La compréhension d'ordres simples (understand)
|
||||
2. Le matching de workflows par description sémantique
|
||||
3. La détection de boucles et l'extraction de paramètres
|
||||
4. La conversion étapes → actions JSON (format correct)
|
||||
5. L'extraction de descriptions de session
|
||||
|
||||
Toutes les réponses gemma4 sont mockées pour la reproductibilité.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from unittest.mock import MagicMock, patch, Mock
|
||||
|
||||
import pytest
|
||||
|
||||
# Make the repository root importable regardless of where pytest is invoked
# from (tests/unit/ is two levels below the root).
_ROOT = str(Path(__file__).resolve().parents[2])
if _ROOT not in sys.path:
    sys.path.insert(0, _ROOT)
|
||||
|
||||
from agent_v0.server_v1.task_planner import TaskPlanner, TaskPlan
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Fixtures
|
||||
# =========================================================================
|
||||
|
||||
@pytest.fixture
def planner():
    """TaskPlanner wired to a dummy gemma4 port (never actually contacted:
    every HTTP call in these tests is mocked)."""
    return TaskPlanner(gemma4_port="11435", domain_id="generic")
|
||||
|
||||
|
||||
@pytest.fixture
def sample_workflows():
    """Workflows available to the matching tests.

    The mocked gemma4 replies reference them by 1-based index
    (``WORKFLOW: 1`` → ``sess_001``, etc.).
    """
    return [
        {
            "session_id": "sess_001",
            "name": "Bloc-notes",
            "description": "Ouvrir Bloc-notes via Exécuter (Win+R) et écrire du texte",
            "machine": "PC-01",
            "event_count": 25,
        },
        {
            "session_id": "sess_002",
            "name": "Explorateur de fichiers",
            "description": "Naviguer dans l'Explorateur de fichiers et ouvrir des images",
            "machine": "PC-01",
            "event_count": 40,
        },
        {
            "session_id": "sess_003",
            "name": "DxCare, Codage CIM-10",
            "description": "Ouvrir un dossier patient dans DxCare et coder les diagnostics CIM-10",
            "machine": "PC-TIM",
            "event_count": 80,
        },
    ]
|
||||
|
||||
|
||||
def _mock_gemma4_response(content: str):
|
||||
"""Créer un mock de réponse HTTP gemma4."""
|
||||
mock_resp = MagicMock()
|
||||
mock_resp.ok = True
|
||||
mock_resp.status_code = 200
|
||||
mock_resp.json.return_value = {
|
||||
"message": {"content": content}
|
||||
}
|
||||
return mock_resp
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests : understand — ordre simple
|
||||
# =========================================================================
|
||||
|
||||
class TestUnderstandOrdreSimple:
    """Verify that understand() correctly parses gemma4 replies.

    All HTTP traffic is mocked; each test feeds a canned structured reply
    (COMPRIS/WORKFLOW/CONFIANCE/... lines) to the parser.
    """

    def test_understand_ordre_simple(self, planner, sample_workflows):
        """'Ouvre le bloc-notes' → understood=True."""
        gemma4_response = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 1\n"
            "CONFIANCE: 0.9\n"
            "PARAMETRES: AUCUN\n"
            "BOUCLE: NON\n"
            "SOURCE_BOUCLE: aucun\n"
            "PLAN:\n"
            "1. Ouvrir le Bloc-notes via Win+R\n"
            "2. Taper notepad et valider\n"
        )

        with patch("requests.post", return_value=_mock_gemma4_response(gemma4_response)):
            plan = planner.understand(
                "Ouvre le bloc-notes",
                available_workflows=sample_workflows,
            )

        assert plan.understood is True
        assert plan.instruction == "Ouvre le bloc-notes"

    def test_understand_instruction_non_comprise(self, planner):
        """An incomprehensible instruction → understood=False."""
        gemma4_response = "COMPRIS: NON\nWORKFLOW: AUCUN\nBOUCLE: NON\n"

        with patch("requests.post", return_value=_mock_gemma4_response(gemma4_response)):
            plan = planner.understand("xyzzy blah blah")

        assert plan.understood is False

    def test_understand_gemma4_erreur_http(self, planner):
        """A gemma4 HTTP error → plan.error is populated."""
        mock_resp = MagicMock()
        mock_resp.ok = False
        mock_resp.status_code = 500

        with patch("requests.post", return_value=mock_resp):
            plan = planner.understand("Ouvre le bloc-notes")

        assert plan.understood is False
        assert "500" in plan.error

    def test_understand_gemma4_timeout(self, planner):
        """A gemma4 timeout → plan.error is populated."""
        import requests
        with patch("requests.post", side_effect=requests.Timeout("timeout")):
            plan = planner.understand("Ouvre le bloc-notes")

        assert plan.understood is False
        # The exact error wording is not pinned down; accept either phrasing.
        assert "erreur" in plan.error.lower() or "timeout" in plan.error.lower()
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests : matching workflow
|
||||
# =========================================================================
|
||||
|
||||
class TestUnderstandIdentifieWorkflow:
    """Verify that workflow matching works in understand()."""

    def test_understand_identifie_workflow(self, planner, sample_workflows):
        """When a workflow matches, workflow_match is filled in."""
        gemma4_response = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 1\n"
            "CONFIANCE: 0.9\n"
            "PARAMETRES: AUCUN\n"
            "BOUCLE: NON\n"
            "SOURCE_BOUCLE: aucun\n"
            "PLAN:\n"
            "1. Lancer le Bloc-notes\n"
        )

        with patch("requests.post", return_value=_mock_gemma4_response(gemma4_response)):
            plan = planner.understand(
                "Ouvre le bloc-notes",
                available_workflows=sample_workflows,
            )

        # "WORKFLOW: 1" is 1-based → first entry of sample_workflows.
        assert plan.workflow_match == "sess_001"
        assert plan.workflow_name == "Bloc-notes"
        assert plan.mode == "replay"
        assert plan.match_confidence >= 0.8

    def test_understand_workflow_aucun_match(self, planner, sample_workflows):
        """No matching workflow → free mode."""
        gemma4_response = (
            "COMPRIS: OUI\n"
            "WORKFLOW: AUCUN\n"
            "PARAMETRES: AUCUN\n"
            "BOUCLE: NON\n"
            "SOURCE_BOUCLE: aucun\n"
            "PLAN:\n"
            "1. Ouvrir Chrome\n"
            "2. Aller sur Google\n"
        )

        with patch("requests.post", return_value=_mock_gemma4_response(gemma4_response)):
            plan = planner.understand(
                "Recherche voiture sur Google",
                available_workflows=sample_workflows,
            )

        assert plan.understood is True
        assert plan.workflow_match == ""
        assert plan.mode == "free"

    def test_understand_workflow_second_match(self, planner, sample_workflows):
        """Workflow 2 is selected correctly."""
        gemma4_response = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 2\n"
            "CONFIANCE: 0.85\n"
            "BOUCLE: NON\n"
            "PLAN:\n"
            "1. Ouvrir l'explorateur de fichiers\n"
        )

        with patch("requests.post", return_value=_mock_gemma4_response(gemma4_response)):
            plan = planner.understand(
                "Ouvre mes images",
                available_workflows=sample_workflows,
            )

        assert plan.workflow_match == "sess_002"
        assert plan.workflow_name == "Explorateur de fichiers"

    def test_understand_workflow_avec_description_dans_prompt(self, planner, sample_workflows):
        """The prompt sent to gemma4 includes the workflow descriptions."""
        captured_body = {}

        # NOTE: the parameter is named `json` to mirror requests.post's
        # keyword API; it locally shadows the json module (unused here).
        def capture_post(url, json=None, **kwargs):
            captured_body.update(json or {})
            return _mock_gemma4_response("COMPRIS: OUI\nWORKFLOW: AUCUN\nBOUCLE: NON\n")

        with patch("requests.post", side_effect=capture_post):
            planner.understand(
                "Ouvre le bloc-notes",
                available_workflows=sample_workflows,
            )

        prompt_content = captured_body["messages"][0]["content"]
        # The workflow descriptions must appear in the prompt.
        assert "Ouvrir Bloc-notes via Exécuter" in prompt_content
        assert "Naviguer dans l'Explorateur" in prompt_content
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests : détection de boucle
|
||||
# =========================================================================
|
||||
|
||||
class TestUnderstandDetecteBoucle:
    """Loop detection by understand() (BOUCLE / SOURCE_BOUCLE lines)."""

    def test_understand_detecte_boucle(self, planner, sample_workflows):
        """'traite TOUS les dossiers' → is_loop=True."""
        reply_lines = [
            "COMPRIS: OUI",
            "WORKFLOW: 3",
            "CONFIANCE: 0.8",
            "PARAMETRES: AUCUN",
            "BOUCLE: OUI",
            "SOURCE_BOUCLE: écran",
            "PLAN:",
            "1. Pour chaque dossier dans la liste",
            "2. Ouvrir le dossier",
            "3. Coder les diagnostics",
        ]
        fake_resp = _mock_gemma4_response("\n".join(reply_lines) + "\n")

        with patch("requests.post", return_value=fake_resp):
            plan = planner.understand(
                "Traite TOUS les dossiers de la liste",
                available_workflows=sample_workflows,
            )

        assert plan.is_loop is True
        assert plan.loop_source == "écran"

    def test_understand_pas_de_boucle(self, planner):
        """A simple one-shot order → is_loop=False."""
        reply_lines = [
            "COMPRIS: OUI",
            "WORKFLOW: AUCUN",
            "BOUCLE: NON",
            "SOURCE_BOUCLE: aucun",
            "PLAN:",
            "1. Ouvrir le navigateur",
        ]
        fake_resp = _mock_gemma4_response("\n".join(reply_lines) + "\n")

        with patch("requests.post", return_value=fake_resp):
            plan = planner.understand("Ouvre le navigateur")

        assert plan.is_loop is False
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests : extraction de paramètres
|
||||
# =========================================================================
|
||||
|
||||
class TestUnderstandExtraitParametres:
    """Verify parameter extraction (PARAMETRES lines) from gemma4 replies."""

    def test_understand_extrait_parametres(self, planner, sample_workflows):
        """'dossiers de janvier' → parameters contains mois=janvier."""
        gemma4_response = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 3\n"
            "CONFIANCE: 0.85\n"
            "PARAMETRES: mois=janvier\n"
            "BOUCLE: OUI\n"
            "SOURCE_BOUCLE: écran\n"
            "PLAN:\n"
            "1. Filtrer les dossiers de janvier\n"
        )

        with patch("requests.post", return_value=_mock_gemma4_response(gemma4_response)):
            plan = planner.understand(
                "Traite les dossiers de janvier",
                available_workflows=sample_workflows,
            )

        assert "mois" in plan.parameters
        assert plan.parameters["mois"] == "janvier"

    def test_understand_parametres_multiples(self, planner):
        """Several parameters given as separate bullet lines."""
        gemma4_response = (
            "COMPRIS: OUI\n"
            "WORKFLOW: AUCUN\n"
            "PARAMETRES:\n"
            "- patient=DUPONT\n"
            "- date=2026-01-15\n"
            "BOUCLE: NON\n"
            "PLAN:\n"
            "1. Rechercher le patient DUPONT\n"
        )

        with patch("requests.post", return_value=_mock_gemma4_response(gemma4_response)):
            plan = planner.understand("Cherche le dossier de DUPONT du 15 janvier")

        assert plan.parameters.get("patient") == "DUPONT"
        assert plan.parameters.get("date") == "2026-01-15"

    def test_understand_parametres_inline(self, planner):
        """Parameters on the same line as PARAMETRES:."""
        gemma4_response = (
            "COMPRIS: OUI\n"
            "WORKFLOW: AUCUN\n"
            "PARAMETRES: nom=Martin, ville=Paris\n"
            "BOUCLE: NON\n"
            "PLAN:\n"
            "1. Chercher Martin à Paris\n"
        )

        with patch("requests.post", return_value=_mock_gemma4_response(gemma4_response)):
            plan = planner.understand("Cherche Martin à Paris")

        assert plan.parameters.get("nom") == "Martin"
        assert plan.parameters.get("ville") == "Paris"
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests : _parse_understanding (parsing tolérant)
|
||||
# =========================================================================
|
||||
|
||||
class TestParseUnderstanding:
    """Tolerant parsing (_parse_understanding) of varied gemma4 reply formats."""

    def test_parse_markdown_gras(self, planner):
        """A reply using **bold** markers is still parsed correctly."""
        plan = TaskPlan(instruction="test")
        content = (
            "**COMPRIS:** OUI\n"
            "**WORKFLOW:** AUCUN\n"
            "**BOUCLE:** NON\n"
            "**PLAN:**\n"
            "1. Première étape\n"
        )
        result = planner._parse_understanding(plan, content, [])
        assert result.understood is True
        assert result.mode == "free"

    def test_parse_confiance_pourcentage(self, planner, sample_workflows):
        """CONFIANCE: 90% → match_confidence=0.9."""
        plan = TaskPlan(instruction="test")
        content = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 1\n"
            "CONFIANCE: 90%\n"
            "BOUCLE: NON\n"
        )
        result = planner._parse_understanding(plan, content, sample_workflows)
        assert result.match_confidence == pytest.approx(0.9)

    def test_parse_confiance_virgule(self, planner, sample_workflows):
        """CONFIANCE: 0,85 (French decimal comma) → match_confidence=0.85."""
        plan = TaskPlan(instruction="test")
        content = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 1\n"
            "CONFIANCE: 0,85\n"
            "BOUCLE: NON\n"
        )
        result = planner._parse_understanding(plan, content, sample_workflows)
        assert result.match_confidence == pytest.approx(0.85)

    def test_parse_workflow_avec_parentheses(self, planner, sample_workflows):
        """WORKFLOW: 2 (Explorateur) → index 2 is correctly extracted."""
        plan = TaskPlan(instruction="test")
        content = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 2 (Explorateur de fichiers)\n"
            "BOUCLE: NON\n"
        )
        result = planner._parse_understanding(plan, content, sample_workflows)
        assert result.workflow_match == "sess_002"

    def test_parse_workflow_aucun_variantes(self, planner, sample_workflows):
        """Every spelling of 'none' is recognised as no-match."""
        for val in ("AUCUN", "None", "N/A", "-", "NON"):
            plan = TaskPlan(instruction="test")
            content = f"COMPRIS: OUI\nWORKFLOW: {val}\nBOUCLE: NON\n"
            result = planner._parse_understanding(plan, content, sample_workflows)
            assert result.workflow_match == "", f"Devrait être vide pour '{val}'"

    def test_parse_etapes_tirets(self, planner):
        """Dash-prefixed steps → added to the plan."""
        plan = TaskPlan(instruction="test")
        content = (
            "COMPRIS: OUI\n"
            "WORKFLOW: AUCUN\n"
            "BOUCLE: NON\n"
            "PLAN:\n"
            "- Ouvrir l'application\n"
            "- Cliquer sur Fichier\n"
            "- Sauvegarder\n"
        )
        result = planner._parse_understanding(plan, content, [])
        assert len(result.steps) == 3
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests : _steps_to_actions
|
||||
# =========================================================================
|
||||
|
||||
class TestStepsToActions:
    """Verify the conversion of plan steps into action JSON (_steps_to_actions)."""

    def test_steps_to_actions_format(self, planner):
        """Generated actions have the expected shape (type, target_spec, ...)."""
        gemma4_response = (
            '{"type": "click", "target_spec": {"by_text": "Rechercher"}}\n'
            '{"type": "type", "text": "bloc-notes"}\n'
            '{"type": "key_combo", "keys": ["enter"]}\n'
            '{"type": "wait", "duration_ms": 2000}\n'
        )

        with patch("requests.post", return_value=_mock_gemma4_response(gemma4_response)):
            actions = planner._steps_to_actions(
                [{"description": "1. Ouvrir le bloc-notes"}],
                {},
            )

        assert len(actions) == 4
        assert actions[0]["type"] == "click"
        assert actions[0]["visual_mode"] is True  # Added automatically
        assert actions[0]["target_spec"]["by_text"] == "Rechercher"
        assert actions[1]["type"] == "type"
        assert actions[1]["text"] == "bloc-notes"
        assert actions[2]["type"] == "key_combo"
        assert actions[2]["keys"] == ["enter"]
        assert actions[3]["type"] == "wait"
        assert actions[3]["duration_ms"] == 2000

    def test_steps_to_actions_json_array(self, planner):
        """gemma4 returns a JSON array → parsed correctly."""
        gemma4_response = (
            'Voici les actions :\n'
            '```json\n'
            '[\n'
            '  {"type": "click", "target_spec": {"by_text": "Fichier"}},\n'
            '  {"type": "click", "target_spec": {"by_text": "Ouvrir"}}\n'
            ']\n'
            '```\n'
        )

        with patch("requests.post", return_value=_mock_gemma4_response(gemma4_response)):
            actions = planner._steps_to_actions(
                [{"description": "1. Ouvrir un fichier"}],
                {},
            )

        assert len(actions) == 2
        assert actions[0]["target_spec"]["by_text"] == "Fichier"
        assert actions[1]["target_spec"]["by_text"] == "Ouvrir"

    def test_steps_to_actions_nested_json(self, planner):
        """Nested JSON (target_spec) → parsed correctly."""
        gemma4_response = (
            '{"type": "click", "target_spec": {"by_text": "OK", "window_title": "Confirmation"}}\n'
        )

        with patch("requests.post", return_value=_mock_gemma4_response(gemma4_response)):
            actions = planner._steps_to_actions(
                [{"description": "1. Confirmer"}],
                {},
            )

        assert len(actions) == 1
        assert actions[0]["target_spec"]["window_title"] == "Confirmation"

    def test_steps_to_actions_gemma4_erreur(self, planner):
        """A gemma4 error → empty list."""
        mock_resp = MagicMock()
        mock_resp.ok = False

        with patch("requests.post", return_value=mock_resp):
            actions = planner._steps_to_actions(
                [{"description": "1. Faire quelque chose"}],
                {},
            )

        assert actions == []

    def test_steps_to_actions_filtre_types_invalides(self, planner):
        """Only valid types (click, type, key_combo, wait) are accepted."""
        gemma4_response = (
            '{"type": "click", "target_spec": {"by_text": "OK"}}\n'
            '{"type": "invalid_action", "foo": "bar"}\n'
            '{"type": "wait", "duration_ms": 500}\n'
            '{"not_a_type": "test"}\n'
        )

        with patch("requests.post", return_value=_mock_gemma4_response(gemma4_response)):
            actions = planner._steps_to_actions(
                [{"description": "1. Test"}],
                {},
            )

        assert len(actions) == 2
        assert actions[0]["type"] == "click"
        assert actions[1]["type"] == "wait"
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests : _parse_actions_json (parsing robuste)
|
||||
# =========================================================================
|
||||
|
||||
class TestParseActionsJson:
    """Robust parsing of action JSON emitted by gemma4 (_parse_actions_json)."""

    def test_parse_json_une_par_ligne(self):
        """One JSON object per line."""
        raw = "\n".join([
            '{"type": "click", "target_spec": {"by_text": "A"}}',
            '{"type": "type", "text": "hello"}',
        ]) + "\n"
        assert len(TaskPlanner._parse_actions_json(raw)) == 2

    def test_parse_json_array(self):
        """A single JSON array."""
        raw = '[{"type": "click", "target_spec": {"by_text": "A"}}, {"type": "wait", "duration_ms": 1000}]'
        assert len(TaskPlanner._parse_actions_json(raw)) == 2

    def test_parse_json_avec_texte_autour(self):
        """JSON surrounded by free-text commentary."""
        raw = (
            "Voici les actions RPA :\n\n"
            '{"type": "click", "target_spec": {"by_text": "Envoyer"}}\n'
            "\n"
            "C'est tout.\n"
        )
        parsed = TaskPlanner._parse_actions_json(raw)
        assert len(parsed) == 1
        assert parsed[0]["target_spec"]["by_text"] == "Envoyer"

    def test_parse_json_vide(self):
        """Empty or JSON-free content → empty list."""
        for raw in ("", "Pas de JSON ici"):
            assert TaskPlanner._parse_actions_json(raw) == []

    def test_parse_json_markdown_code_block(self):
        """JSON wrapped in a markdown code fence."""
        raw = (
            "```json\n"
            '{"type": "type", "text": "bonjour"}\n'
            "```\n"
        )
        parsed = TaskPlanner._parse_actions_json(raw)
        assert len(parsed) == 1
        assert parsed[0]["text"] == "bonjour"
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests : _extract_session_description
|
||||
# =========================================================================
|
||||
|
||||
class TestExtractSessionDescription:
|
||||
"""Vérifier que les descriptions de session sont lisibles et sémantiques."""
|
||||
|
||||
def _write_events(self, tmp_path, events):
|
||||
"""Écrire des événements dans un fichier JSONL temporaire."""
|
||||
events_file = tmp_path / "live_events.jsonl"
|
||||
with open(events_file, "w") as f:
|
||||
for evt in events:
|
||||
f.write(json.dumps(evt, ensure_ascii=False) + "\n")
|
||||
return events_file
|
||||
|
||||
def test_extract_session_description_bloc_notes(self, tmp_path):
|
||||
"""Session Bloc-notes via Win+R → description sémantique."""
|
||||
events = [
|
||||
{"event": {"type": "key_combo", "keys": ["win", "r"],
|
||||
"window": {"title": "Bureau"}}},
|
||||
{"event": {"type": "window_focus_change",
|
||||
"from": {"title": "Bureau"},
|
||||
"to": {"title": "Exécuter"}}},
|
||||
{"event": {"type": "text_input", "text": "notepad",
|
||||
"window": {"title": "Exécuter"}}},
|
||||
{"event": {"type": "mouse_click", "button": "left",
|
||||
"window": {"title": "Exécuter"}}},
|
||||
{"event": {"type": "window_focus_change",
|
||||
"from": {"title": "Exécuter"},
|
||||
"to": {"title": "Sans titre – Bloc-notes"}}},
|
||||
{"event": {"type": "text_input", "text": "Bonjour le monde",
|
||||
"window": {"title": "Sans titre – Bloc-notes"}}},
|
||||
]
|
||||
events_file = self._write_events(tmp_path, events)
|
||||
|
||||
# Importer depuis api_stream (la fonction est au niveau module)
|
||||
from agent_v0.server_v1.api_stream import _extract_session_description
|
||||
desc = _extract_session_description(events_file)
|
||||
|
||||
assert desc["event_count"] == 6
|
||||
# La description doit être lisible et pas juste "Bloc-notes, Exécuter"
|
||||
description = desc["description"]
|
||||
assert "Bloc-notes" in description or "bloc-notes" in description.lower()
|
||||
# Le nom doit contenir l'app
|
||||
assert "Bloc-notes" in desc["name"]
|
||||
|
||||
def test_extract_session_description_explorateur(self, tmp_path):
|
||||
"""Session Explorateur de fichiers → description pertinente."""
|
||||
events = [
|
||||
{"event": {"type": "window_focus_change",
|
||||
"from": {"title": "Bureau"},
|
||||
"to": {"title": "Images – Explorateur de fichiers"}}},
|
||||
{"event": {"type": "mouse_click", "button": "left",
|
||||
"window": {"title": "Images – Explorateur de fichiers"}}},
|
||||
{"event": {"type": "mouse_click", "button": "left",
|
||||
"window": {"title": "Images – Explorateur de fichiers"}}},
|
||||
{"event": {"type": "mouse_click", "button": "left",
|
||||
"window": {"title": "Images – Explorateur de fichiers"}}},
|
||||
]
|
||||
events_file = self._write_events(tmp_path, events)
|
||||
|
||||
from agent_v0.server_v1.api_stream import _extract_session_description
|
||||
desc = _extract_session_description(events_file)
|
||||
|
||||
assert "Explorateur" in desc["name"] or "Explorateur" in desc["description"]
|
||||
|
||||
def test_extract_session_description_vide(self, tmp_path):
|
||||
"""Fichier vide → description par défaut."""
|
||||
events_file = self._write_events(tmp_path, [])
|
||||
|
||||
from agent_v0.server_v1.api_stream import _extract_session_description
|
||||
desc = _extract_session_description(events_file)
|
||||
|
||||
assert desc["event_count"] == 0
|
||||
assert desc["name"] == "Session sans nom"
|
||||
|
||||
def test_extract_session_description_cmd(self, tmp_path):
|
||||
"""Session avec cmd.exe → description contient cmd."""
|
||||
events = [
|
||||
{"event": {"type": "window_focus_change",
|
||||
"from": {"title": "Bureau"},
|
||||
"to": {"title": "C:\\Windows\\system32\\cmd.exe"}}},
|
||||
{"event": {"type": "text_input", "text": "dir",
|
||||
"window": {"title": "C:\\Windows\\system32\\cmd.exe"}}},
|
||||
{"event": {"type": "text_input", "text": "cd documents",
|
||||
"window": {"title": "C:\\Windows\\system32\\cmd.exe"}}},
|
||||
]
|
||||
events_file = self._write_events(tmp_path, events)
|
||||
|
||||
from agent_v0.server_v1.api_stream import _extract_session_description
|
||||
desc = _extract_session_description(events_file)
|
||||
|
||||
assert desc["event_count"] == 3
|
||||
# Le nom ou la description doit mentionner cmd
|
||||
full = f"{desc['name']} {desc['description']}"
|
||||
assert "cmd" in full.lower()
|
||||
|
||||
def test_extract_session_description_recherche_windows(self, tmp_path):
|
||||
"""Session avec recherche Windows (Win+S) → description mentionne recherche."""
|
||||
events = [
|
||||
{"event": {"type": "key_combo", "keys": ["win", "s"],
|
||||
"window": {"title": "Bureau"}}},
|
||||
{"event": {"type": "window_focus_change",
|
||||
"from": {"title": "Bureau"},
|
||||
"to": {"title": "Rechercher"}}},
|
||||
{"event": {"type": "text_input", "text": "calculator",
|
||||
"window": {"title": "Rechercher"}}},
|
||||
]
|
||||
events_file = self._write_events(tmp_path, events)
|
||||
|
||||
from agent_v0.server_v1.api_stream import _extract_session_description
|
||||
desc = _extract_session_description(events_file)
|
||||
|
||||
# La description doit mentionner la recherche Windows
|
||||
assert "recherche" in desc["description"].lower()
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests : list_capabilities
|
||||
# =========================================================================
|
||||
|
||||
class TestListCapabilities:
    """Capability-listing output of the planner."""

    def test_list_capabilities_avec_workflows(self, planner, sample_workflows):
        """With workflows → readable text that names them."""
        listing = planner.list_capabilities(sample_workflows)
        for expected in ("Léa sait faire", "Bloc-notes"):
            assert expected in listing

    def test_list_capabilities_sans_workflows(self, planner):
        """Without workflows → a help message."""
        listing = planner.list_capabilities([])
        assert "pas encore appris" in listing
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests : execute (mode replay et free)
|
||||
# =========================================================================
|
||||
|
||||
class TestExecute:
    """Plan execution (replay mode and failure paths)."""

    def test_execute_replay(self, planner):
        """Replay mode → callback invoked with the matched session_id."""
        plan = TaskPlan(
            instruction="Ouvre le bloc-notes",
            understood=True,
            workflow_match="sess_001",
            workflow_name="Bloc-notes",
            mode="replay",
        )

        callback = MagicMock(return_value="replay_123")
        result = planner.execute(plan, replay_callback=callback)

        assert result.success is True
        callback.assert_called_once_with(
            session_id="sess_001",
            machine_id="default",
            params={},
        )

    def test_execute_non_compris(self, planner):
        """A plan that was not understood → failure."""
        plan = TaskPlan(instruction="blah", understood=False)
        result = planner.execute(plan)
        assert result.success is False
        # Case-insensitive check only: the former second disjunct
        # ('"non comprise" in result.summary') was redundant, since a
        # lowercase literal found verbatim is always found in .lower().
        assert "non comprise" in result.summary.lower()

    def test_execute_sans_callback(self, planner):
        """Replay mode without a callback → failure."""
        plan = TaskPlan(
            instruction="test",
            understood=True,
            workflow_match="sess_001",
            mode="replay",
        )
        result = planner.execute(plan, replay_callback=None)
        assert result.success is False
||||
Reference in New Issue
Block a user