feat: pipeline complet MACRO/MÉSO/MICRO — Critic, Observer, Policy, Recovery, Learning, Audit Trail, TaskPlanner
Architecture 3 niveaux implémentée et testée (137 tests unitaires + 21 visuels) : MÉSO (acteur intelligent) : - P0 Critic : vérification sémantique post-action via gemma4 (replay_verifier.py) - P1 Observer : pré-analyse écran avant chaque action (api_stream.py /pre_analyze) - P2 Grounding/Policy : séparation localisation (grounding.py) et décision (policy.py) - P3 Recovery : rollback automatique Ctrl+Z/Escape/Alt+F4 (recovery.py) - P4 Learning : apprentissage runtime avec boucle de consolidation (replay_learner.py) MACRO (planificateur) : - TaskPlanner : comprend les ordres en langage naturel via gemma4 (task_planner.py) - Contexte métier TIM/CIM-10 pour les hôpitaux (domain_context.py) - Endpoint POST /api/v1/task pour l'exécution par instruction Traçabilité : - Audit trail complet avec 18 champs par action (audit_trail.py) - Endpoints GET /audit/history, /audit/summary, /audit/export (CSV) Grounding : - Fix parsing bbox_2d qwen2.5vl (pixels relatifs, pas grille 1000x1000) - Benchmarks visuels sur captures réelles (3 approches : baseline, zoom, Citrix) - Reproductibilité validée : variance < 0.008 sur 10 itérations Sécurité : - Tokens de production retirés du code source → .env.local - Secret key aléatoire si non configuré - Suppression logs qui leakent les tokens Résultats : 80% de replay (vs 12.5% avant), 100% détection visuelle Citrix JPEG Q20 Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -241,6 +241,102 @@ class ActionExecutorV1:
|
||||
logger.warning(f"Acteur gemma4 indisponible : {e}")
|
||||
return "EXECUTER"
|
||||
|
||||
# =========================================================================
|
||||
# Observer — pré-analyse écran avant chaque action
|
||||
# =========================================================================
|
||||
|
||||
def _observe_screen(
    self, server_url: str, target_spec: dict,
    screen_width: int, screen_height: int,
) -> "dict | None":
    """Observer: analyze the screen BEFORE resolving the target.

    Detects popups, dialogs, and unexpected states BEFORE attempting
    visual resolution. This is the "pre-exploration" step that improves
    performance dramatically (cf. Claude Computer Use benchmarks).

    Two-stage strategy (fast first, then smart):
        1. Quick local check: window title against known popup patterns
        2. If a server is available: send the screenshot for VLM pre-analysis

    Args:
        server_url: Base URL of the analysis server ("" disables stage 2).
        target_spec: Target specification; only ``expected_state`` and
            ``window_title`` are read here, both forwarded to the server.
        screen_width, screen_height: Screen resolution, forwarded as-is.

    Returns:
        None if the screen looks OK (no problem detected, or no analysis
        was possible). Otherwise a dict with ``screen_state``
        ("ok"|"popup"|"unexpected"), details, and popup coordinates.
    """
    import requests as _requests

    # Step 1: quick local check (window title).
    # Best-effort: the title probe must never block the replay, so any
    # failure here silently falls through to the server-side analysis.
    try:
        from ..window_info_crossplatform import get_active_window_info
        current_info = get_active_window_info()
        current_title = current_info.get("title", "").lower()

        # Common popup/dialog title patterns (Windows FR + EN).
        popup_patterns = [
            "enregistrer", "sauvegarder", "voulez-vous",
            "confirmer", "confirmation", "avertissement",
            "erreur", "error", "warning", "alert",
            "do you want", "save as", "are you sure",
        ]
        for pattern in popup_patterns:
            if pattern in current_title:
                logger.info(f"Observer : popup détectée par titre — '{current_title}'")
                # The title alone cannot give us click coordinates
                # → return a popup state without coords; the caller will
                # fall back to handle_popup_vlm().
                return {
                    "screen_state": "popup",
                    "popup_label": current_title,
                    "popup_coords": None,
                    "detail": f"Popup détectée par titre : {current_title}",
                }
    except Exception:
        pass

    # Step 2: server-side pre-analysis (if a server is configured).
    if not server_url:
        return None  # No server → no advanced pre-analysis.

    # Send the screenshot to the server for VLM-based popup detection.
    screenshot_b64 = self._capture_screenshot_b64(max_width=0, quality=60)
    if not screenshot_b64:
        return None

    try:
        url = f"{server_url}/traces/stream/replay/pre_analyze"
        from ..config import API_TOKEN
        headers = {"Content-Type": "application/json"}
        if API_TOKEN:
            headers["Authorization"] = f"Bearer {API_TOKEN}"

        resp = _requests.post(
            url,
            json={
                "screenshot_b64": screenshot_b64,
                "expected_state": target_spec.get("expected_state", ""),
                "window_title": target_spec.get("window_title", ""),
                "screen_width": screen_width,
                "screen_height": screen_height,
            },
            headers=headers,
            timeout=10,
        )

        if resp.ok:
            data = resp.json()
            state = data.get("screen_state", "ok")
            if state != "ok":
                logger.info(f"Observer serveur : {state} — {data.get('detail', '')}")
                return data
        # Non-OK response: the server does not support /pre_analyze yet
        # → stay silent and treat the screen as OK.
    except _requests.Timeout:
        logger.debug("Observer : serveur timeout (10s)")
    except _requests.ConnectionError:
        pass  # Server unreachable — not a problem, continue without it.
    except Exception as e:
        logger.debug(f"Observer : erreur serveur — {e}")

    return None  # Screen OK, or no pre-analysis possible.
|
||||
|
||||
# =========================================================================
|
||||
# Execution replay (polling serveur)
|
||||
# =========================================================================
|
||||
@@ -320,7 +416,11 @@ class ActionExecutorV1:
|
||||
or expected_title.lower() in current_title.lower()
|
||||
or current_title.lower() in expected_title.lower()
|
||||
)
|
||||
if not title_match:
|
||||
# Ignorer la fenêtre de Léa elle-même (overlay agent)
|
||||
_lea_windows = ("léa", "lea —", "léa —", "lea -", "léa -", "lea assistante", "léa assistante")
|
||||
is_lea_window = any(p in current_title.lower() for p in _lea_windows)
|
||||
|
||||
if not title_match and not is_lea_window:
|
||||
logger.warning(
|
||||
f"PRÉ-VÉRIF ÉCHOUÉE : attendu '{expected_title}', "
|
||||
f"actuel '{current_title}' — STOP"
|
||||
@@ -329,50 +429,110 @@ class ActionExecutorV1:
|
||||
result["success"] = False
|
||||
result["error"] = f"Fenêtre incorrecte: '{current_title}' (attendu: '{expected_title}')"
|
||||
return result
|
||||
elif is_lea_window:
|
||||
logger.info(f"PRÉ-VÉRIF : fenêtre Léa détectée, ignorée — on continue")
|
||||
else:
|
||||
logger.info(f"PRÉ-VÉRIF OK : '{current_title}'")
|
||||
|
||||
# ── OBSERVER : pré-analyse écran avant résolution ──
|
||||
# Détecte popups, dialogues, états inattendus AVANT de chercher la cible.
|
||||
# Si un problème est détecté, on le gère tout de suite (pas après l'échec).
|
||||
# Ref: docs/VISION_RPA_INTELLIGENT.md — "Il observe"
|
||||
if visual_mode and target_spec and action_type == "click":
|
||||
observation = self._observe_screen(server_url, target_spec, width, height)
|
||||
if observation:
|
||||
obs_state = observation.get("screen_state", "ok")
|
||||
|
||||
if obs_state == "popup":
|
||||
# Popup détectée AVANT la résolution — la fermer
|
||||
popup_label = observation.get("popup_label", "popup")
|
||||
popup_coords = observation.get("popup_coords")
|
||||
print(f" [OBSERVER] Popup détectée : '{popup_label}' — fermeture")
|
||||
logger.info(f"Observer : popup '{popup_label}' détectée avant résolution")
|
||||
if popup_coords:
|
||||
real_x = int(popup_coords["x_pct"] * width)
|
||||
real_y = int(popup_coords["y_pct"] * height)
|
||||
self._click((real_x, real_y), "left")
|
||||
time.sleep(1.0)
|
||||
print(f" [OBSERVER] Popup fermée — reprise du flow normal")
|
||||
else:
|
||||
# Pas de coordonnées → fallback sur handle_popup_vlm classique
|
||||
self._handle_popup_vlm()
|
||||
|
||||
elif obs_state == "unexpected":
|
||||
# État inattendu (pas la bonne page/écran)
|
||||
detail = observation.get("detail", "état inattendu")
|
||||
print(f" [OBSERVER] État inattendu : {detail}")
|
||||
logger.warning(f"Observer : état inattendu — {detail}")
|
||||
# Demander à l'acteur (gemma4) de décider
|
||||
decision = self._actor_decide(action, target_spec)
|
||||
if decision == "STOPPER":
|
||||
result["success"] = False
|
||||
result["error"] = f"observer_unexpected:{detail}"
|
||||
return result
|
||||
elif decision == "PASSER":
|
||||
result["success"] = True
|
||||
result["warning"] = "observer_skip"
|
||||
return result
|
||||
# EXECUTER → continuer normalement
|
||||
|
||||
if visual_mode and target_spec and server_url:
|
||||
resolved = self._resolve_target_visual(
|
||||
server_url, target_spec, x_pct, y_pct, width, height
|
||||
# ── GROUNDING : localisation pure via GroundingEngine ──
|
||||
from .grounding import GroundingEngine
|
||||
grounding = GroundingEngine(self)
|
||||
grounding_result = grounding.locate(
|
||||
server_url, target_spec, x_pct, y_pct, width, height,
|
||||
)
|
||||
if resolved:
|
||||
x_pct = resolved["x_pct"]
|
||||
y_pct = resolved["y_pct"]
|
||||
result["visual_resolved"] = resolved.get("resolved", False)
|
||||
# Métriques de résolution
|
||||
result["resolution_method"] = resolved.get("resolution_method", "")
|
||||
result["resolution_score"] = resolved.get("resolution_score", 0.0)
|
||||
result["resolution_elapsed_ms"] = resolved.get("resolution_elapsed_ms", 0.0)
|
||||
if resolved.get("resolved"):
|
||||
if grounding_result.found:
|
||||
x_pct = grounding_result.x_pct
|
||||
y_pct = grounding_result.y_pct
|
||||
result["visual_resolved"] = True
|
||||
result["resolution_method"] = grounding_result.method
|
||||
result["resolution_score"] = grounding_result.score
|
||||
result["resolution_elapsed_ms"] = grounding_result.elapsed_ms
|
||||
logger.info(
|
||||
f"Visual resolve OK [{result['resolution_method']}] "
|
||||
f"{result['resolution_elapsed_ms']:.0f}ms : "
|
||||
f"{resolved.get('matched_element', {}).get('label', '?')} "
|
||||
f"Grounding OK [{grounding_result.method}] "
|
||||
f"{grounding_result.elapsed_ms:.0f}ms : "
|
||||
f"{grounding_result.detail or '?'} "
|
||||
f"-> ({x_pct:.4f}, {y_pct:.4f})"
|
||||
)
|
||||
|
||||
# ---- Hash AVANT l'action (pour verification post-action) ----
|
||||
# Seules les actions click et key_combo sont verifiees : elles
|
||||
# provoquent un changement visible de l'ecran (ouverture de fenetre,
|
||||
# focus, etc.). Les actions type/wait/scroll ne sont pas verifiees.
|
||||
# ---- Screenshot + hash AVANT l'action (pour le Critic post-action) ----
|
||||
# Le serveur utilise screenshot_before + screenshot_after pour évaluer
|
||||
# si l'action a eu l'effet attendu (Critic sémantique VLM).
|
||||
needs_screen_check = action_type in ("click", "key_combo")
|
||||
hash_before = ""
|
||||
screenshot_before_b64 = ""
|
||||
if needs_screen_check:
|
||||
hash_before = self._quick_screenshot_hash()
|
||||
screenshot_before_b64 = self._capture_screenshot_b64()
|
||||
|
||||
if action_type == "click":
|
||||
# Si visual_mode est activé, le resolve DOIT réussir.
|
||||
# Pas de fallback blind — on arrête le replay si la cible
|
||||
# n'est pas trouvée visuellement. C'est un RPA VISUEL.
|
||||
if visual_mode and not result.get("visual_resolved"):
|
||||
# Avant de STOP, vérifier s'il y a une popup imprévue via le VLM
|
||||
print(f" [POPUP-VLM] Cible non trouvée — vérification popup imprévue...")
|
||||
logger.info(f"Action {action_id} : cible non trouvée, tentative gestion popup VLM")
|
||||
popup_handled = self._handle_popup_vlm()
|
||||
if popup_handled:
|
||||
# Popup fermée — re-tenter le resolve
|
||||
print(f" [POPUP-VLM] Popup gérée, re-tentative du resolve visuel...")
|
||||
# ── Policy : décider quoi faire quand grounding échoue ──
|
||||
from .policy import PolicyEngine, Decision
|
||||
policy = PolicyEngine(self)
|
||||
target_desc = self._describe_target(target_spec)
|
||||
retry_count = action.get("_retry_count", 0)
|
||||
|
||||
policy_decision = policy.decide(
|
||||
action=action, target_spec=target_spec,
|
||||
retry_count=retry_count, max_retries=1,
|
||||
)
|
||||
print(
|
||||
f" [POLICY] {policy_decision.decision.value} — "
|
||||
f"{policy_decision.reason}"
|
||||
)
|
||||
logger.info(
|
||||
f"Action {action_id} : Policy → {policy_decision.decision.value} "
|
||||
f"({policy_decision.reason})"
|
||||
)
|
||||
|
||||
if policy_decision.decision == Decision.RETRY:
|
||||
# Re-tenter le grounding après correction (popup fermée, etc.)
|
||||
resolved2 = self._resolve_target_visual(
|
||||
server_url, target_spec, x_pct, y_pct, width, height
|
||||
)
|
||||
@@ -380,53 +540,35 @@ class ActionExecutorV1:
|
||||
x_pct = resolved2["x_pct"]
|
||||
y_pct = resolved2["y_pct"]
|
||||
result["visual_resolved"] = True
|
||||
print(
|
||||
f" [POPUP-VLM] Re-resolve OK après popup : "
|
||||
f"({x_pct:.3f}, {y_pct:.3f})"
|
||||
)
|
||||
logger.info(
|
||||
f"Action {action_id} : re-resolve OK après popup "
|
||||
f"({x_pct:.3f}, {y_pct:.3f})"
|
||||
)
|
||||
print(f" [POLICY] Re-resolve OK après {policy_decision.action_taken}")
|
||||
else:
|
||||
# Cible toujours invisible après gestion popup — PAUSE supervisée
|
||||
target_desc = self._describe_target(target_spec)
|
||||
# Re-resolve échoué — SUPERVISE (rendre la main)
|
||||
result["success"] = False
|
||||
result["error"] = "target_not_found"
|
||||
result["target_description"] = target_desc
|
||||
result["target_spec"] = target_spec
|
||||
result["screenshot"] = self._capture_screenshot_b64()
|
||||
result["warning"] = "visual_resolve_failed"
|
||||
print(f" [ERREUR] Élément toujours non trouvé après gestion popup — PAUSE")
|
||||
logger.error(
|
||||
f"Action {action_id} : cible '{target_desc}' non trouvée "
|
||||
f"après popup, replay en pause supervisée"
|
||||
)
|
||||
# Notifier l'utilisateur via toast
|
||||
self.notifier.replay_target_not_found(target_desc)
|
||||
return result
|
||||
else:
|
||||
# Cible invisible — demander à l'acteur (gemma4) de décider
|
||||
target_desc = self._describe_target(target_spec)
|
||||
decision = self._actor_decide(action, target_spec)
|
||||
|
||||
if decision == "PASSER":
|
||||
print(f" [ACTEUR] Décision: PASSER — l'état est déjà atteint")
|
||||
logger.info(f"Action {action_id} : acteur décide PASSER pour '{target_desc}'")
|
||||
elif policy_decision.decision == Decision.SKIP:
|
||||
result["success"] = True
|
||||
result["warning"] = "actor_skip"
|
||||
elif decision == "STOPPER":
|
||||
print(f" [ACTEUR] Décision: STOPPER — état incohérent")
|
||||
logger.error(f"Action {action_id} : acteur décide STOPPER pour '{target_desc}'")
|
||||
result["warning"] = "policy_skip"
|
||||
return result
|
||||
|
||||
elif policy_decision.decision == Decision.ABORT:
|
||||
result["success"] = False
|
||||
result["error"] = f"actor_stop:{target_desc}"
|
||||
result["error"] = f"policy_abort:{target_desc}"
|
||||
self.notifier.replay_target_not_found(target_desc)
|
||||
else:
|
||||
# EXECUTER ou décision inconnue → pause supervisée (fallback)
|
||||
print(f" [ACTEUR] Décision: {decision} — pause supervisée")
|
||||
logger.warning(f"Action {action_id} : acteur décide {decision}, pause")
|
||||
return result
|
||||
|
||||
else: # SUPERVISE ou CONTINUE
|
||||
result["success"] = False
|
||||
result["error"] = "target_not_found"
|
||||
result["target_description"] = target_desc
|
||||
result["target_spec"] = target_spec
|
||||
result["screenshot"] = self._capture_screenshot_b64()
|
||||
result["warning"] = "visual_resolve_failed"
|
||||
self.notifier.replay_target_not_found(target_desc)
|
||||
return result
|
||||
@@ -555,6 +697,10 @@ class ActionExecutorV1:
|
||||
|
||||
result["success"] = True
|
||||
|
||||
# Stocker le screenshot_before pour le Critic côté serveur
|
||||
if screenshot_before_b64:
|
||||
result["screenshot_before"] = screenshot_before_b64
|
||||
|
||||
# ---- Verification post-action : l'ecran a-t-il change ? ----
|
||||
# Verifie UNIQUEMENT, ne tente PAS de gerer les popups
|
||||
# (Enter/Escape perturbent l'application).
|
||||
@@ -564,6 +710,17 @@ class ActionExecutorV1:
|
||||
hash_before, timeout_ms=3000
|
||||
)
|
||||
if not screen_changed:
|
||||
# ── Recovery : tenter un rollback si l'action n'a pas eu d'effet ──
|
||||
from .recovery import RecoveryEngine
|
||||
recovery = RecoveryEngine(self)
|
||||
recovery_result = recovery.attempt(
|
||||
failed_action=action,
|
||||
critic_detail="L'écran n'a pas changé après l'action",
|
||||
)
|
||||
if recovery_result.success:
|
||||
print(f" [RECOVERY] {recovery_result.detail}")
|
||||
result["recovery"] = recovery_result.to_dict()
|
||||
|
||||
result["success"] = False
|
||||
result["warning"] = "no_screen_change"
|
||||
result["error"] = "Ecran inchange apres l'action"
|
||||
@@ -1136,6 +1293,8 @@ Example: x_pct=0.50, y_pct=0.30"""
|
||||
"error": result.get("error"),
|
||||
"warning": result.get("warning"),
|
||||
"screenshot": result.get("screenshot"),
|
||||
"screenshot_after": result.get("screenshot"),
|
||||
"screenshot_before": result.get("screenshot_before"),
|
||||
"resolution_method": result.get("resolution_method"),
|
||||
"resolution_score": result.get("resolution_score"),
|
||||
"resolution_elapsed_ms": result.get("resolution_elapsed_ms"),
|
||||
|
||||
214
agent_v0/agent_v1/core/grounding.py
Normal file
214
agent_v0/agent_v1/core/grounding.py
Normal file
@@ -0,0 +1,214 @@
|
||||
# agent_v1/core/grounding.py
|
||||
"""
|
||||
Module Grounding — localisation pure d'éléments UI sur l'écran.
|
||||
|
||||
Responsabilité unique : "Trouve l'élément X sur l'écran et retourne ses coordonnées."
|
||||
Ne prend AUCUNE décision. Si l'élément n'est pas trouvé → retourne NOT_FOUND.
|
||||
|
||||
Stratégies disponibles (cascade configurable) :
|
||||
1. Serveur SomEngine + VLM (GPU distant)
|
||||
2. Template matching local (CPU, ~10ms)
|
||||
3. VLM local direct (CPU/GPU local)
|
||||
|
||||
Séparé de Policy (qui décide quoi faire quand grounding échoue).
|
||||
Ref: docs/PLAN_ACTEUR_V1.md — Architecture MICRO (grounding + exécution)
|
||||
"""
|
||||
|
||||
import base64
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class GroundingResult:
    """Outcome of one visual-localization attempt.

    Carries the element position as screen fractions plus telemetry about
    which strategy resolved it and how long the resolution took.
    """

    found: bool                  # the element was located on screen
    x_pct: float = 0.0           # X position as a fraction of screen width (0.0-1.0)
    y_pct: float = 0.0           # Y position as a fraction of screen height (0.0-1.0)
    method: str = ""             # strategy that succeeded (server_som, anchor_template, vlm_direct...)
    score: float = 0.0           # confidence (0.0-1.0)
    elapsed_ms: float = 0.0      # resolution time in milliseconds
    detail: str = ""             # extra info (matched label, failure reason)
    raw: Optional[Dict] = None   # raw resolver payload, kept for debugging

    def to_dict(self) -> Dict[str, Any]:
        """Serialize for logs/telemetry; score and elapsed_ms are rounded."""
        payload: Dict[str, Any] = {}
        payload["found"] = self.found
        payload["x_pct"] = self.x_pct
        payload["y_pct"] = self.y_pct
        payload["method"] = self.method
        payload["score"] = round(self.score, 3)
        payload["elapsed_ms"] = round(self.elapsed_ms, 1)
        payload["detail"] = self.detail
        return payload
|
||||
|
||||
|
||||
# Shared "not found" sentinel result.
# NOTE(review): GroundingResult is not frozen, so callers must treat this
# instance as read-only — mutating it would corrupt every other user.
NOT_FOUND = GroundingResult(found=False, detail="Aucune méthode n'a trouvé l'élément")
|
||||
|
||||
|
||||
class GroundingEngine:
    """Visual localization engine for UI elements.

    Wraps the resolution cascade (server → template → local VLM) behind a
    single interface. Takes NO decisions — that is PolicyEngine's job.

    Usage:
        engine = GroundingEngine(executor)
        result = engine.locate(server_url, target_spec, fallback_x,
                               fallback_y, screen_width, screen_height)
        if result.found:
            click(result.x_pct, result.y_pct)
    """

    def __init__(self, executor):
        """
        Args:
            executor: ActionExecutorV1 — provides the existing resolution
                methods (_capture_screenshot_b64, _server_resolve_target,
                _template_match_anchor, _hybrid_vlm_resolve).
        """
        self._executor = executor

    def locate(
        self,
        server_url: str,
        target_spec: Dict[str, Any],
        fallback_x: float,
        fallback_y: float,
        screen_width: int,
        screen_height: int,
        strategies: Optional[List[str]] = None,
    ) -> GroundingResult:
        """Locate a UI element on the screen.

        Runs the strategy cascade in order and returns as soon as one
        strategy finds the element.

        Args:
            server_url: Server URL (SomEngine + GPU VLM)
            target_spec: Target specification (by_text, anchor, vlm_description...)
            fallback_x, fallback_y: Fallback coordinates (from the recording)
            screen_width, screen_height: Screen resolution
            strategies: Ordered list of strategies to try.
                Default: ["server", "template", "vlm_local"]

        Returns:
            GroundingResult with found=True and coordinates, or a
            found=False result describing why every strategy failed.
        """
        if strategies is None:
            strategies = ["server", "template", "vlm_local"]

        # ── Learning: reorder the strategies based on history ──
        # If the Learning layer knows which method works for this target,
        # put it first. This is the runtime learning loop.
        learned = target_spec.get("_learned_strategy", "")
        if learned:
            # Map fine-grained learned method names onto the three
            # coarse strategy buckets used by this cascade.
            strategy_map = {
                "som_text_match": "server",
                "grounding_vlm": "server",
                "server_som": "server",
                "anchor_template": "template",
                "template_matching": "template",
                "hybrid_text_direct": "vlm_local",
                "hybrid_vlm_text": "vlm_local",
                "vlm_direct": "vlm_local",
            }
            preferred = strategy_map.get(learned, "")
            if preferred and preferred in strategies:
                strategies = [preferred] + [s for s in strategies if s != preferred]
                logger.info(
                    f"Grounding: stratégie réordonnée par l'apprentissage → "
                    f"{strategies} (learned={learned})"
                )

        t_start = time.time()
        screenshot_b64 = self._executor._capture_screenshot_b64(max_width=0, quality=75)
        if not screenshot_b64:
            return GroundingResult(
                found=False, detail="Capture screenshot échouée",
                elapsed_ms=(time.time() - t_start) * 1000,
            )

        # One screenshot is shared across all strategies; elapsed_ms is
        # stamped with the total cascade time, not the winning strategy's.
        for strategy in strategies:
            result = self._try_strategy(
                strategy, server_url, screenshot_b64, target_spec,
                fallback_x, fallback_y, screen_width, screen_height,
            )
            if result.found:
                result.elapsed_ms = (time.time() - t_start) * 1000
                return result

        return GroundingResult(
            found=False,
            detail=f"Toutes les stratégies ont échoué ({', '.join(strategies)})",
            elapsed_ms=(time.time() - t_start) * 1000,
        )

    def _try_strategy(
        self,
        strategy: str,
        server_url: str,
        screenshot_b64: str,
        target_spec: Dict[str, Any],
        fallback_x: float,
        fallback_y: float,
        screen_width: int,
        screen_height: int,
    ) -> GroundingResult:
        """Try a single grounding strategy; found=False when it does not apply."""

        if strategy == "server" and server_url:
            # Remote resolution: SomEngine + VLM on the GPU server.
            raw = self._executor._server_resolve_target(
                server_url, screenshot_b64, target_spec,
                fallback_x, fallback_y, screen_width, screen_height,
            )
            if raw and raw.get("resolved"):
                return GroundingResult(
                    found=True,
                    x_pct=raw["x_pct"],
                    y_pct=raw["y_pct"],
                    method=raw.get("method", "server"),
                    score=raw.get("score", 0.0),
                    detail=raw.get("matched_element", {}).get("label", ""),
                    raw=raw,
                )

        elif strategy == "template":
            # Local template matching against the recorded anchor image.
            anchor_b64 = target_spec.get("anchor_image_base64", "")
            if anchor_b64:
                raw = self._executor._template_match_anchor(
                    screenshot_b64, anchor_b64, screen_width, screen_height,
                )
                if raw and raw.get("resolved"):
                    return GroundingResult(
                        found=True,
                        x_pct=raw["x_pct"],
                        y_pct=raw["y_pct"],
                        method="anchor_template",
                        score=raw.get("score", 0.0),
                        raw=raw,
                    )

        elif strategy == "vlm_local":
            # Local VLM resolution from a textual description of the target.
            by_text = target_spec.get("by_text", "")
            vlm_desc = target_spec.get("vlm_description", "")
            if vlm_desc or by_text:
                raw = self._executor._hybrid_vlm_resolve(
                    screenshot_b64, target_spec, screen_width, screen_height,
                )
                if raw and raw.get("resolved"):
                    return GroundingResult(
                        found=True,
                        x_pct=raw["x_pct"],
                        y_pct=raw["y_pct"],
                        method=raw.get("method", "vlm_local"),
                        score=raw.get("score", 0.0),
                        detail=raw.get("matched_element", {}).get("label", ""),
                        raw=raw,
                    )

        return GroundingResult(found=False, method=strategy, detail=f"{strategy}: pas trouvé")
|
||||
152
agent_v0/agent_v1/core/policy.py
Normal file
152
agent_v0/agent_v1/core/policy.py
Normal file
@@ -0,0 +1,152 @@
|
||||
# agent_v1/core/policy.py
|
||||
"""
|
||||
Module Policy — décisions intelligentes quand le grounding échoue.
|
||||
|
||||
Responsabilité unique : "Le Grounding dit NOT_FOUND. Que fait-on ?"
|
||||
Ne localise AUCUN élément — c'est le rôle du Grounding.
|
||||
|
||||
Décisions possibles :
|
||||
- RETRY : re-tenter le grounding (après popup fermée, par exemple)
|
||||
- SKIP : l'action n'est plus nécessaire (état déjà atteint)
|
||||
- ABORT : arrêter le workflow (état incohérent)
|
||||
- SUPERVISE : rendre la main à l'utilisateur
|
||||
|
||||
Séparé de Grounding (qui localise les éléments).
|
||||
Ref: docs/PLAN_ACTEUR_V1.md — Architecture MÉSO (acteur intelligent)
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Decision(Enum):
    """Possible decisions when grounding fails."""

    RETRY = "retry"          # try grounding again (after a fix: popup closed, navigation...)
    SKIP = "skip"            # action unnecessary (target state already reached)
    ABORT = "abort"          # stop the workflow (inconsistent state)
    SUPERVISE = "supervise"  # hand control back to the user ("I'm stuck")
    CONTINUE = "continue"    # keep going despite the failure (non-critical action)


@dataclass
class PolicyDecision:
    """Outcome of one PolicyEngine decision."""

    decision: Decision       # branch the policy chose
    reason: str              # human-readable explanation of the choice
    action_taken: str = ""   # corrective action performed (e.g. "popup_closed")
    elapsed_ms: float = 0.0  # time spent deciding, in milliseconds

    def to_dict(self) -> Dict[str, Any]:
        """Serialize for logs/telemetry; elapsed_ms is rounded."""
        payload: Dict[str, Any] = {"decision": self.decision.value}
        payload["reason"] = self.reason
        payload["action_taken"] = self.action_taken
        payload["elapsed_ms"] = round(self.elapsed_ms, 1)
        return payload
|
||||
|
||||
|
||||
class PolicyEngine:
    """Decision engine for when grounding fails.

    Decision cascade:
        1. Popup present? → close it and RETRY
        2. gemma4 actor → SKIP / ABORT / SUPERVISE
        3. Fallback → SUPERVISE (hand control back)

    Usage:
        policy = PolicyEngine(executor)
        outcome = policy.decide(action, target_spec, retry_count, max_retries)
        if outcome.decision == Decision.RETRY:
            # re-run the grounding
        elif outcome.decision == Decision.SKIP:
            # mark as done, move on
    """

    def __init__(self, executor):
        # Executor supplies the popup handler and the gemma4 actor bridge.
        self._executor = executor

    def decide(
        self,
        action: Dict[str, Any],
        target_spec: Dict[str, Any],
        retry_count: int = 0,
        max_retries: int = 1,
    ) -> PolicyDecision:
        """Choose what to do after a grounding failure.

        Cascade:
            1. First attempt → try to close a popup → RETRY
            2. Retries exhausted → ask the gemma4 actor
            3. Per gemma4: SKIP, ABORT, or SUPERVISE

        Args:
            action: The action that failed.
            target_spec: The target that was not found.
            retry_count: Number of retries already performed.
            max_retries: Maximum retries allowed.

        Returns:
            PolicyDecision with the chosen branch and timing info.
        """
        started = time.time()

        def _done(decision: Decision, reason: str, corrective: str = "") -> PolicyDecision:
            # Stamp elapsed time on every outcome from a single place.
            return PolicyDecision(
                decision=decision,
                reason=reason,
                action_taken=corrective,
                elapsed_ms=(time.time() - started) * 1000,
            )

        # ── Step 1: on the first attempt, a popup may be masking the target ──
        if retry_count == 0 and self._try_close_popup():
            return _done(
                Decision.RETRY,
                "Popup détectée et fermée, re-tentative",
                "popup_closed",
            )

        # ── Step 2: retries exhausted → delegate the call to gemma4 ──
        if retry_count >= max_retries:
            verdict = self._ask_actor(action, target_spec)
            if verdict == "PASSER":
                return _done(Decision.SKIP, "Acteur gemma4 : l'état est déjà atteint")
            if verdict == "STOPPER":
                return _done(Decision.ABORT, "Acteur gemma4 : état incohérent, arrêt")
            # EXECUTER or anything unknown → supervised pause.
            return _done(Decision.SUPERVISE, f"Acteur gemma4 : {verdict}, pause supervisée")

        # ── Step 3: retry budget remaining → plain RETRY ──
        return _done(Decision.RETRY, f"Retry {retry_count + 1}/{max_retries}")

    def _try_close_popup(self) -> bool:
        """Best-effort popup close through the executor's VLM handler."""
        try:
            handled = self._executor._handle_popup_vlm()
        except Exception as e:
            logger.debug(f"Policy: popup handler échoué : {e}")
            return False
        return handled

    def _ask_actor(self, action: Dict, target_spec: Dict) -> str:
        """Ask gemma4 for a verdict (PASSER/EXECUTER/STOPPER)."""
        try:
            return self._executor._actor_decide(action, target_spec)
        except Exception as e:
            logger.debug(f"Policy: acteur gemma4 échoué : {e}")
            return "EXECUTER"  # Fallback → supervised pause downstream.
|
||||
215
agent_v0/agent_v1/core/recovery.py
Normal file
215
agent_v0/agent_v1/core/recovery.py
Normal file
@@ -0,0 +1,215 @@
|
||||
# agent_v1/core/recovery.py
|
||||
"""
|
||||
Module Recovery — mécanisme de rollback quand une action échoue.
|
||||
|
||||
Responsabilité : "L'action a échoué ou produit un résultat inattendu.
|
||||
Comment revenir en arrière ?"
|
||||
|
||||
Stratégies de recovery :
|
||||
1. Ctrl+Z (undo natif) — pour les frappes et modifications
|
||||
2. Escape (fermer dialogue) — pour les popups/menus
|
||||
3. Alt+F4 (fermer fenêtre) — si mauvaise application ouverte
|
||||
4. Clic hors zone — fermer un menu déroulant
|
||||
5. Navigation retour — retourner à l'écran précédent
|
||||
|
||||
Le Recovery est appelé par le Policy quand le Critic détecte un
|
||||
résultat inattendu (pixel OK + sémantique NON = changement inattendu).
|
||||
|
||||
Ref: docs/VISION_RPA_INTELLIGENT.md — "Il se trompe" → correction
|
||||
"""
|
||||
|
||||
import logging
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RecoveryAction(Enum):
|
||||
"""Actions de recovery possibles."""
|
||||
UNDO = "undo" # Ctrl+Z
|
||||
ESCAPE = "escape" # Echap (fermer dialogue/menu)
|
||||
CLOSE_WINDOW = "close" # Alt+F4
|
||||
CLICK_AWAY = "click_away" # Clic hors zone (fermer menu)
|
||||
NONE = "none" # Pas de recovery possible
|
||||
|
||||
|
||||
@dataclass
class RecoveryResult:
    """Outcome of a single recovery attempt."""
    action_taken: RecoveryAction  # the strategy that was executed
    success: bool                 # whether the strategy ran without error
    detail: str = ""              # human-readable explanation

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the result to a JSON-friendly dict."""
        payload: Dict[str, Any] = {"action_taken": self.action_taken.value}
        payload["success"] = self.success
        payload["detail"] = self.detail
        return payload
|
||||
|
||||
|
||||
class RecoveryEngine:
    """Recovery engine — tries to roll back after a failed action.

    Picks a recovery strategy based on the type of action that failed and
    on the Critic's description of the current screen state.

    Usage:
        recovery = RecoveryEngine(executor)
        result = recovery.attempt(failed_action, critic_result)
        if result.success:
            # re-try the action
    """

    def __init__(self, executor):
        # The executor provides the input devices (keyboard controller,
        # screen capture) and the low-level click helper.
        self._executor = executor

    def attempt(
        self,
        failed_action: Dict[str, Any],
        critic_detail: str = "",
    ) -> RecoveryResult:
        """Attempt a recovery after a failure.

        Selects the appropriate strategy from the action type:
          - click that opened the wrong thing  -> Escape or Ctrl+Z
          - type that landed in the wrong spot -> Ctrl+Z
          - unexpected key_combo               -> Ctrl+Z
          - popup appeared                     -> Escape

        Args:
            failed_action: The action that failed.
            critic_detail: Critic detail (reason for the semantic failure).

        Returns:
            A RecoveryResult describing what was attempted and whether it ran.
        """
        action_type = failed_action.get("type", "")
        detail_lower = critic_detail.lower()

        # Choose the recovery strategy.
        strategy = self._select_strategy(action_type, detail_lower)

        if strategy == RecoveryAction.NONE:
            return RecoveryResult(
                action_taken=RecoveryAction.NONE,
                success=False,
                detail="Pas de stratégie de recovery applicable",
            )

        return self._execute_recovery(strategy)

    def _select_strategy(self, action_type: str, critic_detail: str) -> RecoveryAction:
        """Select the best recovery strategy.

        Priority: the action type first (typing -> undo), then keyword
        matching on the Critic's (lower-cased) detail string.
        """
        # Typing or key-combo gone wrong -> always Ctrl+Z.
        if action_type in ("type", "key_combo"):
            return RecoveryAction.UNDO

        # Popup/dialog reported by the Critic.
        if any(w in critic_detail for w in ["popup", "dialog", "erreur", "error", "modal"]):
            return RecoveryAction.ESCAPE

        # A menu was opened by mistake.
        if any(w in critic_detail for w in ["menu", "dropdown", "déroulant"]):
            return RecoveryAction.ESCAPE

        # The wrong window was opened.
        if any(w in critic_detail for w in ["mauvaise fenêtre", "wrong window"]):
            return RecoveryAction.CLOSE_WINDOW

        # A click produced an unexpected result.
        if action_type == "click":
            return RecoveryAction.ESCAPE

        return RecoveryAction.NONE

    def _execute_recovery(self, strategy: RecoveryAction) -> RecoveryResult:
        """Execute the chosen recovery strategy."""
        # Only Key is needed here; the keyboard controller instance comes
        # from the executor (the unused Controller import was removed).
        from pynput.keyboard import Key

        keyboard = self._executor.keyboard

        try:
            if strategy == RecoveryAction.UNDO:
                # Ctrl+Z
                logger.info("Recovery : Ctrl+Z (undo)")
                print(" [RECOVERY] Ctrl+Z — annulation de la dernière action")
                keyboard.press(Key.ctrl)
                keyboard.press('z')
                keyboard.release('z')
                keyboard.release(Key.ctrl)
                time.sleep(0.5)
                return RecoveryResult(
                    action_taken=RecoveryAction.UNDO,
                    success=True,
                    detail="Ctrl+Z exécuté",
                )

            elif strategy == RecoveryAction.ESCAPE:
                # Esc
                logger.info("Recovery : Escape (fermer dialogue)")
                print(" [RECOVERY] Escape — fermeture dialogue/menu")
                keyboard.press(Key.esc)
                keyboard.release(Key.esc)
                time.sleep(0.5)
                return RecoveryResult(
                    action_taken=RecoveryAction.ESCAPE,
                    success=True,
                    detail="Escape exécuté",
                )

            elif strategy == RecoveryAction.CLOSE_WINDOW:
                # Alt+F4 — WITH a check on the active window first.
                # On a hospital workstation a blind Alt+F4 could close the
                # patient-record application (DPI).
                # Pre-initialize the fallback title; this replaces the fragile
                # "'active_title' in dir()" introspection of the original and
                # yields the same '?' when the title lookup fails.
                active_title = "?"
                try:
                    from ..window_info_crossplatform import get_active_window_info
                    active = get_active_window_info()
                    active_title = active.get("title", "")
                    logger.info(f"Recovery : Alt+F4 sur '{active_title}'")
                    print(f" [RECOVERY] Alt+F4 — fermeture de '{active_title}'")
                except Exception:
                    logger.info("Recovery : Alt+F4 (fenêtre active inconnue)")
                    print(" [RECOVERY] Alt+F4 — fermeture fenêtre indésirable")

                keyboard.press(Key.alt)
                keyboard.press(Key.f4)
                keyboard.release(Key.f4)
                keyboard.release(Key.alt)
                time.sleep(1.0)
                return RecoveryResult(
                    action_taken=RecoveryAction.CLOSE_WINDOW,
                    success=True,
                    detail=f"Alt+F4 exécuté sur '{active_title}'",
                )

            elif strategy == RecoveryAction.CLICK_AWAY:
                # Click in a neutral spot (outside the popup/menu).
                logger.info("Recovery : clic hors zone")
                print(" [RECOVERY] Clic hors zone — fermeture menu")
                monitor = self._executor.sct.monitors[1]
                w, h = monitor["width"], monitor["height"]
                # Click in a neutral corner (10% from the top-left).
                self._executor._click((int(w * 0.1), int(h * 0.1)), "left")
                time.sleep(0.5)
                return RecoveryResult(
                    action_taken=RecoveryAction.CLICK_AWAY,
                    success=True,
                    detail="Clic hors zone exécuté",
                )

        except Exception as e:
            logger.warning(f"Recovery échoué ({strategy.value}) : {e}")
            return RecoveryResult(
                action_taken=strategy,
                success=False,
                detail=f"Erreur : {e}",
            )

        # Reached only if a strategy value has no branch above (e.g. NONE
        # passed in directly, which `attempt` normally filters out).
        return RecoveryResult(
            action_taken=RecoveryAction.NONE,
            success=False,
            detail="Stratégie non implémentée",
        )
|
||||
@@ -28,11 +28,15 @@ from pydantic import BaseModel
|
||||
|
||||
from .replay_failure_logger import log_replay_failure
|
||||
from .replay_verifier import ReplayVerifier, VerificationResult
|
||||
from .replay_learner import ReplayLearner
|
||||
from .audit_trail import AuditTrail, AuditEntry
|
||||
from .stream_processor import StreamProcessor, build_replay_from_raw_events, enrich_click_from_screenshot
|
||||
from .worker_stream import StreamWorker
|
||||
|
||||
# Instance globale du vérificateur de replay (comparaison screenshots avant/après)
|
||||
_replay_verifier = ReplayVerifier()
|
||||
_replay_learner = ReplayLearner()
|
||||
_audit_trail = AuditTrail()
|
||||
|
||||
# Nombre maximum de retries par action avant de déclarer un échec
|
||||
MAX_RETRIES_PER_ACTION = 3
|
||||
@@ -995,6 +999,7 @@ class ReplayResultReport(BaseModel):
|
||||
warning: Optional[str] = None # "no_screen_change", "popup_handled", "visual_resolve_failed"
|
||||
screenshot: Optional[str] = None # Chemin ou base64 du screenshot post-action
|
||||
screenshot_after: Optional[str] = None # Chemin ou base64 du screenshot APRES l'action
|
||||
screenshot_before: Optional[str] = None # Screenshot AVANT l'action (pour le Critic)
|
||||
actual_position: Optional[Dict[str, float]] = None # {"x": px, "y": py} position réelle du clic
|
||||
# Métriques de résolution visuelle
|
||||
resolution_method: Optional[str] = None # som_text_match, som_vlm, vlm_quick_find, etc.
|
||||
@@ -3255,8 +3260,9 @@ async def report_action_result(report: ReplayResultReport):
|
||||
skip_verify = skip_verify or agent_handled_popup
|
||||
verification = None
|
||||
if report.success and screenshot_after and not skip_verify:
|
||||
# Chercher le screenshot avant (dernier connu de la session)
|
||||
screenshot_before = replay_state.get("_last_screenshot_before")
|
||||
# Utiliser le screenshot_before envoyé par l'agent (Critic fiable)
|
||||
# Fallback sur le dernier screenshot stocké côté serveur
|
||||
screenshot_before = report.screenshot_before or replay_state.get("_last_screenshot_before")
|
||||
if screenshot_before:
|
||||
try:
|
||||
action_dict = original_action or {"type": "unknown", "action_id": action_id}
|
||||
@@ -3264,6 +3270,31 @@ async def report_action_result(report: ReplayResultReport):
|
||||
"success": report.success,
|
||||
"error": report.error,
|
||||
}
|
||||
# Utiliser le Critic sémantique si l'action a un expected_result
|
||||
expected_result = (original_action or {}).get("expected_result", "")
|
||||
action_intention = (original_action or {}).get("intention", "")
|
||||
if expected_result:
|
||||
# Critic complet : pixel + VLM sémantique
|
||||
workflow_ctx = (
|
||||
f"Action {replay_state.get('completed_actions', 0)+1}"
|
||||
f"/{len(replay_state.get('actions', []))}"
|
||||
)
|
||||
verification = _replay_verifier.verify_with_critic(
|
||||
action=action_dict,
|
||||
result=result_dict,
|
||||
screenshot_before=screenshot_before,
|
||||
screenshot_after=screenshot_after,
|
||||
expected_result=expected_result,
|
||||
action_intention=action_intention,
|
||||
workflow_context=workflow_ctx,
|
||||
)
|
||||
if verification.semantic_verified is not None:
|
||||
logger.info(
|
||||
f"Critic sémantique : {'OK' if verification.semantic_verified else 'ÉCHEC'} "
|
||||
f"en {verification.semantic_elapsed_ms:.0f}ms — {verification.semantic_detail[:80]}"
|
||||
)
|
||||
else:
|
||||
# Vérification pixel seule (pas d'expected_result)
|
||||
verification = _replay_verifier.verify_action(
|
||||
action=action_dict,
|
||||
result=result_dict,
|
||||
@@ -3295,6 +3326,68 @@ async def report_action_result(report: ReplayResultReport):
|
||||
}
|
||||
replay_state["results"].append(result_entry)
|
||||
|
||||
# === Apprentissage : enregistrer le résultat pour amélioration continue ===
|
||||
try:
|
||||
_replay_learner.record_from_replay_result(
|
||||
session_id=session_id,
|
||||
action=original_action or {"action_id": action_id, "type": "unknown"},
|
||||
result=result_entry,
|
||||
verification=verification.to_dict() if verification else None,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug(f"Learning: échec enregistrement: {e}")
|
||||
|
||||
# === Audit Trail : traçabilité complète pour conformité hospitalière ===
|
||||
try:
|
||||
_action = original_action or {"action_id": action_id, "type": "unknown"}
|
||||
_target_spec = _action.get("target_spec", {})
|
||||
_verification = verification.to_dict() if verification else {}
|
||||
|
||||
# Déterminer le résultat pour l'audit
|
||||
if report.success and (verification is None or verification.verified):
|
||||
_audit_result = "success"
|
||||
elif report.success and verification and not verification.verified:
|
||||
_audit_result = "recovered" if retry_count > 0 else "failed"
|
||||
elif not report.success:
|
||||
_audit_result = "failed"
|
||||
else:
|
||||
_audit_result = "success"
|
||||
|
||||
# Déterminer le résultat du Critic
|
||||
_critic = ""
|
||||
if verification:
|
||||
if verification.semantic_verified is True:
|
||||
_critic = "semantic_ok"
|
||||
elif verification.semantic_verified is False:
|
||||
_critic = f"semantic_fail: {verification.semantic_detail[:100]}"
|
||||
elif verification.verified:
|
||||
_critic = "pixel_ok"
|
||||
else:
|
||||
_critic = f"pixel_fail: {verification.detail[:100]}"
|
||||
|
||||
_audit_trail.record(AuditEntry(
|
||||
session_id=session_id,
|
||||
action_id=action_id,
|
||||
user_id=replay_state.get("params", {}).get("user_id", ""),
|
||||
user_name=replay_state.get("params", {}).get("user_name", ""),
|
||||
machine_id=replay_state.get("machine_id", ""),
|
||||
action_type=_action.get("type", ""),
|
||||
action_detail=_target_spec.get("by_text", "") or _action.get("intention", ""),
|
||||
target_app=_target_spec.get("window_title", ""),
|
||||
execution_mode=replay_state.get("params", {}).get("execution_mode", "autonomous"),
|
||||
result=_audit_result,
|
||||
resolution_method=result_entry.get("resolution_method", ""),
|
||||
critic_result=_critic,
|
||||
recovery_action=report.warning or "",
|
||||
domain=replay_state.get("params", {}).get("domain", ""),
|
||||
workflow_id=replay_state.get("workflow_id", ""),
|
||||
workflow_name=replay_state.get("params", {}).get("workflow_name", ""),
|
||||
duration_ms=result_entry.get("resolution_elapsed_ms", 0.0) or 0.0,
|
||||
))
|
||||
except Exception as e:
|
||||
logger.debug(f"Audit Trail: échec enregistrement: {e}")
|
||||
|
||||
with _replay_lock:
|
||||
# === Logique de retry / success / failure ===
|
||||
if report.success and (verification is None or verification.verified):
|
||||
# Action réussie (vérification OK ou pas de vérification)
|
||||
@@ -3861,6 +3954,225 @@ async def resolve_target(request: ResolveTargetRequest):
|
||||
pass
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Observer — Pré-analyse écran avant résolution
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class PreAnalyzeRequest(BaseModel):
    """Screen pre-analysis request (Observer).

    Sent before target resolution so the server can detect popups or
    unexpected screen states first.
    """
    screenshot_b64: str  # current screenshot, base64-encoded
    expected_state: str = ""  # expected description of the screen state
    window_title: str = ""  # expected window title
    screen_width: int = 1920  # screen width in px (used to normalize button coords)
    screen_height: int = 1080  # screen height in px (used to normalize button coords)
|
||||
|
||||
|
||||
@app.post("/api/v1/traces/stream/replay/pre_analyze")
async def pre_analyze_screen(request: PreAnalyzeRequest):
    """Observer: analyze the screen BEFORE target resolution.

    Detects popups, modal dialogs and unexpected states that would prevent
    the visual resolution from working.

    Returns:
        - screen_state: "ok" | "popup" | "unexpected"
        - popup_label: text of the popup button to click (if popup)
        - popup_coords: {x_pct, y_pct} of the button (if popup)
        - detail: description of the problem
    """
    import asyncio
    import base64
    import io

    from PIL import Image

    # Validate the payload early: an undecodable screenshot is reported as
    # "ok" so the caller simply falls back to the normal resolution flow.
    try:
        img_bytes = base64.b64decode(request.screenshot_b64)
        # Opening checks that the bytes look like a real image (PIL parses
        # the header lazily; full decoding is not needed here).
        img = Image.open(io.BytesIO(img_bytes))
    except Exception as e:
        return {"screen_state": "ok", "detail": f"decode error: {e}"}

    # Run the blocking VLM call in a worker thread so the event loop stays
    # responsive. get_running_loop() is the correct call inside a coroutine;
    # get_event_loop() is deprecated in this context since Python 3.10.
    loop = asyncio.get_running_loop()
    result = await loop.run_in_executor(
        None,
        _pre_analyze_screen_sync,
        request.screenshot_b64,
        request.expected_state,
        request.window_title,
        request.screen_width,
        request.screen_height,
    )
    return result
|
||||
|
||||
|
||||
def _pre_analyze_screen_sync(
    screenshot_b64: str,
    expected_state: str,
    window_title: str,
    screen_width: int,
    screen_height: int,
) -> Dict[str, Any]:
    """Synchronous screen pre-analysis via VLM.

    Uses gemma4 (Docker, port 11435 by default) to detect:
      1. Popups / modal dialogs (with the coordinates of the button to click)
      2. States inconsistent with the expected one

    Fast (~2-5s) because gemma4 is light and runs in text+image mode.

    Args:
        screenshot_b64: base64-encoded screenshot to analyze.
        expected_state: expected screen description. NOTE(review): currently
            not used by the prompt — kept for interface stability; confirm
            whether it should be wired into the prompt.
        window_title: expected window title (currently unused, same note).
        screen_width: screen width in px, used to normalize button coords.
        screen_height: screen height in px, used to normalize button coords.

    Returns:
        Dict with "screen_state" ("ok" | "popup" | "unexpected") plus, when a
        problem is found, "detail"/"elapsed_ms" and, for popups,
        "popup_label"/"popup_coords". Any VLM failure degrades to
        {"screen_state": "ok"} so the caller keeps its normal flow.
    """
    import os
    import time
    import requests as _requests

    gemma4_port = os.environ.get("GEMMA4_PORT", "11435")
    gemma4_url = f"http://localhost:{gemma4_port}/api/chat"

    # Load the business-domain context for the Observer.
    from .domain_context import get_domain_context
    domain = get_domain_context(os.environ.get("RPA_DOMAIN", "generic"))

    # Concise prompt for popup detection.
    prompt = (
        "Regarde cette capture d'écran.\n"
        "Y a-t-il une popup, boîte de dialogue, message d'erreur, ou fenêtre modale visible ?\n\n"
        "Réponds EXACTEMENT dans ce format :\n"
        "ÉTAT: OK ou POPUP ou INATTENDU\n"
        "BOUTON: texte du bouton à cliquer (si POPUP, sinon 'aucun')\n"
        "DÉTAIL: description courte (1 ligne)"
    )

    # Build the message list with the domain context as a system prompt.
    messages = []
    if domain.system_prompt:
        messages.append({"role": "system", "content": domain.system_prompt})
    messages.append({"role": "user", "content": prompt, "images": [screenshot_b64]})

    try:
        t_start = time.time()
        resp = _requests.post(
            gemma4_url,
            json={
                "model": "gemma4:e4b",
                "messages": messages,
                "stream": False,
                "think": True,
                "options": {"temperature": 0.1, "num_predict": 800},
            },
            timeout=30,
        )
        elapsed_ms = (time.time() - t_start) * 1000

        if not resp.ok:
            logger.warning(f"Observer VLM HTTP {resp.status_code}")
            return {"screen_state": "ok", "detail": f"VLM HTTP {resp.status_code}"}

        content = resp.json().get("message", {}).get("content", "").strip()
        logger.info(f"Observer VLM ({elapsed_ms:.0f}ms) : {content[:100]}")

        # Parse the structured "ÉTAT/BOUTON/DÉTAIL" reply.
        state = "ok"
        button = ""
        detail = content  # fallback: raw reply if no DÉTAIL line is found

        for line in content.split("\n"):
            line_clean = line.strip()
            upper = line_clean.upper()
            if upper.startswith("ÉTAT:") or upper.startswith("ETAT:"):
                val = upper.split(":", 1)[1].strip()
                if "POPUP" in val:
                    state = "popup"
                elif "INATTENDU" in val or "UNEXPECTED" in val:
                    state = "unexpected"
                else:
                    state = "ok"
            elif upper.startswith("BOUTON:"):
                button = line_clean.split(":", 1)[1].strip().strip("'\"")
                if button.lower() in ("aucun", "none", "n/a", ""):
                    button = ""
            elif upper.startswith("DÉTAIL:") or upper.startswith("DETAIL:"):
                detail = line_clean.split(":", 1)[1].strip()

        if state == "ok":
            return {"screen_state": "ok"}

        result = {
            "screen_state": state,
            "detail": detail,
            "elapsed_ms": round(elapsed_ms, 1),
        }

        # Popup detected with a button label: try to locate it on screen.
        if state == "popup" and button:
            result["popup_label"] = button
            # Locate the button via VLM grounding (qwen2.5vl).
            coords = _locate_popup_button(screenshot_b64, button, screen_width, screen_height)
            if coords:
                result["popup_coords"] = coords

        return result

    except _requests.Timeout:
        # Bugfix: the log used to say "(15s)" while the request timeout is 30s.
        logger.debug("Observer VLM timeout (30s)")
        return {"screen_state": "ok", "detail": "VLM timeout"}
    except Exception as e:
        logger.debug(f"Observer VLM erreur : {e}")
        return {"screen_state": "ok", "detail": str(e)}
|
||||
|
||||
|
||||
def _locate_popup_button(
    screenshot_b64: str, button_text: str,
    screen_width: int, screen_height: int,
) -> Optional[Dict[str, float]]:
    """Locate a popup button via VLM grounding (qwen2.5vl).

    Asks qwen2.5vl for its native bbox_2d output and converts the box
    center to screen-relative percentages. Returns None on any failure.
    """
    import re
    import requests as _requests

    ollama_url = "http://localhost:11434/api/chat"
    prompt = f"Detect the button with text '{button_text}' with a bounding box."

    try:
        response = _requests.post(
            ollama_url,
            json={
                "model": "qwen2.5vl:7b",
                "messages": [{"role": "user", "content": prompt, "images": [screenshot_b64]}],
                "stream": False,
                "options": {"temperature": 0.1, "num_predict": 50},
            },
            timeout=15,
        )
        if not response.ok:
            return None

        reply_text = response.json().get("message", {}).get("content", "")

        # qwen2.5vl returns bbox_2d in pixels relative to the submitted
        # image — NOT on a 1000x1000 grid.
        # JSON shape: [{"bbox_2d": [x1, y1, x2, y2], "label": "..."}]
        pattern = r'"bbox_2d"\s*:\s*\[\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\]'
        found = re.search(pattern, reply_text)
        if found is not None:
            x1, y1, x2, y2 = (int(found.group(i)) for i in range(1, 5))
            # Convert the box center from pixels to 0-1 screen fractions.
            cx = (x1 + x2) / 2 / screen_width
            cy = (y1 + y2) / 2 / screen_height
            if 0.0 <= cx <= 1.0 and 0.0 <= cy <= 1.0:
                logger.info(f"Observer : bouton '{button_text}' localisé à ({cx:.3f}, {cy:.3f})")
                return {"x_pct": cx, "y_pct": cy}

    except Exception as e:
        logger.debug(f"Observer grounding bouton erreur : {e}")

    return None
|
||||
|
||||
|
||||
def _resolve_by_template_matching(
|
||||
screenshot_path: str,
|
||||
anchor_image_b64: str,
|
||||
@@ -5694,6 +6006,417 @@ async def import_learning_pack(body: LearningPackImportRequest, request: Request
|
||||
_global_faiss_index = None
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Endpoints Audit Trail — traçabilité complète des actions RPA
|
||||
# =========================================================================
|
||||
|
||||
@app.get("/api/v1/audit/history")
async def audit_history(
    date_from: str = "",
    date_to: str = "",
    user_id: str = "",
    session_id: str = "",
    result: str = "",
    action_type: str = "",
    workflow_id: str = "",
    domain: str = "",
    limit: int = 100,
    offset: int = 0,
):
    """
    Paginated audit history with filters.

    Query parameters:
        date_from : start date (YYYY-MM-DD), defaults to today
        date_to   : end date (YYYY-MM-DD), defaults to date_from
        user_id   : filter by TIM identifier
        session_id: filter by session
        result    : filter by result (success, failed, recovered, skipped)
        action_type: filter by action type (click, type, key_combo, etc.)
        workflow_id: filter by workflow
        domain    : filter by business domain
        limit     : max number of results (default 100, capped at 1000)
        offset    : pagination offset

    Returns the entries sorted by descending timestamp.
    """
    # Clamp pagination values to sane bounds to avoid abuse.
    if limit < 1:
        limit = 1
    elif limit > 1000:
        limit = 1000
    if offset < 0:
        offset = 0

    entries = _audit_trail.query(
        date_from=date_from,
        date_to=date_to,
        user_id=user_id,
        session_id=session_id,
        result=result,
        action_type=action_type,
        workflow_id=workflow_id,
        domain=domain,
        limit=limit,
        offset=offset,
    )

    response = {"status": "ok", "count": len(entries)}
    response["offset"] = offset
    response["limit"] = limit
    response["entries"] = entries
    return response
|
||||
|
||||
|
||||
@app.get("/api/v1/audit/summary")
async def audit_summary(
    date: str = "",
):
    """
    Daily audit summary.

    Query parameter:
        date: target date (YYYY-MM-DD), defaults to today.

    Returns aggregated statistics: action count, success rate, breakdown
    per user, per result, per type, per workflow and per execution mode.
    """
    payload = {"status": "ok"}
    payload.update(_audit_trail.get_summary(target_date=date))
    return payload
|
||||
|
||||
|
||||
@app.get("/api/v1/audit/export")
async def audit_export(
    date_from: str = "",
    date_to: str = "",
    user_id: str = "",
    session_id: str = "",
):
    """
    CSV export of the audit history.

    Query parameters:
        date_from : start date (YYYY-MM-DD), defaults to today
        date_to   : end date (YYYY-MM-DD), defaults to date_from
        user_id   : filter by TIM identifier
        session_id : filter by session

    Returns:
        The CSV file as plain text (Content-Type: text/csv).

    Raises:
        HTTPException: 404 when no audit entry matches the filters.
    """
    from fastapi.responses import Response

    csv_data = _audit_trail.export_csv(
        date_from=date_from,
        date_to=date_to,
        user_id=user_id,
        session_id=session_id,
    )

    if not csv_data:
        raise HTTPException(
            status_code=404,
            detail="Aucune entrée d'audit trouvée pour les filtres spécifiés.",
        )

    # File name offered for download, built from the requested date range.
    filename = f"audit_{date_from or 'today'}"
    if date_to and date_to != date_from:
        filename += f"_to_{date_to}"
    filename += ".csv"

    return Response(
        content=csv_data,
        media_type="text/csv; charset=utf-8",
        headers={
            # Bugfix: the computed `filename` was never interpolated — the
            # header previously contained a literal placeholder string.
            "Content-Disposition": f'attachment; filename="{filename}"',
        },
    )
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Task Planner — Comprendre et exécuter des ordres en langage naturel
|
||||
# =========================================================================
|
||||
|
||||
from .task_planner import TaskPlanner
|
||||
|
||||
_task_planner = TaskPlanner()
|
||||
|
||||
|
||||
class TaskRequest(BaseModel):
    """Natural-language task request."""
    instruction: str  # e.g. "Traite les dossiers de janvier"
    machine_id: str = "default"  # target machine
    dry_run: bool = False  # True = plan the task without executing it
|
||||
|
||||
|
||||
@app.post("/api/v1/task")
async def execute_task(request: TaskRequest):
    """Execute a task described in natural language.

    Léa understands the instruction, finds the matching workflow, and runs
    it. This is the main entry point for the user.

    Examples:
        - "Ouvre le bloc-notes et écris bonjour"
        - "Traite les dossiers de janvier"
        - "Recherche voiture électrique sur Google"

    Returns a dict with "status" ("not_understood" | "planned" | "executed"
    | "failed"), the original instruction, the plan, and — when executed —
    the execution result.
    """
    import asyncio

    # 1. List the available workflows.
    workflows = _list_available_workflows()

    # 2. Understand the instruction (blocking LLM call → worker thread).
    # get_running_loop() is the correct call inside a coroutine;
    # get_event_loop() is deprecated in this context since Python 3.10.
    loop = asyncio.get_running_loop()
    plan = await loop.run_in_executor(
        None,
        lambda: _task_planner.understand(
            instruction=request.instruction,
            available_workflows=workflows,
        ),
    )

    if not plan.understood:
        return {
            "status": "not_understood",
            "instruction": request.instruction,
            "error": plan.error or "Instruction non comprise",
            "plan": plan.to_dict(),
        }

    # 3. Dry run = return the plan without executing it.
    if request.dry_run:
        return {
            "status": "planned",
            "instruction": request.instruction,
            "plan": plan.to_dict(),
        }

    # 4. Execute.
    def replay_callback(session_id="", machine_id="", params=None, actions=None, task_description=""):
        """Callback used by the planner to launch a replay.

        Returns the replay_id on success and raises on HTTP failure.
        NOTE(review): when neither session_id nor actions is provided the
        callback implicitly returns None — confirm the planner handles that.
        """
        if session_id:
            # Replay mode: re-run a known workflow.
            import requests as _req
            resp = _req.post(
                f"http://localhost:5005/api/v1/traces/stream/replay-session"
                f"?session_id={session_id}&machine_id={machine_id}",
                headers={"Authorization": f"Bearer {API_TOKEN}"},
                timeout=600,
            )
            if resp.ok:
                return resp.json().get("replay_id", "")
            raise Exception(f"Replay échoué: {resp.text[:200]}")
        elif actions:
            # Free mode: planned ad-hoc actions.
            import requests as _req
            resp = _req.post(
                f"http://localhost:5005/api/v1/traces/stream/replay/raw",
                json={
                    "session_id": "",
                    "actions": actions,
                    "machine_id": machine_id,
                    "task_description": task_description,
                },
                headers={"Authorization": f"Bearer {API_TOKEN}"},
                timeout=30,
            )
            if resp.ok:
                return resp.json().get("replay_id", "")
            raise Exception(f"Replay raw échoué: {resp.text[:200]}")

    result = await loop.run_in_executor(
        None,
        lambda: _task_planner.execute(
            plan=plan,
            replay_callback=replay_callback,
            machine_id=request.machine_id,
        ),
    )

    return {
        "status": "executed" if result.success else "failed",
        "instruction": request.instruction,
        "plan": plan.to_dict(),
        "result": result.to_dict(),
    }
|
||||
|
||||
|
||||
@app.get("/api/v1/task/capabilities")
async def list_capabilities():
    """List what Léa can do (learned workflows)."""
    available = _list_available_workflows()
    return {
        "capabilities": _task_planner.list_capabilities(available),
        "workflows": available,
        "total": len(available),
    }
|
||||
|
||||
|
||||
def _list_available_workflows() -> List[Dict[str, Any]]:
    """List the recorded workflows/sessions available to the planner.

    Scans LIVE_SESSIONS_DIR for per-machine session directories that contain
    a live_events.jsonl file, and summarizes each one. Any filesystem error
    is logged and yields the entries collected so far.
    """
    found: List[Dict[str, Any]] = []

    # Recorded sessions that actually contain events.
    try:
        for machine_dir in LIVE_SESSIONS_DIR.iterdir():
            if not machine_dir.is_dir():
                continue
            if machine_dir.name.startswith((".", "embeddings", "streaming")):
                continue
            for session_dir in machine_dir.iterdir():
                if not session_dir.is_dir():
                    continue
                if not session_dir.name.startswith("sess_"):
                    continue
                events_file = session_dir / "live_events.jsonl"
                if not events_file.is_file():
                    continue
                # Derive a business-level description from the events.
                desc = _extract_session_description(events_file)
                found.append({
                    "session_id": session_dir.name,
                    "name": desc.get("name", session_dir.name),
                    "description": desc.get("description", ""),
                    "machine": machine_dir.name,
                    "event_count": desc.get("event_count", 0),
                })
    except Exception as e:
        logger.debug(f"Erreur listage workflows: {e}")

    return found
|
||||
|
||||
|
||||
def _extract_session_description(events_file) -> Dict[str, Any]:
    """Extract a business-level description of a session from its events.

    Scans a prefix of the event log (~100 events) and derives a semantic
    description (not just an app list) that helps the TaskPlanner match
    natural-language instructions to recorded workflows.

    Examples of produced descriptions (French — the product language):
    - "Ouvrir Bloc-notes via Exécuter (Win+R) et écrire du texte"
    - "Naviguer dans l'Explorateur de fichiers et ouvrir des images"
    - "Utiliser cmd.exe pour exécuter des commandes"

    Returns:
        Dict with keys: name, description, event_count, apps,
        typed_text_preview. Falls back to {"name": "?", ...} on any error.
    """
    try:
        apps = set()
        app_names = set()  # Application names (right-hand part of window titles)
        typed_texts = []  # Text typed by the user
        key_combos = []  # Keyboard shortcuts used
        event_types = {}  # Counter per event type
        window_sequence = []  # Sequence of visited windows (captures the flow)
        event_count = 0  # NOTE: capped at 101 by the break below, not the true total

        with open(events_file) as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                event_count += 1
                if event_count > 100:  # read a larger prefix for better understanding
                    break
                try:
                    obj = json.loads(line)
                    # Events may be wrapped ({"event": {...}}) or bare
                    evt = obj.get("event", obj)
                    evt_type = evt.get("type", "")

                    # Count events per type
                    event_types[evt_type] = event_types.get(evt_type, 0) + 1

                    # Collect windows (skip placeholder/desktop titles)
                    # NOTE(review): assumes evt["window"] is a dict when present;
                    # an explicit null would raise and trip the outer except — confirm.
                    title = evt.get("window", {}).get("title", "")
                    if title and title not in ("unknown_window", "Program Manager"):
                        # [-1:] compares against the last element only:
                        # deduplicates consecutive repeats, keeps the flow
                        if title not in window_sequence[-1:]:
                            window_sequence.append(title)
                        # Extract the app name (right-hand part of the title);
                        # separators cover en dash, hyphen, em dash
                        for sep in [" – ", " - ", " — "]:
                            if sep in title:
                                app_name = title.split(sep)[-1].strip()
                                app_names.add(app_name)
                                apps.add(title)
                                break
                        else:
                            # No separator found: keep a truncated title as the name
                            app_names.add(title[:30])
                            apps.add(title[:30])

                    # Collect typed text (single keystrokes are ignored)
                    if evt_type == "text_input":
                        text = evt.get("text", "")
                        if text and len(text) > 1:
                            typed_texts.append(text)

                    # Collect keyboard shortcuts
                    if evt_type == "key_combo":
                        keys = evt.get("keys", [])
                        if keys:
                            key_combos.append("+".join(keys))

                    # Window focus change → also part of the flow
                    if evt_type == "window_focus_change":
                        to_title = evt.get("to", {}).get("title", "")
                        if to_title and to_title not in ("unknown_window", "Program Manager"):
                            if to_title not in window_sequence[-1:]:
                                window_sequence.append(to_title)

                except json.JSONDecodeError:
                    continue

        # --- Build the semantic description ---
        apps_list = sorted(app_names)[:5]
        apps_str = ", ".join(apps_list)

        # Build an action-oriented description
        desc_parts = []

        # Detect common launcher patterns (Run dialog, Windows search)
        has_run_dialog = any("Exécuter" in w for w in window_sequence)
        has_search = any("Rechercher" in w or "Recherche" in w for w in window_sequence)
        has_win_r = "win+r" in [k.lower() for k in key_combos]
        has_win_s = "win+s" in [k.lower() for k in key_combos]

        # Main applications used (launcher windows excluded)
        main_apps = [a for a in apps_list if a not in ("Exécuter", "Rechercher")]
        launcher = ""
        if has_run_dialog or has_win_r:
            launcher = "via Exécuter (Win+R)"
        elif has_search or has_win_s:
            launcher = "via la recherche Windows"

        if main_apps:
            verb = "Ouvrir" if launcher else "Utiliser"
            desc_parts.append(f"{verb} {', '.join(main_apps)} {launcher}".strip())
        elif launcher:
            desc_parts.append(f"Lancer une application {launcher}")

        # Typed text: generic wording when long, verbatim preview when short
        total_typed = "".join(typed_texts)
        if len(total_typed) > 5:
            desc_parts.append("écrire du texte")
        elif typed_texts:
            desc_parts.append(f"saisir '{total_typed[:30]}'")

        # Notable keyboard shortcuts (launcher combos excluded)
        notable_combos = [k for k in key_combos if k.lower() not in ("win+r", "win+s")]
        if notable_combos:
            combos_str = ", ".join(sorted(set(notable_combos))[:3])
            desc_parts.append(f"raccourcis : {combos_str}")

        # Click count, mentioned only when significant
        click_count = event_types.get("mouse_click", 0)
        if click_count > 5:
            desc_parts.append(f"{click_count} clics")

        description = " et ".join(desc_parts) if desc_parts else f"Workflow avec {apps_str}"
        name = apps_str or "Session sans nom"

        return {
            "name": name,
            "description": description,
            "event_count": event_count,
            "apps": apps_list,
            "typed_text_preview": total_typed[:50] if typed_texts else "",
        }
    except Exception:
        # Best-effort: never fail workflow listing because one file is unreadable
        return {"name": "?", "description": "", "event_count": 0}
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import uvicorn
|
||||
|
||||
|
||||
393
agent_v0/server_v1/audit_trail.py
Normal file
393
agent_v0/server_v1/audit_trail.py
Normal file
@@ -0,0 +1,393 @@
|
||||
# agent_v0/server_v1/audit_trail.py
|
||||
"""
|
||||
Module Audit Trail — traçabilité complète des actions RPA.
|
||||
|
||||
Responsabilité : "Chaque action exécutée par Léa est tracée, datée, attribuée."
|
||||
|
||||
En milieu hospitalier (codage CIM-10 via DPI), la traçabilité est une obligation
|
||||
légale. Ce module enregistre chaque action avec :
|
||||
- L'identité du TIM (Technicien d'Information Médicale) superviseur
|
||||
- Le mode d'exécution (autonome, assisté, shadow)
|
||||
- Le résultat détaillé (succès, échec, correction)
|
||||
- L'horodatage ISO 8601
|
||||
|
||||
Format de stockage : fichiers JSONL datés dans data/audit/ (un par jour).
|
||||
Aucune dépendance externe (stdlib + dataclasses uniquement).
|
||||
|
||||
Usage :
|
||||
audit = AuditTrail()
|
||||
audit.record(AuditEntry(
|
||||
session_id="sess_abc",
|
||||
action_id="act_001",
|
||||
user_id="tim_dupont",
|
||||
user_name="Marie Dupont",
|
||||
...
|
||||
))
|
||||
entries = audit.query(user_id="tim_dupont", date_from="2026-04-01")
|
||||
csv_data = audit.export_csv(date_from="2026-04-01", date_to="2026-04-06")
|
||||
summary = audit.get_summary("2026-04-05")
|
||||
"""
|
||||
|
||||
import csv
|
||||
import io
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import threading
|
||||
from dataclasses import dataclass, asdict, fields
|
||||
from datetime import datetime, date, timedelta
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Répertoire par défaut pour le stockage des fichiers d'audit
|
||||
_DEFAULT_AUDIT_DIR = os.environ.get("RPA_AUDIT_DIR", "data/audit")
|
||||
|
||||
|
||||
@dataclass
class AuditEntry:
    """One audited event — a single traced action in the system."""

    # ISO 8601 timestamp (e.g. 2026-04-05T14:23:01.456789)
    timestamp: str = ""

    # Session and action identifiers
    session_id: str = ""
    action_id: str = ""

    # Identity of the supervising user
    user_id: str = ""  # TIM identifier (Windows login or configured)
    user_name: str = ""  # Display name (e.g. "Marie Dupont")
    machine_id: str = ""  # Client workstation ID (hostname or configured)

    # Action description
    action_type: str = ""  # click, type, key_combo, wait, etc.
    action_detail: str = ""  # Human-readable description
    target_app: str = ""  # Target application (DxCare, Orbis, etc.)

    # Execution mode
    execution_mode: str = ""  # "autonomous", "assisted", "shadow"

    # Result
    result: str = ""  # "success", "failed", "skipped", "recovered"
    resolution_method: str = ""  # How the target was found (som_text_match, vlm_direct, ...)
    critic_result: str = ""  # Semantic-verification outcome
    recovery_action: str = ""  # Corrective action on failure (undo, escape, retry, none)

    # Business context
    domain: str = ""  # Business domain (tim_codage, generic, etc.)
    workflow_id: str = ""  # Executed workflow ID
    workflow_name: str = ""  # Human-readable workflow name

    # Performance
    duration_ms: float = 0.0  # Action duration in milliseconds

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-ready dictionary."""
        return asdict(self)

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "AuditEntry":
        """Build an entry from a dictionary.

        Unknown keys are dropped for forward compatibility.
        """
        accepted = {f.name for f in fields(cls)}
        return cls(**{key: value for key, value in data.items() if key in accepted})
|
||||
|
||||
|
||||
class AuditTrail:
    """Audit trail manager — record and query every automated action.

    Each event is appended to a dated JSONL file (one file per day).
    Writes are serialized with a lock, so one AuditTrail instance can be
    shared across threads.

    Files produced:
        data/audit/audit_2026-04-05.jsonl
        data/audit/audit_2026-04-06.jsonl
        ...
    """

    def __init__(self, audit_dir: str = ""):
        """Create the trail and ensure the storage directory exists.

        Args:
            audit_dir: Storage directory; defaults to the RPA_AUDIT_DIR
                environment variable, then "data/audit".
        """
        self.audit_dir = Path(audit_dir or _DEFAULT_AUDIT_DIR)
        self.audit_dir.mkdir(parents=True, exist_ok=True)
        self._lock = threading.Lock()  # serializes appends across threads
        logger.info(f"Audit Trail initialisé : {self.audit_dir}")

    def _file_for_date(self, d: date) -> Path:
        """Return the JSONL file path for a given day."""
        return self.audit_dir / f"audit_{d.isoformat()}.jsonl"

    def record(self, entry: AuditEntry) -> None:
        """Record one audit entry.

        Fills in an ISO 8601 timestamp when missing, then appends the entry
        to the JSONL file of the entry's own day (so backdated entries land
        in the correct file).
        """
        if not entry.timestamp:
            entry.timestamp = datetime.now().isoformat()

        # Derive the daily file from the entry timestamp, not from "today"
        try:
            entry_date = datetime.fromisoformat(entry.timestamp).date()
        except (ValueError, TypeError):
            entry_date = date.today()

        audit_file = self._file_for_date(entry_date)

        with self._lock:
            try:
                with open(audit_file, "a", encoding="utf-8") as f:
                    f.write(json.dumps(entry.to_dict(), ensure_ascii=False) + "\n")
            except Exception as e:
                logger.error(f"Audit Trail: échec écriture {audit_file}: {e}")
                return

        logger.debug(
            f"Audit: {entry.result} {entry.action_type} "
            f"'{entry.action_detail[:50]}' "
            f"[user={entry.user_id}] [session={entry.session_id}]"
        )

    def _load_file(self, filepath: Path) -> List[AuditEntry]:
        """Load all entries of one JSONL file; invalid lines are skipped."""
        if not filepath.is_file():
            return []

        entries = []
        try:
            with open(filepath, "r", encoding="utf-8") as f:
                for line_num, line in enumerate(f, 1):
                    line = line.strip()
                    if not line:
                        continue
                    try:
                        data = json.loads(line)
                        entries.append(AuditEntry.from_dict(data))
                    except json.JSONDecodeError as e:
                        logger.warning(
                            f"Audit Trail: ligne {line_num} invalide dans "
                            f"{filepath.name}: {e}"
                        )
        except Exception as e:
            logger.error(f"Audit Trail: échec lecture {filepath}: {e}")

        return entries

    def _date_range(self, date_from: str = "", date_to: str = "") -> List[date]:
        """Compute the list of dates between date_from and date_to (inclusive).

        Falls back to today for an empty or invalid date_from, and to
        date_from for an empty or invalid date_to. Expected format:
        YYYY-MM-DD. Bounds given in the wrong order are swapped.
        """
        if date_from:
            try:
                d_from = date.fromisoformat(date_from)
            except ValueError:
                d_from = date.today()
        else:
            d_from = date.today()

        if date_to:
            try:
                d_to = date.fromisoformat(date_to)
            except ValueError:
                d_to = d_from
        else:
            d_to = d_from

        # Keep chronological order
        if d_to < d_from:
            d_from, d_to = d_to, d_from

        dates = []
        current = d_from
        while current <= d_to:
            dates.append(current)
            current += timedelta(days=1)

        return dates

    def query(
        self,
        date_from: str = "",
        date_to: str = "",
        user_id: str = "",
        session_id: str = "",
        result: str = "",
        action_type: str = "",
        workflow_id: str = "",
        domain: str = "",
        limit: int = 500,
        offset: int = 0,
    ) -> List[Dict[str, Any]]:
        """Search audit entries with optional filters.

        All filters are optional and AND-combined (an empty value means
        "no constraint"). Returns entries sorted by timestamp descending
        (most recent first), paginated by limit/offset, as plain dicts.
        """
        dates = self._date_range(date_from, date_to)
        all_entries: List[AuditEntry] = []

        for d in dates:
            filepath = self._file_for_date(d)
            all_entries.extend(self._load_file(filepath))

        # Apply the filters
        filtered = []
        for entry in all_entries:
            if user_id and entry.user_id != user_id:
                continue
            if session_id and entry.session_id != session_id:
                continue
            if result and entry.result != result:
                continue
            if action_type and entry.action_type != action_type:
                continue
            if workflow_id and entry.workflow_id != workflow_id:
                continue
            if domain and entry.domain != domain:
                continue
            filtered.append(entry)

        # ISO 8601 strings sort chronologically, so a string sort is correct
        filtered.sort(key=lambda e: e.timestamp, reverse=True)

        # Pagination
        paginated = filtered[offset:offset + limit]

        return [e.to_dict() for e in paginated]

    def get_summary(self, target_date: str = "") -> Dict[str, Any]:
        """Daily summary for a given date (defaults to today).

        Returns aggregated statistics:
        - total number of actions
        - overall success rate
        - breakdown per user (with per-user success rate)
        - breakdowns per result / action type / workflow / execution mode
        """
        if not target_date:
            target_date = date.today().isoformat()

        try:
            d = date.fromisoformat(target_date)
        except ValueError:
            d = date.today()

        entries = self._load_file(self._file_for_date(d))

        if not entries:
            return {
                "date": d.isoformat(),
                "total_actions": 0,
                "success_rate": 0.0,
                "by_user": {},
                "by_result": {},
                "by_action_type": {},
                "by_workflow": {},
                "by_execution_mode": {},
            }

        total = len(entries)
        successes = sum(1 for e in entries if e.result == "success")

        # Aggregations
        by_user: Dict[str, Dict[str, Any]] = {}
        by_result: Dict[str, int] = {}
        by_action_type: Dict[str, int] = {}
        by_workflow: Dict[str, int] = {}
        by_execution_mode: Dict[str, int] = {}

        for entry in entries:
            # Per user
            uid = entry.user_id or "inconnu"
            if uid not in by_user:
                by_user[uid] = {
                    "user_name": entry.user_name,
                    "total": 0,
                    "success": 0,
                }
            by_user[uid]["total"] += 1
            if entry.result == "success":
                by_user[uid]["success"] += 1

            # Per result
            r = entry.result or "inconnu"
            by_result[r] = by_result.get(r, 0) + 1

            # Per action type
            at = entry.action_type or "inconnu"
            by_action_type[at] = by_action_type.get(at, 0) + 1

            # Per workflow
            wf = entry.workflow_id or "inconnu"
            by_workflow[wf] = by_workflow.get(wf, 0) + 1

            # Per execution mode
            em = entry.execution_mode or "inconnu"
            by_execution_mode[em] = by_execution_mode.get(em, 0) + 1

        # Per-user success rate
        for uid, stats in by_user.items():
            stats["success_rate"] = round(
                stats["success"] / stats["total"], 3
            ) if stats["total"] > 0 else 0.0

        return {
            "date": d.isoformat(),
            "total_actions": total,
            "success_rate": round(successes / total, 3) if total > 0 else 0.0,
            "by_user": by_user,
            "by_result": by_result,
            "by_action_type": by_action_type,
            "by_workflow": by_workflow,
            "by_execution_mode": by_execution_mode,
        }

    def export_csv(
        self,
        date_from: str = "",
        date_to: str = "",
        user_id: str = "",
        session_id: str = "",
    ) -> str:
        """Export audit entries as a CSV string (header included).

        Optional filters by date range, user and session. When no entry
        matches, the result is a header-only CSV document — matching the
        documented contract (the previous behavior of returning "" produced
        an invalid, empty CSV file for consumers).
        """
        # Reuse query() so filters behave exactly like the history endpoint
        entries = self.query(
            date_from=date_from,
            date_to=date_to,
            user_id=user_id,
            session_id=session_id,
            limit=100000,  # no pagination for exports
        )

        # CSV columns mirror the dataclass field order
        fieldnames = [f.name for f in fields(AuditEntry)]

        output = io.StringIO()
        writer = csv.DictWriter(
            output,
            fieldnames=fieldnames,
            extrasaction="ignore",
            quoting=csv.QUOTE_MINIMAL,
        )
        # Always emit the header, even with zero matching rows
        writer.writeheader()
        for entry_dict in entries:
            writer.writerow(entry_dict)

        return output.getvalue()
|
||||
201
agent_v0/server_v1/domain_context.py
Normal file
201
agent_v0/server_v1/domain_context.py
Normal file
@@ -0,0 +1,201 @@
|
||||
# agent_v0/server_v1/domain_context.py
|
||||
"""
|
||||
Contexte métier pour les appels VLM — rend Léa experte du domaine.
|
||||
|
||||
Chaque workflow est associé à un domaine métier (médical, comptable, etc.)
|
||||
qui enrichit TOUS les prompts VLM (Observer, Critic, acteur, enrichissement).
|
||||
|
||||
Un gemma4 qui sait qu'il regarde un DPI et que l'utilisateur fait du codage
|
||||
CIM-10 prend des décisions bien meilleures qu'un VLM générique.
|
||||
|
||||
Premier domaine : TIM (Technicien d'Information Médicale)
|
||||
- Logiciels DPI/DMS (dossier patient informatisé)
|
||||
- Codage CIM-10 / CCAM / GHM
|
||||
- Lecture de comptes rendus médicaux
|
||||
- Validation des séjours / RSS / RSA
|
||||
|
||||
Usage :
|
||||
ctx = get_domain_context("tim_codage")
|
||||
prompt = f"{ctx.system_prompt}\n\n{user_prompt}"
|
||||
"""
|
||||
|
||||
import logging
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class DomainContext:
    """Business-domain context attached to a workflow."""
    domain_id: str  # Unique identifier (tim_codage, comptabilite, etc.)
    name: str  # Human-readable name (e.g. "Codage médical TIM")
    description: str  # Short description of the trade

    # System prompt injected into EVERY VLM call
    system_prompt: str = ""

    # Domain vocabulary (terms the VLM must know)
    vocabulary: List[str] = field(default_factory=list)

    # Known applications (software names the VLM may encounter)
    known_apps: List[str] = field(default_factory=list)

    # Typical screens (descriptions of the trade's common screens)
    screen_patterns: Dict[str, str] = field(default_factory=dict)

    def enrich_prompt(self, prompt: str, role: str = "") -> str:
        """Prepend the domain context to a prompt.

        Args:
            prompt: The original prompt.
            role: VLM role (observer, critic, actor, enrichment); an
                unknown or empty role adds no role hint.
        """
        sections: List[str] = []

        if self.system_prompt:
            sections.append(self.system_prompt)

        hint = _ROLE_HINTS.get(role, "") if role else ""
        if hint:
            sections.append(hint.format(domain=self.name))

        sections.append(prompt)
        return "\n\n".join(sections)

    def to_dict(self) -> Dict[str, Any]:
        """Summarize the domain as a JSON-ready dictionary."""
        summary = {
            "domain_id": self.domain_id,
            "name": self.name,
            "description": self.description,
            "known_apps": self.known_apps,
            "vocabulary_count": len(self.vocabulary),
        }
        return summary
|
||||
|
||||
|
||||
# Per-role VLM hints — formatted with the domain name at prompt-build time.
# Hint text stays in French: it is a runtime prompt string, not a comment.
_ROLE_HINTS = {
    "observer": (
        "Tu observes un écran utilisé dans le domaine '{domain}'. "
        "Cherche les popups, erreurs, ou états incohérents avec ce métier."
    ),
    "critic": (
        "Tu vérifies qu'une action dans le domaine '{domain}' a produit "
        "le bon résultat. Sois précis sur ce que tu vois à l'écran."
    ),
    "actor": (
        "Tu décides si une action est nécessaire dans le contexte '{domain}'. "
        "Utilise ta connaissance du métier pour juger si l'état est cohérent."
    ),
    "enrichment": (
        "Tu analyses un enregistrement de workflow dans le domaine '{domain}'. "
        "Décris les intentions métier, pas juste les clics."
    ),
}
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Domaines pré-configurés
|
||||
# =========================================================================
|
||||
|
||||
# TIM (medical-information technician) domain — CIM-10/CCAM coding context.
# All string payloads below are runtime prompt text and stay in French.
_TIM_CODAGE = DomainContext(
    domain_id="tim_codage",
    name="Codage médical TIM",
    description=(
        "Technicien d'Information Médicale : lecture de comptes rendus médicaux, "
        "codage des diagnostics en CIM-10, codage des actes en CCAM, "
        "validation des groupes homogènes de malades (GHM), "
        "gestion des résumés de sortie standardisés (RSS/RSA)."
    ),
    system_prompt=(
        "Tu es un assistant expert en codage médical hospitalier. "
        "L'utilisateur est un TIM (Technicien d'Information Médicale) qui utilise "
        "un logiciel DPI (Dossier Patient Informatisé) ou DIM (Département d'Information Médicale). "
        "Son travail : lire les comptes rendus médicaux des patients et coder les diagnostics "
        "en CIM-10, les actes en CCAM, et valider les séjours pour le PMSI.\n\n"
        "Vocabulaire du métier :\n"
        "- DPI/DMS : logiciel de dossier patient (ex: Orbis, DxCare, Crossway, Easily, Hopital Manager)\n"
        "- CIM-10 : Classification Internationale des Maladies, 10ème révision (codes diagnostics)\n"
        "- CCAM : Classification Commune des Actes Médicaux (codes actes chirurgicaux/médicaux)\n"
        "- GHM : Groupe Homogène de Malades (regroupement tarifaire)\n"
        "- RSS : Résumé de Sortie Standardisé (données du séjour)\n"
        "- RSA : Résumé de Sortie Anonyme (RSS anonymisé pour la T2A)\n"
        "- DP : Diagnostic Principal (le code CIM-10 principal du séjour)\n"
        "- DAS : Diagnostics Associés Significatifs\n"
        "- CMA : Complication ou Morbidité Associée (augmente la sévérité)\n"
        "- T2A : Tarification À l'Activité (financement des hôpitaux)\n"
        "- PMSI : Programme de Médicalisation des Systèmes d'Information\n"
        "- UM : Unité Médicale (service hospitalier)\n"
        "- CR : Compte Rendu (document médical)\n\n"
        "Écrans courants :\n"
        "- Liste de patients / dossiers à coder\n"
        "- Fiche patient (identité, séjour, UM)\n"
        "- Écran de codage CIM-10 (recherche de codes, saisie DP/DAS)\n"
        "- Visualiseur de comptes rendus médicaux\n"
        "- Écran de validation / groupage GHM\n"
        "- Recherche de codes (arborescence CIM-10 ou recherche textuelle)"
    ),
    vocabulary=[
        "CIM-10", "CCAM", "GHM", "RSS", "RSA", "PMSI", "T2A",
        "diagnostic principal", "DAS", "CMA", "compte rendu",
        "dossier patient", "séjour", "unité médicale", "codage",
        "groupage", "valorisation", "exhaustivité",
    ],
    known_apps=[
        "Orbis", "DxCare", "Crossway", "Easily", "Hopital Manager",
        "CORA", "AGFA", "Dedalus", "Maincare", "Softway Medical",
        "WebPIMS", "CEPAGE", "Medimust",
    ],
    screen_patterns={
        "liste_patients": "Liste de dossiers patients avec colonnes (nom, prénom, date entrée, UM, statut codage)",
        "fiche_patient": "Fiche d'identité patient avec numéro IPP, séjour, dates, UM",
        "codage_cim10": "Écran de saisie des codes CIM-10 avec diagnostic principal et DAS",
        "compte_rendu": "Visualiseur de compte rendu médical (texte libre, souvent PDF intégré)",
        "recherche_code": "Recherche de code CIM-10 ou CCAM (champ de recherche + arborescence)",
        "validation_ghm": "Écran de validation du groupage avec GHM calculé et valorisation",
    },
)
|
||||
|
||||
# Fallback domain for plain office automation (no trade-specific knowledge)
_GENERIC = DomainContext(
    domain_id="generic",
    name="Bureautique générale",
    description="Automatisation bureautique générale (Office, navigateur, etc.)",
    system_prompt=(
        "Tu es un assistant RPA qui observe des applications bureautiques. "
        "Décris précisément ce que tu vois à l'écran."
    ),
)
|
||||
|
||||
# Registry of available domains (extended at runtime via register_domain)
_DOMAINS: Dict[str, DomainContext] = {
    "tim_codage": _TIM_CODAGE,
    "generic": _GENERIC,
}
|
||||
|
||||
|
||||
def get_domain_context(domain_id: str = "generic") -> DomainContext:
    """Look up a business-domain context by its identifier.

    Args:
        domain_id: Domain identifier (tim_codage, generic, etc.).

    Returns:
        The matching DomainContext, or the generic one when unknown.
    """
    context = _DOMAINS.get(domain_id, _GENERIC)
    fell_back = context is _GENERIC and domain_id != "generic"
    if fell_back:
        logger.warning(f"Domaine '{domain_id}' non trouvé, utilisation de 'generic'")
    return context
|
||||
|
||||
|
||||
def register_domain(context: DomainContext) -> None:
    """Register a new business domain in the global registry (overwrites same ID)."""
    _DOMAINS[context.domain_id] = context
    logger.info(f"Domaine '{context.domain_id}' enregistré ({context.name})")
|
||||
|
||||
|
||||
def list_domains() -> List[Dict[str, Any]]:
    """List all registered domains as summary dictionaries."""
    return [ctx.to_dict() for ctx in _DOMAINS.values()]
|
||||
346
agent_v0/server_v1/replay_learner.py
Normal file
346
agent_v0/server_v1/replay_learner.py
Normal file
@@ -0,0 +1,346 @@
|
||||
# agent_v0/server_v1/replay_learner.py
|
||||
"""
|
||||
Module Learning — apprentissage à partir des résultats de replay.
|
||||
|
||||
Responsabilité : "Chaque replay qui échoue enrichit notre base de connaissances."
|
||||
|
||||
Stocke les résultats structurés de chaque action (succès/échec, méthode,
|
||||
screenshots, correction appliquée) pour :
|
||||
1. Améliorer les décisions futures (Policy)
|
||||
2. Affiner les stratégies de grounding (quel méthode marche pour quel écran)
|
||||
3. Détecter les patterns récurrents d'échec
|
||||
4. Alimenter le fine-tuning futur du VLM
|
||||
|
||||
Format inspiré du cahier des charges (docs/VISION_RPA_INTELLIGENT.md) :
|
||||
{
|
||||
"screenshot_before": "base64...",
|
||||
"action": {"type": "click", "target": "Bouton Valider", ...},
|
||||
"screenshot_after": "base64...",
|
||||
"success": true,
|
||||
"resolution_method": "som_text_match",
|
||||
"correction": null,
|
||||
"human_validated": false
|
||||
}
|
||||
|
||||
Ref: docs/VISION_RPA_INTELLIGENT.md — Boucle d'apprentissage (section 4)
|
||||
Ref: docs/PLAN_ACTEUR_V1.md — Phase 3 : apprentissage continu
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
from dataclasses import dataclass, field, asdict
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Répertoire par défaut pour le stockage des résultats d'apprentissage
|
||||
_DEFAULT_LEARNING_DIR = os.environ.get(
|
||||
"RPA_LEARNING_DIR", "data/learning/replay_results"
|
||||
)
|
||||
|
||||
|
||||
@dataclass
class ActionOutcome:
    """Structured result of one replay action."""
    # Identifiers
    session_id: str
    action_id: str
    action_type: str  # click, type, key_combo
    timestamp: float = 0.0  # Epoch seconds (filled by ReplayLearner.record when 0)

    # Context
    target_description: str = ""  # e.g. "Clic sur 'Enregistrer' dans Bloc-notes"
    intention: str = ""  # e.g. "Sauvegarder le fichier"
    window_title: str = ""

    # Resolution (how the target was located)
    resolution_method: str = ""  # server_som, anchor_template, vlm_direct...
    resolution_score: float = 0.0
    resolution_elapsed_ms: float = 0.0

    # Result
    success: bool = False
    error: str = ""
    warning: str = ""

    # Verification (Critic); None = not checked
    pixel_verified: Optional[bool] = None
    semantic_verified: Optional[bool] = None
    critic_detail: str = ""

    # Recovery
    recovery_action: str = ""  # undo, escape, close, none
    recovery_success: bool = False

    # Screenshots (relative paths, not base64 — too heavy for JSONL)
    screenshot_before_path: str = ""
    screenshot_after_path: str = ""

    # Human correction (feedback loop)
    human_validated: bool = False
    human_correction: str = ""  # Description of the correction

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-ready dictionary."""
        return asdict(self)
|
||||
|
||||
|
||||
class ReplayLearner:
|
||||
"""Apprentissage à partir des résultats de replay.
|
||||
|
||||
Stocke chaque action dans un fichier JSONL par session.
|
||||
Fournit des requêtes pour améliorer les décisions futures.
|
||||
|
||||
Usage côté serveur (api_stream.py) :
|
||||
learner = ReplayLearner()
|
||||
learner.record(outcome)
|
||||
|
||||
Usage côté Policy :
|
||||
history = learner.query_similar(target_description, window_title)
|
||||
# → "La dernière fois, template matching a échoué mais SoM a trouvé"
|
||||
"""
|
||||
|
||||
    def __init__(self, learning_dir: str = ""):
        """Create the learner and ensure the storage directory exists.

        Args:
            learning_dir: Target directory; defaults to the RPA_LEARNING_DIR
                environment variable, then "data/learning/replay_results".
        """
        self.learning_dir = Path(learning_dir or _DEFAULT_LEARNING_DIR)
        self.learning_dir.mkdir(parents=True, exist_ok=True)
        # In-memory cache of the latest outcomes (for fast similarity queries)
        self._recent: List[ActionOutcome] = []
        self._max_recent = 500
|
||||
|
||||
def record(self, outcome: ActionOutcome) -> None:
|
||||
"""Enregistrer le résultat d'une action.
|
||||
|
||||
Écrit en append dans un fichier JSONL par session.
|
||||
Garde aussi en mémoire pour les requêtes rapides.
|
||||
"""
|
||||
if not outcome.timestamp:
|
||||
outcome.timestamp = time.time()
|
||||
|
||||
# Fichier JSONL par session
|
||||
session_file = self.learning_dir / f"{outcome.session_id}.jsonl"
|
||||
try:
|
||||
with open(session_file, "a") as f:
|
||||
f.write(json.dumps(outcome.to_dict(), ensure_ascii=False) + "\n")
|
||||
except Exception as e:
|
||||
logger.warning(f"Learning: échec écriture {session_file}: {e}")
|
||||
|
||||
# Cache mémoire
|
||||
self._recent.append(outcome)
|
||||
if len(self._recent) > self._max_recent:
|
||||
self._recent = self._recent[-self._max_recent:]
|
||||
|
||||
# Log résumé
|
||||
status = "OK" if outcome.success else "ÉCHEC"
|
||||
logger.info(
|
||||
f"Learning: {status} {outcome.action_type} "
|
||||
f"'{outcome.target_description[:40]}' "
|
||||
f"[{outcome.resolution_method}] "
|
||||
f"critic={'OK' if outcome.semantic_verified else 'NON' if outcome.semantic_verified is False else '?'}"
|
||||
)
|
||||
|
||||
def record_from_replay_result(
    self,
    session_id: str,
    action: Dict[str, Any],
    result: Dict[str, Any],
    verification: Optional[Dict] = None,
) -> None:
    """Record an outcome from the replay engine's native dict structures.

    Bridges the action/result/verification dicts into an ActionOutcome
    and delegates to record(). Called from api_stream.py after each
    replayed action.
    """
    spec = action.get("target_spec", {})

    # Assemble the outcome fields from the two replay dicts.
    fields = {
        "session_id": session_id,
        "action_id": action.get("action_id", ""),
        "action_type": action.get("type", ""),
        "target_description": spec.get("by_text", ""),
        "intention": action.get("intention", ""),
        "window_title": spec.get("window_title", ""),
        "resolution_method": result.get("resolution_method", ""),
        "resolution_score": result.get("resolution_score", 0.0),
        "resolution_elapsed_ms": result.get("resolution_elapsed_ms", 0.0),
        "success": result.get("success", False),
        "error": result.get("error", ""),
        "warning": result.get("warning", ""),
    }
    outcome = ActionOutcome(**fields)

    # Optional pixel/Critic verification data.
    if verification:
        outcome.pixel_verified = verification.get("verified")
        outcome.semantic_verified = verification.get("semantic_verified")
        outcome.critic_detail = verification.get("semantic_detail", "")

    self.record(outcome)
|
||||
|
||||
def query_similar(
    self,
    target_description: str = "",
    window_title: str = "",
    limit: int = 10,
) -> List[Dict[str, Any]]:
    """Search the in-memory history for outcomes matching a target.

    Case-insensitive substring match on the target description
    (weight 2) and window title (weight 1). Scans newest entries
    first, stops at *limit* matches, and returns them sorted by
    relevance (highest first).

    Useful for the Policy: "what worked before for this target?"
    """
    needle_target = target_description.lower()
    needle_window = window_title.lower()

    matches: List[Dict[str, Any]] = []
    for outcome in reversed(self._recent):
        relevance = 0
        if needle_target and needle_target in outcome.target_description.lower():
            relevance += 2
        if needle_window and needle_window in outcome.window_title.lower():
            relevance += 1
        if not relevance:
            continue
        matches.append({
            "outcome": outcome.to_dict(),
            "relevance": relevance,
        })
        if len(matches) >= limit:
            break

    # Stable sort keeps the newest-first order within equal relevance.
    matches.sort(key=lambda m: m["relevance"], reverse=True)
    return matches
|
||||
|
||||
def best_strategy_for(
    self,
    target_description: str = "",
    window_title: str = "",
) -> Optional[str]:
    """Return the grounding method with the best success rate for a target.

    Consults up to 20 similar historical outcomes and picks the method
    with the highest success rate among those attempted at least twice.
    This is the learning loop: past replays improve the next ones.

    Returns:
        Name of the best method (e.g. "som_text_match") or None.
    """
    similar = self.query_similar(target_description, window_title, limit=20)
    if not similar:
        return None

    # method → [successes, attempts]
    tallies: Dict[str, List[int]] = {}
    for entry in similar:
        data = entry["outcome"]
        method = data.get("resolution_method", "")
        if not method:
            continue
        bucket = tallies.setdefault(method, [0, 0])
        bucket[1] += 1
        if data.get("success"):
            bucket[0] += 1

    if not tallies:
        return None

    # Highest success rate wins; at least 2 attempts required to count.
    best = None
    best_rate = 0.0
    for method, (wins, attempts) in tallies.items():
        if attempts < 2:
            continue
        rate = wins / attempts
        if rate > best_rate:
            best_rate, best = rate, method

    if best:
        logger.info(
            f"Learning: meilleure stratégie pour '{target_description[:30]}' → "
            f"{best} ({best_rate:.0%} sur {tallies[best][1]} essais)"
        )

    return best
|
||||
|
||||
def consolidate_workflow(
    self,
    actions: list,
    session_id: str = "",
) -> int:
    """Inject learned grounding hints into a workflow's click actions.

    For every click with a textual target, asks the history for the
    best-performing resolution method and stores it as a
    "_learned_strategy" hint in the action's target_spec.

    Mutates the actions in place; returns how many were enriched.

    This is the cross-pollination loop: a replay that succeeded on
    "Enregistrer" via som_text improves every future workflow that
    clicks on "Enregistrer".
    """
    enriched = 0
    for action in actions:
        spec = action.get("target_spec", {})
        by_text = spec.get("by_text", "")
        if action.get("type") != "click" or not by_text:
            continue

        hint = self.best_strategy_for(by_text, spec.get("window_title", ""))
        if hint:
            spec["_learned_strategy"] = hint
            enriched += 1

    if enriched:
        logger.info(
            f"Consolidation : {enriched} actions enrichies par l'apprentissage "
            f"(session {session_id})"
        )
    return enriched
|
||||
|
||||
def get_stats(self) -> Dict[str, Any]:
    """Aggregate success statistics over the in-memory outcome cache.

    Returns {"total": 0} when the cache is empty, otherwise the overall
    success rate plus a per-resolution-method breakdown.
    """
    if not self._recent:
        return {"total": 0}

    total = len(self._recent)
    successes = sum(1 for o in self._recent if o.success)

    # Per-method tallies: method → {"total": n, "success": n}.
    per_method: Dict[str, Dict[str, int]] = {}
    for o in self._recent:
        key = o.resolution_method or "unknown"
        bucket = per_method.setdefault(key, {"total": 0, "success": 0})
        bucket["total"] += 1
        if o.success:
            bucket["success"] += 1

    return {
        "total": total,
        "success_rate": round(successes / total, 3) if total > 0 else 0,
        "methods": {
            key: {
                "total": b["total"],
                "success_rate": round(b["success"] / b["total"], 3) if b["total"] > 0 else 0,
            }
            for key, b in per_method.items()
        },
    }
|
||||
|
||||
def load_session(self, session_id: str) -> List[ActionOutcome]:
    """Read back every recorded outcome of a session from its JSONL file.

    Returns an empty list when the file does not exist. A read/parse
    failure is logged and whatever was parsed up to that point is
    returned.
    """
    path = self.learning_dir / f"{session_id}.jsonl"
    if not path.is_file():
        return []

    loaded: List[ActionOutcome] = []
    try:
        with open(path) as fh:
            for raw in fh:
                raw = raw.strip()
                if not raw:
                    continue
                loaded.append(ActionOutcome(**json.loads(raw)))
    except Exception as e:
        logger.warning(f"Learning: échec lecture {path}: {e}")

    return loaded
|
||||
@@ -1,20 +1,24 @@
|
||||
# agent_v0/server_v1/replay_verifier.py
|
||||
"""
|
||||
ReplayVerifier — Vérification post-action pour le replay de workflows.
|
||||
ReplayVerifier — Vérification post-action (Critic) pour le replay de workflows.
|
||||
|
||||
Compare les screenshots avant/après une action pour détecter si elle a eu
|
||||
un effet visible. Utilisé par l'API de replay pour décider si une action
|
||||
a réussi ou si un retry est nécessaire.
|
||||
Deux niveaux de vérification :
|
||||
1. PIXEL : Différence d'image avant/après (rapide, ~10ms)
|
||||
- L'écran a-t-il changé ? Où ? De combien ?
|
||||
2. SÉMANTIQUE : VLM évalue si le résultat correspond à l'attendu (~2-5s)
|
||||
- L'action a-t-elle eu l'EFFET voulu ? (pas juste "des pixels ont bougé")
|
||||
|
||||
Stratégies de vérification :
|
||||
1. Différence d'image globale (avant == après → probablement rien ne s'est passé)
|
||||
2. Zone locale autour du clic (si l'action est un clic)
|
||||
3. Détection de texte apparu (si l'action est une frappe)
|
||||
Le niveau pixel existait déjà. Le niveau sémantique (Critic) est le chaînon
|
||||
manquant identifié par comparaison avec Claude Computer Use et OpenAdapt.
|
||||
|
||||
Ref: docs/VISION_RPA_INTELLIGENT.md — étape VERIFY du pipeline.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any, Dict, Optional, Tuple
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -35,9 +39,13 @@ class VerificationResult:
|
||||
suggestion: str # "retry", "skip", "abort", "continue"
|
||||
detail: str = "" # Description humaine du résultat
|
||||
local_change_pct: float = 0.0 # % de changement dans la zone locale (si applicable)
|
||||
# Critic sémantique (VLM)
|
||||
semantic_verified: Optional[bool] = None # None = pas de vérif sémantique
|
||||
semantic_detail: str = "" # Explication du VLM
|
||||
semantic_elapsed_ms: float = 0.0 # Temps de la vérif sémantique
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
return {
|
||||
d = {
|
||||
"verified": self.verified,
|
||||
"confidence": round(self.confidence, 3),
|
||||
"changes_detected": self.changes_detected,
|
||||
@@ -46,6 +54,11 @@ class VerificationResult:
|
||||
"detail": self.detail,
|
||||
"local_change_pct": round(self.local_change_pct, 3),
|
||||
}
|
||||
if self.semantic_verified is not None:
|
||||
d["semantic_verified"] = self.semantic_verified
|
||||
d["semantic_detail"] = self.semantic_detail
|
||||
d["semantic_elapsed_ms"] = round(self.semantic_elapsed_ms, 1)
|
||||
return d
|
||||
|
||||
|
||||
class ReplayVerifier:
|
||||
@@ -345,3 +358,275 @@ class ReplayVerifier:
|
||||
f"(global={global_change_pct:.3f}%, local={local_change_pct:.3f}%)"
|
||||
),
|
||||
)
|
||||
|
||||
# =========================================================================
|
||||
# Critic sémantique — VLM évalue si le résultat correspond à l'attendu
|
||||
# =========================================================================
|
||||
|
||||
def verify_with_critic(
    self,
    action: Dict[str, Any],
    result: Dict[str, Any],
    screenshot_before: Optional[str] = None,
    screenshot_after: Optional[str] = None,
    expected_result: str = "",
    action_intention: str = "",
    workflow_context: str = "",
) -> VerificationResult:
    """Run the full verification: pixel diff, then the semantic Critic.

    Step 1 — pixel check (fast, ~10ms): did the screen change at all?
    Step 2 — semantic check via VLM (~2-5s): did it change the RIGHT way?

    The semantic step only runs when an *expected_result* description is
    provided AND the pixel step is not already an unchanged-screen retry
    (no change → no point asking the VLM). When the VLM is unreachable
    the pixel-only verdict is returned unchanged.

    Args:
        action: The executed action.
        result: The result reported by the agent.
        screenshot_before: Screenshot before the action (base64).
        screenshot_after: Screenshot after the action (base64).
        expected_result: Description of the expected post-action state.
        action_intention: What the action was supposed to achieve.
        workflow_context: Global context (progress, objective).
    """
    # Step 1: the existing pixel-level verification.
    pixel_result = self.verify_action(
        action=action,
        result=result,
        screenshot_before=screenshot_before,
        screenshot_after=screenshot_after,
    )

    # Without an expected-state description the Critic has nothing to judge.
    if not expected_result:
        return pixel_result

    # Unchanged screen with a retry already suggested → skip the costly VLM.
    if not pixel_result.changes_detected and pixel_result.suggestion == "retry":
        return pixel_result

    # Step 2: semantic verification through the VLM.
    semantic = self._verify_semantic(
        screenshot_before=screenshot_before,
        screenshot_after=screenshot_after,
        expected_result=expected_result,
        action_intention=action_intention,
        workflow_context=workflow_context,
    )
    if semantic is None:
        # VLM unavailable — fall back to the pixel-only verdict.
        return pixel_result

    # Fuse both verdicts into a single result.
    return self._merge_results(pixel_result, semantic)
|
||||
|
||||
def _verify_semantic(
    self,
    screenshot_before: Optional[str],
    screenshot_after: Optional[str],
    expected_result: str,
    action_intention: str = "",
    workflow_context: str = "",
) -> Optional[Dict[str, Any]]:
    """Ask the VLM (gemma4) whether the expected result is on screen.

    Sends the before/after screenshots plus the expected-state
    description to gemma4 (Ollama chat API, Docker port 11435 by
    default) and parses a VERDICT/RAISON formatted answer.

    On Citrix (flat bitmap streams) this is the only way to check
    intelligently that an action had the intended effect.

    Returns:
        Dict with {"verified": bool, "detail": str, "elapsed_ms": float},
        or None when the VLM is unavailable or its answer is unparsable.
    """
    import requests as _requests

    # Nothing to judge without an "after" screenshot.
    if not screenshot_after:
        return None

    gemma4_port = os.environ.get("GEMMA4_PORT", "11435")
    gemma4_url = f"http://localhost:{gemma4_port}/api/chat"

    # Optional context lines for the Critic prompt.
    context_parts = []
    if action_intention:
        context_parts.append(f"Action effectuée : {action_intention}")
    if workflow_context:
        context_parts.append(f"Contexte : {workflow_context}")
    context_str = "\n".join(context_parts)

    # One or two images: before (optional) and after.
    images = []
    prompt_images = ""
    if screenshot_before and screenshot_after:
        images = [screenshot_before, screenshot_after]
        prompt_images = (
            "Image 1 = écran AVANT l'action.\n"
            "Image 2 = écran APRÈS l'action.\n"
        )
    elif screenshot_after:
        images = [screenshot_after]
        prompt_images = "Image = écran APRÈS l'action.\n"

    prompt = (
        f"Tu es le VÉRIFICATEUR d'un robot RPA. Tu dois dire si l'action a réussi.\n\n"
        f"{prompt_images}"
        f"{context_str}\n\n"
        f"Résultat attendu : {expected_result}\n\n"
        f"Est-ce que le résultat attendu est visible à l'écran ?\n"
        f"Réponds EXACTEMENT dans ce format :\n"
        f"VERDICT: OUI ou NON\n"
        f"RAISON: explication courte (1 ligne)"
    )

    # Inject the business-domain system prompt when configured.
    from .domain_context import get_domain_context
    domain = get_domain_context(os.environ.get("RPA_DOMAIN", "generic"))
    messages = []
    if domain.system_prompt:
        messages.append({"role": "system", "content": domain.system_prompt})
    messages.append({"role": "user", "content": prompt, "images": images})

    try:
        t_start = time.time()
        resp = _requests.post(
            gemma4_url,
            json={
                "model": "gemma4:e4b",
                "messages": messages,
                "stream": False,
                "think": True,
                "options": {"temperature": 0.1, "num_predict": 800},
            },
            timeout=30,
        )
        elapsed_ms = (time.time() - t_start) * 1000

        if not resp.ok:
            logger.warning(f"Critic VLM HTTP {resp.status_code}")
            return None

        content = resp.json().get("message", {}).get("content", "").strip()

        # Parse the VERDICT/RAISON lines.
        verified = None
        detail = content
        for line in content.split("\n"):
            stripped = line.strip()
            line_upper = stripped.upper()
            if line_upper.startswith("VERDICT:"):
                verdict_text = line_upper.replace("VERDICT:", "").strip()
                if "OUI" in verdict_text or "YES" in verdict_text:
                    verified = True
                elif "NON" in verdict_text or "NO" in verdict_text:
                    verified = False
            elif line_upper.startswith("RAISON:"):
                # FIX: strip the prefix by length, not str.replace(): the
                # match above is case-insensitive, but the old
                # replace("RAISON:", ...) was case-sensitive, so a
                # "Raison:"/"raison:" prefix leaked into the detail text.
                detail = stripped[len("RAISON:"):].strip()

        if verified is None:
            # Fallback: look for a raw OUI/NON anywhere in the answer.
            upper = content.upper()
            if "OUI" in upper and "NON" not in upper:
                verified = True
            elif "NON" in upper:
                verified = False
            else:
                logger.warning(f"Critic VLM réponse non parsable : {content[:100]}")
                return None

        logger.info(
            f"Critic VLM : {'OUI' if verified else 'NON'} en {elapsed_ms:.0f}ms — {detail[:80]}"
        )
        return {
            "verified": verified,
            "detail": detail,
            "elapsed_ms": elapsed_ms,
        }

    except _requests.Timeout:
        logger.warning("Critic VLM timeout (30s)")
        return None
    except Exception as e:
        logger.warning(f"Critic VLM erreur : {e}")
        return None
|
||||
|
||||
def _merge_results(
    self,
    pixel: VerificationResult,
    semantic: Dict[str, Any],
) -> VerificationResult:
    """Fuse the pixel-level and semantic (Critic) verdicts into one result.

    Decision matrix:
    - Pixel OK + Critic OK   → verified (highest confidence)
    - Pixel OK + Critic NO   → UNEXPECTED change (popup, error, wrong
      window) — the most important case, suggest retry
    - Pixel NO + Critic OK   → verified anyway (the VLM sees the result,
      e.g. a subtle focus change)
    - Pixel NO + Critic NO   → failure, suggest retry
    """
    sem_ok = bool(semantic["verified"])
    pix_ok = bool(pixel.changes_detected)
    sem_detail = semantic["detail"]

    if pix_ok and sem_ok:
        # Everything agrees — maximum confidence.
        verified = True
        confidence = min(0.95, pixel.confidence + 0.2)
        suggestion = "continue"
        detail = f"Pixel OK + Critic OK : {sem_detail}"
    elif pix_ok:
        # The screen changed, but not the expected way.
        verified = False
        confidence = 0.7
        suggestion = "retry"
        detail = f"Pixel OK mais Critic NON : {sem_detail}"
    elif sem_ok:
        # Few pixels moved, yet the VLM sees the expected result.
        verified = True
        confidence = 0.6
        suggestion = "continue"
        detail = f"Pixel inchangé mais Critic OK : {sem_detail}"
    else:
        # Nothing changed and the VLM confirms the failure.
        verified = False
        confidence = 0.8
        suggestion = "retry"
        detail = f"Pixel inchangé + Critic NON : {sem_detail}"

    return VerificationResult(
        verified=verified,
        confidence=confidence,
        changes_detected=pix_ok,
        change_area_pct=pixel.change_area_pct,
        local_change_pct=pixel.local_change_pct,
        suggestion=suggestion,
        detail=detail,
        semantic_verified=sem_ok,
        semantic_detail=sem_detail,
        semantic_elapsed_ms=semantic["elapsed_ms"],
    )
|
||||
|
||||
@@ -1095,6 +1095,187 @@ def _attach_expected_screenshots(
|
||||
action_idx += 1
|
||||
|
||||
|
||||
def _enrich_actions_with_intentions(
    actions: list,
    session_dir: Path,
    domain_id: str = "",
) -> None:
    """Enrich actions with intention + expected state/result via gemma4.

    For each click/type/key_combo action, gemma4 receives:
    - the business-domain system prompt (TIM/CIM-10, office work, ...)
    - the previous action's reference screenshot as visual context
    - a summary of the whole workflow plus the current action
    and returns three fields parsed from an INTENTION/AVANT/APRÈS answer:
    - intention: what the user wants to accomplish (business terms)
    - expected_state: expected screen state BEFORE the action
    - expected_result: expected screen state AFTER the action

    These fields feed the Critic (post-action semantic verification) and
    the Observer (pre-action screen analysis). One gemma4 call per
    action, done at build time — never during replay. Modifies the
    actions in place.
    """
    import requests as _requests

    gemma4_port = os.environ.get("GEMMA4_PORT", _GEMMA4_PORT)
    gemma4_url = f"http://localhost:{gemma4_port}/api/chat"

    # Business-domain context for the system prompt.
    from .domain_context import get_domain_context
    domain = get_domain_context(domain_id or os.environ.get("RPA_DOMAIN", "generic"))
    domain_prompt = domain.system_prompt

    # Bail out early when gemma4 is not reachable.
    try:
        _requests.get(f"http://localhost:{gemma4_port}/api/tags", timeout=3)
    except Exception:
        logger.info("gemma4 non disponible — enrichissement intentions désactivé")
        return

    logger.info(f"Enrichissement intentions avec contexte métier : {domain.name}")
    total = len(actions)

    # Human-readable summary of the whole workflow (shared prompt context).
    action_summaries = []
    for i, a in enumerate(actions):
        a_type = a.get("type", "?")
        if a_type == "click":
            by_text = a.get("target_spec", {}).get("by_text", "")
            window = a.get("target_spec", {}).get("window_title", "")
            desc = f"{i+1}. Clic sur '{by_text or 'élément'}' dans '{window or '?'}'"
        elif a_type == "type":
            text = a.get("text", "")
            desc = f"{i+1}. Saisie de texte : '{text[:30]}'"
        elif a_type == "key_combo":
            keys = a.get("keys", [])
            desc = f"{i+1}. Raccourci clavier : {'+'.join(keys)}"
        elif a_type == "wait":
            desc = f"{i+1}. Attente {a.get('duration_ms', 0)}ms"
        else:
            desc = f"{i+1}. {a_type}"
        action_summaries.append(desc)

    workflow_summary = "\n".join(action_summaries)

    enriched_count = 0
    for i, action in enumerate(actions):
        a_type = action.get("type", "")

        # Only meaningful actions get an intention.
        if a_type not in ("click", "type", "key_combo"):
            continue

        # Description of the current action for the prompt.
        if a_type == "click":
            by_text = action.get("target_spec", {}).get("by_text", "")
            window = action.get("target_spec", {}).get("window_title", "")
            action_desc = f"Cliquer sur '{by_text or 'un élément'}' dans la fenêtre '{window or 'inconnue'}'"
        elif a_type == "type":
            text = action.get("text", "")
            action_desc = f"Saisir le texte '{text[:50]}'"
        elif a_type == "key_combo":
            keys = action.get("keys", [])
            action_desc = f"Appuyer sur {'+'.join(keys)}"
        else:
            action_desc = a_type

        # Visual context: actions and screenshots are ordered, so the
        # previous action's reference screenshot is the "before" image.
        # NOTE: target_spec.anchor_image_base64 is only a crop — not
        # enough context — so it is deliberately not used here.
        screenshot_b64 = ""
        if i > 0 and actions[i-1].get("expected_screenshot_b64"):
            screenshot_b64 = actions[i-1]["expected_screenshot_b64"]

        # Prompt enriched with the shared workflow summary.
        prompt = (
            f"Tu analyses un workflow enregistré ({total} actions).\n\n"
            f"Workflow complet :\n{workflow_summary}\n\n"
            f"Action actuelle ({i+1}/{total}) : {action_desc}\n\n"
            f"Réponds EXACTEMENT dans ce format (3 lignes) :\n"
            f"INTENTION: ce que l'utilisateur veut accomplir avec cette action (1 phrase)\n"
            f"AVANT: description de l'état attendu de l'écran AVANT cette action (1 phrase)\n"
            f"APRÈS: description de l'état attendu de l'écran APRÈS cette action (1 phrase)"
        )

        # Inject the business context (TIM, accounting, ...).
        messages = []
        if domain_prompt:
            messages.append({"role": "system", "content": domain_prompt})
        messages.append({"role": "user", "content": prompt})
        if screenshot_b64:
            # FIX: attach the screenshot to the USER message. The previous
            # code used messages[0], which is the SYSTEM message whenever a
            # domain prompt is configured — the image was then attached to
            # the wrong role and ignored as visual context.
            messages[-1]["images"] = [screenshot_b64]

        try:
            resp = _requests.post(
                gemma4_url,
                json={
                    "model": "gemma4:e4b",
                    "messages": messages,
                    "stream": False,
                    "think": True,
                    "options": {"temperature": 0.1, "num_predict": 800},
                },
                timeout=20,
            )
            if not resp.ok:
                continue

            content = resp.json().get("message", {}).get("content", "").strip()

            # Parse the 3-line INTENTION/AVANT/APRÈS answer.
            intention = ""
            expected_state = ""
            expected_result = ""

            for line in content.split("\n"):
                line_clean = line.strip()
                upper = line_clean.upper()
                if upper.startswith("INTENTION:"):
                    intention = line_clean.split(":", 1)[1].strip()
                elif upper.startswith("AVANT:"):
                    expected_state = line_clean.split(":", 1)[1].strip()
                elif upper.startswith(("APRÈS:", "APRES:")):
                    expected_result = line_clean.split(":", 1)[1].strip()

            # Store on the action (in place).
            if intention:
                action["intention"] = intention
            if expected_state:
                action["expected_state"] = expected_state
                # Propagate into target_spec for the Observer.
                if "target_spec" in action:
                    action["target_spec"]["expected_state"] = expected_state
            if expected_result:
                action["expected_result"] = expected_result

            if intention or expected_result:
                enriched_count += 1
                logger.debug(
                    "Action %d/%d enrichie : intention='%s', expected='%s'",
                    i+1, total, intention[:50], expected_result[:50],
                )

        except Exception as e:
            logger.debug("Enrichissement action %d échoué : %s", i+1, e)
            continue

    logger.info(
        "Enrichissement intentions : %d/%d actions enrichies par gemma4",
        enriched_count, total,
    )
|
||||
|
||||
|
||||
def build_replay_from_raw_events(
|
||||
events: list,
|
||||
session_id: str = "",
|
||||
@@ -1514,6 +1695,34 @@ def build_replay_from_raw_events(
|
||||
if next_title:
|
||||
result[ci]["expected_window_title"] = next_title
|
||||
|
||||
# ── 10. Enrichir avec intention + expected_result via gemma4 (Critic) ──
|
||||
# gemma4 analyse chaque action dans son contexte pour produire :
|
||||
# - intention : ce que l'utilisateur veut accomplir
|
||||
# - expected_result : description de l'état écran attendu après l'action
|
||||
# - expected_state : description de l'état écran attendu AVANT l'action
|
||||
# Ces champs alimentent le Critic (vérification sémantique post-action)
|
||||
# et l'Observer (pré-analyse écran).
|
||||
# Ref: docs/VISION_RPA_INTELLIGENT.md — étape VERIFY du pipeline
|
||||
# Ref: docs/PLAN_ACTEUR_V1.md — Phase 1 : Workflow comme template
|
||||
if session_dir_path:
|
||||
_enrich_actions_with_intentions(result, session_dir_path)
|
||||
|
||||
# ── 11. Consolider avec les apprentissages passés ──
|
||||
# Les replays précédents ont enregistré quelles méthodes marchent
|
||||
# pour quels éléments. On réinjecte ces connaissances dans le workflow.
|
||||
# C'est la boucle d'apprentissage : chaque replay améliore les suivants.
|
||||
try:
|
||||
from .replay_learner import ReplayLearner
|
||||
_learner = ReplayLearner()
|
||||
consolidated = _learner.consolidate_workflow(result, session_id)
|
||||
if consolidated:
|
||||
logger.info(
|
||||
"Consolidation apprentissage : %d actions enrichies par l'historique",
|
||||
consolidated,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug("Consolidation apprentissage échouée : %s", e)
|
||||
|
||||
# Stats visual replay
|
||||
visual_clicks = sum(
|
||||
1 for a in result
|
||||
@@ -1521,10 +1730,13 @@ def build_replay_from_raw_events(
|
||||
)
|
||||
total_clicks = sum(1 for a in result if a.get("type") == "click")
|
||||
verified_count = sum(1 for a in result if a.get("expected_screenshot_b64"))
|
||||
intention_count = sum(1 for a in result if a.get("intention"))
|
||||
logger.info(
|
||||
"build_replay_from_raw_events(%s) : %d actions propres produites "
|
||||
"(%d/%d clics avec visual_mode, %d avec screenshot de référence)",
|
||||
session_id, len(result), visual_clicks, total_clicks, verified_count,
|
||||
"(%d/%d clics avec visual_mode, %d avec screenshot de référence, "
|
||||
"%d avec intentions)",
|
||||
session_id, len(result), visual_clicks, total_clicks,
|
||||
verified_count, intention_count,
|
||||
)
|
||||
|
||||
# Libérer gemma4 du GPU pour que qwen2.5vl puisse charger au replay
|
||||
|
||||
596
agent_v0/server_v1/task_planner.py
Normal file
596
agent_v0/server_v1/task_planner.py
Normal file
@@ -0,0 +1,596 @@
|
||||
# agent_v0/server_v1/task_planner.py
|
||||
"""
|
||||
TaskPlanner — Planificateur MACRO pour RPA Vision V3.
|
||||
|
||||
Responsabilité : comprendre un ordre en langage naturel et l'exécuter.
|
||||
|
||||
"Traite les dossiers de janvier" →
|
||||
1. Comprendre l'instruction (gemma4)
|
||||
2. Trouver le workflow appris correspondant
|
||||
3. Identifier les paramètres/variables
|
||||
4. Exécuter (replay avec substitution) ou planifier (actions libres)
|
||||
|
||||
C'est le niveau MACRO de l'architecture 3 niveaux :
|
||||
MACRO (TaskPlanner) → décompose et orchestre
|
||||
MÉSO (Policy/Observer/Critic) → décide et vérifie
|
||||
MICRO (Grounding/Executor) → localise et clique
|
||||
|
||||
Ref: docs/PLAN_ACTEUR_V1.md — Phase 3 : Planificateur
|
||||
Ref: docs/VISION_RPA_INTELLIGENT.md — "Il observe" → "Il devient autonome"
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class TaskPlan:
    """Execution plan produced by the MACRO planner."""
    instruction: str                       # original natural-language order
    understood: bool = False               # instruction was understood
    workflow_match: str = ""               # matched workflow id (if any)
    workflow_name: str = ""                # matched workflow name
    match_confidence: float = 0.0          # match confidence in [0, 1]
    parameters: Dict[str, Any] = field(default_factory=dict)  # extracted variables
    is_loop: bool = False                  # loop over a list of items
    loop_source: str = ""                  # where the items come from (screen, file, query)
    steps: List[Dict[str, Any]] = field(default_factory=list)  # planned actions
    mode: str = ""                         # "replay" (known workflow) or "free" (generated)
    error: str = ""                        # failure description, empty on success

    def to_dict(self) -> Dict[str, Any]:
        """JSON-serializable view of the plan (steps collapsed to a count)."""
        return dict(
            instruction=self.instruction,
            understood=self.understood,
            workflow_match=self.workflow_match,
            workflow_name=self.workflow_name,
            match_confidence=round(self.match_confidence, 3),
            parameters=self.parameters,
            is_loop=self.is_loop,
            loop_source=self.loop_source,
            steps_count=len(self.steps),
            mode=self.mode,
            error=self.error,
        )
|
||||
|
||||
|
||||
@dataclass
class TaskResult:
    """Outcome of executing a task."""

    instruction: str                 # original user instruction
    success: bool                    # overall success flag
    total_items: int = 1             # number of items processed (1 when not a loop)
    completed_items: int = 0         # items that finished successfully
    failed_items: int = 0            # items that failed
    results: List[Dict[str, Any]] = field(default_factory=list)  # per-item details
    elapsed_s: float = 0.0           # wall-clock duration, in seconds
    summary: str = ""                # human-readable outcome summary

    def to_dict(self) -> Dict[str, Any]:
        """Serialise the result as a JSON-friendly dictionary.

        The per-item `results` list is not included in the payload;
        only the aggregate counters are exposed. Duration is rounded
        to one decimal.
        """
        payload: Dict[str, Any] = {
            "instruction": self.instruction,
            "success": self.success,
            "total_items": self.total_items,
            "completed_items": self.completed_items,
            "failed_items": self.failed_items,
            "elapsed_s": round(self.elapsed_s, 1),
            "summary": self.summary,
        }
        return payload
|
||||
|
||||
|
||||
class TaskPlanner:
    """MACRO-level planner — understands natural-language orders and
    orchestrates their execution.

    Usage:
        planner = TaskPlanner()
        plan = planner.understand("traite les dossiers de janvier")
        result = planner.execute(plan, replay_callback=launch_replay)
    """

    def __init__(self, gemma4_port: str = "", domain_id: str = ""):
        """Configure the gemma4 endpoint and load the business-domain context.

        Args:
            gemma4_port: Port of the local gemma4 server; falls back to the
                GEMMA4_PORT environment variable, then "11435".
            domain_id: Business domain identifier; falls back to the
                RPA_DOMAIN environment variable, then "generic".
        """
        port = gemma4_port or os.environ.get("GEMMA4_PORT", "11435")
        self._gemma4_port = port
        self._gemma4_url = f"http://localhost:{port}/api/chat"
        self._domain_id = domain_id or os.environ.get("RPA_DOMAIN", "generic")

        # Load the business-domain context (e.g. TIM/CIM-10 hints).
        # The planner keeps working without it — domain hints are optional.
        try:
            from .domain_context import get_domain_context
            self._domain = get_domain_context(self._domain_id)
        except Exception:
            self._domain = None
|
||||
|
||||
    def understand(
        self,
        instruction: str,
        available_workflows: Optional[List[Dict[str, Any]]] = None,
        screen_context: str = "",
    ) -> "TaskPlan":
        """Understand a natural-language instruction.

        Step 1: gemma4 analyses the instruction and identifies:
          - the task type (open, process, search, ...)
          - the matching workflow (if one exists)
          - the parameters/variables (name, date, file, ...)
          - whether it is a loop ("process ALL the records")

        Args:
            instruction: The user's order ("traite les dossiers de janvier").
            available_workflows: Known workflows [{name, description, session_id}].
            screen_context: Description of the current screen. NOTE(review):
                accepted but not used in the prompt below — confirm intent.

        Returns:
            A TaskPlan. On failure `plan.error` is set and `understood`
            remains False (the caller gets a plan either way).
        """
        # Local import so the module stays importable without `requests`.
        import requests as _requests

        plan = TaskPlan(instruction=instruction)

        # Build the workflow list for the prompt (top 10 only, to keep it short).
        workflows_desc = "Aucun workflow enregistré."
        if available_workflows:
            top_workflows = available_workflows[:10]
            lines = []
            for i, wf in enumerate(top_workflows):
                name = wf.get("name", wf.get("session_id", f"workflow_{i}"))
                desc = wf.get("description", "")
                sid = wf.get("session_id", "")
                # Show the business description to help semantic matching.
                label = f"{name}"
                if desc:
                    label += f" — {desc}"
                lines.append(f"  {i+1}. {label} (id={sid})")
            workflows_desc = "\n".join(lines)

        # Optional business-domain context block, prepended to the prompt.
        domain_prompt = ""
        if self._domain and self._domain.system_prompt:
            domain_prompt = f"\nCONTEXTE MÉTIER :\n{self._domain.system_prompt}\n"

        # The prompt pins an exact line-per-field answer format; the parser
        # in _parse_understanding() is tolerant of minor drift anyway.
        prompt = (
            f"Tu es le PLANIFICATEUR d'un robot RPA (Léa). "
            f"Analyse l'ordre utilisateur et identifie le workflow correspondant.\n"
            f"{domain_prompt}\n"
            f"WORKFLOWS DISPONIBLES :\n{workflows_desc}\n\n"
            f"ORDRE : \"{instruction}\"\n\n"
            f"RÈGLE DE MATCHING :\n"
            f"- Compare l'INTENTION de l'ordre avec la DESCRIPTION de chaque workflow\n"
            f"- \"Ouvre le bloc-notes\" correspond à un workflow décrit \"Ouvrir Bloc-notes via recherche\"\n"
            f"- Un workflow qui utilise la même application EST un match même si les mots diffèrent\n"
            f"- Si aucun workflow ne correspond, réponds WORKFLOW: AUCUN\n\n"
            f"Réponds EXACTEMENT dans ce format (une ligne par champ) :\n"
            f"COMPRIS: OUI\n"
            f"WORKFLOW: <numéro> (ou AUCUN)\n"
            f"CONFIANCE: <0.0 à 1.0>\n"
            f"PARAMETRES: clé1=valeur1, clé2=valeur2 (ou AUCUN)\n"
            f"BOUCLE: OUI ou NON\n"
            f"SOURCE_BOUCLE: écran, fichier, ou aucun\n"
            f"PLAN:\n"
            f"1. première étape\n"
            f"2. deuxième étape\n"
        )

        try:
            resp = _requests.post(
                self._gemma4_url,
                json={
                    "model": "gemma4:e4b",
                    "messages": [{"role": "user", "content": prompt}],
                    "stream": False,
                    "think": True,
                    # Low temperature: we want a deterministic, parseable answer.
                    "options": {"temperature": 0.2, "num_predict": 800},
                },
                timeout=120,
            )

            if not resp.ok:
                plan.error = f"gemma4 HTTP {resp.status_code}"
                return plan

            content = resp.json().get("message", {}).get("content", "").strip()
            logger.info(f"TaskPlanner: réponse gemma4 ({len(content)} chars)")

            # Parse the model answer into the plan.
            plan = self._parse_understanding(plan, content, available_workflows)

        except Exception as e:
            # Network error, timeout, malformed JSON — degrade gracefully.
            plan.error = f"gemma4 erreur: {e}"
            logger.warning(f"TaskPlanner: {plan.error}")

        return plan
|
||||
|
||||
def _parse_understanding(
|
||||
self,
|
||||
plan: TaskPlan,
|
||||
content: str,
|
||||
available_workflows: Optional[List[Dict]] = None,
|
||||
) -> TaskPlan:
|
||||
"""Parser la réponse de gemma4 pour construire le plan.
|
||||
|
||||
Tolérant aux variations de format :
|
||||
- "COMPRIS : OUI" ou "COMPRIS: oui" ou "**COMPRIS:** OUI"
|
||||
- Numéros de workflow : "1", "1.", "#1", "Workflow 1"
|
||||
- Paramètres : "clé=valeur" ou "clé: valeur" sur la même ligne ou les suivantes
|
||||
"""
|
||||
import re
|
||||
|
||||
# Nettoyer le markdown (gras, italique)
|
||||
content_clean = re.sub(r'\*{1,2}([^*]+)\*{1,2}', r'\1', content)
|
||||
|
||||
in_params_section = False
|
||||
in_plan_section = False
|
||||
|
||||
for line in content_clean.split("\n"):
|
||||
line_clean = line.strip()
|
||||
if not line_clean:
|
||||
continue
|
||||
upper = line_clean.upper()
|
||||
|
||||
# --- COMPRIS ---
|
||||
if re.match(r'^COMPRIS\s*[:=]', upper):
|
||||
val = re.split(r'[:=]', upper, 1)[1].strip()
|
||||
plan.understood = "OUI" in val or "YES" in val or "TRUE" in val
|
||||
in_params_section = False
|
||||
in_plan_section = False
|
||||
|
||||
# --- WORKFLOW ---
|
||||
elif re.match(r'^WORKFLOW\s*[:=]', upper):
|
||||
val = line_clean.split(":", 1)[1].strip() if ":" in line_clean else line_clean.split("=", 1)[1].strip()
|
||||
val_upper = val.upper().strip()
|
||||
in_params_section = False
|
||||
in_plan_section = False
|
||||
if val_upper in ("AUCUN", "NONE", "NON", "N/A", "-", ""):
|
||||
continue
|
||||
# Extraire le numéro : "1", "1.", "#1", "Workflow 1", "1 (Bloc-notes)"
|
||||
num_match = re.search(r'(\d+)', val)
|
||||
if num_match and available_workflows:
|
||||
idx = int(num_match.group(1)) - 1
|
||||
if 0 <= idx < len(available_workflows):
|
||||
wf = available_workflows[idx]
|
||||
plan.workflow_match = wf.get("session_id", "")
|
||||
plan.workflow_name = wf.get("name", "")
|
||||
plan.match_confidence = 0.8
|
||||
plan.mode = "replay"
|
||||
|
||||
# --- CONFIANCE ---
|
||||
elif re.match(r'^CONFIANCE\s*[:=]', upper):
|
||||
val = re.split(r'[:=]', line_clean, 1)[1].strip()
|
||||
in_params_section = False
|
||||
in_plan_section = False
|
||||
# Extraire un float : "0.9", "0,9", "90%"
|
||||
float_match = re.search(r'(\d+[.,]\d+)', val)
|
||||
if float_match:
|
||||
try:
|
||||
plan.match_confidence = float(float_match.group(1).replace(",", "."))
|
||||
except ValueError:
|
||||
pass
|
||||
elif "%" in val:
|
||||
pct_match = re.search(r'(\d+)', val)
|
||||
if pct_match:
|
||||
plan.match_confidence = int(pct_match.group(1)) / 100.0
|
||||
|
||||
# --- PARAMETRES ---
|
||||
elif re.match(r'^PARAM[EÈ]TRES?\s*[:=]', upper):
|
||||
val = re.split(r'[:=]', line_clean, 1)[1].strip()
|
||||
in_plan_section = False
|
||||
val_upper = val.upper().strip()
|
||||
if val_upper in ("AUCUN", "NONE", "NON", "N/A", "-"):
|
||||
in_params_section = False
|
||||
continue
|
||||
# Vide = paramètres sur les lignes suivantes
|
||||
in_params_section = True
|
||||
if val and val_upper not in ("", ):
|
||||
# Paramètres sur la même ligne : "clé1=val1, clé2=val2"
|
||||
self._extract_params_from_line(val, plan)
|
||||
|
||||
# --- BOUCLE ---
|
||||
elif re.match(r'^BOUCLE\s*[:=]', upper):
|
||||
val = re.split(r'[:=]', upper, 1)[1].strip()
|
||||
plan.is_loop = "OUI" in val or "YES" in val or "TRUE" in val
|
||||
in_params_section = False
|
||||
in_plan_section = False
|
||||
|
||||
# --- SOURCE_BOUCLE ---
|
||||
elif re.match(r'^SOURCE[_ ]BOUCLE\s*[:=]', upper):
|
||||
plan.loop_source = re.split(r'[:=]', line_clean, 1)[1].strip()
|
||||
in_params_section = False
|
||||
in_plan_section = False
|
||||
|
||||
# --- PLAN ---
|
||||
elif re.match(r'^PLAN\s*[:=]?\s*$', upper) or upper == "PLAN:":
|
||||
in_plan_section = True
|
||||
in_params_section = False
|
||||
|
||||
# --- Lignes de contenu (paramètres d'abord, puis étapes) ---
|
||||
elif in_params_section and ("=" in line_clean or ": " in line_clean):
|
||||
self._extract_params_from_line(line_clean, plan)
|
||||
|
||||
elif in_plan_section and re.match(r'^(\d+[.)]\s+|- )', line_clean):
|
||||
plan.steps.append({"description": line_clean})
|
||||
|
||||
elif re.match(r'^(\d+[.)]\s+|- )', line_clean) and not in_params_section:
|
||||
# Étape numérotée en dehors d'une section explicite
|
||||
plan.steps.append({"description": line_clean})
|
||||
|
||||
# Si pas de workflow trouvé mais compris → mode libre
|
||||
if plan.understood and not plan.workflow_match:
|
||||
plan.mode = "free"
|
||||
|
||||
return plan
|
||||
|
||||
@staticmethod
|
||||
def _extract_params_from_line(text: str, plan: TaskPlan) -> None:
|
||||
"""Extraire des paramètres clé=valeur ou clé: valeur d'une ligne."""
|
||||
import re
|
||||
text = text.strip().strip("- ")
|
||||
# Ignorer les labels de section
|
||||
if re.match(r'^(COMPRIS|WORKFLOW|BOUCLE|SOURCE|PLAN|CONFIANCE)', text.upper()):
|
||||
return
|
||||
# Essayer clé=valeur d'abord
|
||||
if "=" in text:
|
||||
for part in text.split(","):
|
||||
part = part.strip()
|
||||
if "=" in part:
|
||||
k, v = part.split("=", 1)
|
||||
k, v = k.strip().strip("- "), v.strip()
|
||||
if k and v and v.upper() not in ("AUCUN", "NONE"):
|
||||
plan.parameters[k] = v
|
||||
# Sinon clé: valeur (mais pas les labels de section)
|
||||
elif ": " in text:
|
||||
k, v = text.split(": ", 1)
|
||||
k, v = k.strip().strip("- "), v.strip()
|
||||
if k and v and len(k) < 30 and v.upper() not in ("AUCUN", "NONE"):
|
||||
plan.parameters[k] = v
|
||||
|
||||
def execute(
|
||||
self,
|
||||
plan: TaskPlan,
|
||||
replay_callback=None,
|
||||
machine_id: str = "default",
|
||||
) -> TaskResult:
|
||||
"""Exécuter un plan.
|
||||
|
||||
Deux modes :
|
||||
1. "replay" : relancer un workflow enregistré avec substitution de variables
|
||||
2. "free" : exécuter les actions planifiées par gemma4
|
||||
|
||||
Args:
|
||||
plan: Le plan généré par understand()
|
||||
replay_callback: Fonction qui lance un replay
|
||||
signature: (session_id, machine_id, params) → replay_id
|
||||
machine_id: Machine cible pour l'exécution
|
||||
"""
|
||||
t_start = time.time()
|
||||
result = TaskResult(instruction=plan.instruction, success=False)
|
||||
|
||||
if not plan.understood:
|
||||
result.summary = f"Instruction non comprise : {plan.error or 'réponse gemma4 invalide'}"
|
||||
return result
|
||||
|
||||
if plan.mode == "replay" and plan.workflow_match:
|
||||
# Mode replay : relancer un workflow connu
|
||||
result = self._execute_replay(plan, replay_callback, machine_id)
|
||||
|
||||
elif plan.mode == "free" and plan.steps:
|
||||
# Mode libre : actions planifiées par gemma4
|
||||
result = self._execute_free(plan, replay_callback, machine_id)
|
||||
|
||||
else:
|
||||
result.summary = "Pas de workflow correspondant et pas d'actions planifiées"
|
||||
|
||||
result.elapsed_s = time.time() - t_start
|
||||
return result
|
||||
|
||||
def _execute_replay(
|
||||
self,
|
||||
plan: TaskPlan,
|
||||
replay_callback,
|
||||
machine_id: str,
|
||||
) -> TaskResult:
|
||||
"""Exécuter en mode replay (workflow connu)."""
|
||||
result = TaskResult(instruction=plan.instruction, success=False)
|
||||
|
||||
if not replay_callback:
|
||||
result.summary = "Pas de callback replay configuré"
|
||||
return result
|
||||
|
||||
if plan.is_loop:
|
||||
# Boucle : TODO — lister les éléments puis itérer
|
||||
# Pour l'instant, exécution simple
|
||||
logger.info(
|
||||
f"TaskPlanner: boucle détectée mais pas encore implémentée, "
|
||||
f"exécution simple du workflow {plan.workflow_name}"
|
||||
)
|
||||
|
||||
try:
|
||||
replay_id = replay_callback(
|
||||
session_id=plan.workflow_match,
|
||||
machine_id=machine_id,
|
||||
params=plan.parameters,
|
||||
)
|
||||
result.success = True
|
||||
result.completed_items = 1
|
||||
result.total_items = 1
|
||||
result.summary = (
|
||||
f"Workflow '{plan.workflow_name}' lancé (replay={replay_id})"
|
||||
f" avec paramètres {plan.parameters}" if plan.parameters else ""
|
||||
)
|
||||
result.results.append({
|
||||
"replay_id": replay_id,
|
||||
"workflow": plan.workflow_name,
|
||||
"params": plan.parameters,
|
||||
})
|
||||
except Exception as e:
|
||||
result.summary = f"Erreur lancement replay : {e}"
|
||||
logger.error(f"TaskPlanner: {result.summary}")
|
||||
|
||||
return result
|
||||
|
||||
def _execute_free(
|
||||
self,
|
||||
plan: TaskPlan,
|
||||
replay_callback,
|
||||
machine_id: str,
|
||||
) -> TaskResult:
|
||||
"""Exécuter en mode libre (actions planifiées par gemma4)."""
|
||||
result = TaskResult(instruction=plan.instruction, success=False)
|
||||
|
||||
# Convertir les étapes en actions replay
|
||||
actions = self._steps_to_actions(plan.steps, plan.parameters)
|
||||
|
||||
if not actions:
|
||||
result.summary = "Impossible de convertir le plan en actions exécutables"
|
||||
return result
|
||||
|
||||
if replay_callback:
|
||||
try:
|
||||
replay_id = replay_callback(
|
||||
actions=actions,
|
||||
machine_id=machine_id,
|
||||
task_description=plan.instruction,
|
||||
)
|
||||
result.success = True
|
||||
result.completed_items = 1
|
||||
result.summary = f"Plan libre exécuté ({len(actions)} actions, replay={replay_id})"
|
||||
except Exception as e:
|
||||
result.summary = f"Erreur exécution plan libre : {e}"
|
||||
else:
|
||||
result.summary = f"Plan prêt ({len(actions)} actions) mais pas de callback"
|
||||
result.results = actions
|
||||
|
||||
return result
|
||||
|
||||
def _steps_to_actions(
|
||||
self,
|
||||
steps: List[Dict[str, Any]],
|
||||
parameters: Dict[str, Any],
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Convertir les étapes textuelles en actions replay.
|
||||
|
||||
Utilise gemma4 pour traduire chaque étape en action structurée.
|
||||
Les types d'actions supportés : click, type, key_combo, wait.
|
||||
"""
|
||||
import re
|
||||
import requests as _requests
|
||||
|
||||
steps_text = "\n".join(
|
||||
s.get("description", str(s)) for s in steps
|
||||
)
|
||||
|
||||
prompt = (
|
||||
"Convertis ces étapes RPA en actions JSON.\n\n"
|
||||
f"ÉTAPES :\n{steps_text}\n\n"
|
||||
f"PARAMÈTRES : {json.dumps(parameters, ensure_ascii=False)}\n\n"
|
||||
"TYPES D'ACTIONS DISPONIBLES :\n"
|
||||
'- Cliquer : {"type": "click", "target_spec": {"by_text": "texte du bouton"}}\n'
|
||||
'- Taper du texte : {"type": "type", "text": "texte à taper"}\n'
|
||||
'- Raccourci clavier : {"type": "key_combo", "keys": ["ctrl", "s"]}\n'
|
||||
'- Attendre : {"type": "wait", "duration_ms": 2000}\n\n'
|
||||
"RÈGLES :\n"
|
||||
"- UNE action JSON par ligne\n"
|
||||
"- Pas de commentaires, pas de texte autour, JUSTE le JSON\n"
|
||||
"- Utilise les paramètres fournis dans les valeurs\n\n"
|
||||
"ACTIONS :\n"
|
||||
)
|
||||
|
||||
try:
|
||||
resp = _requests.post(
|
||||
self._gemma4_url,
|
||||
json={
|
||||
"model": "gemma4:e4b",
|
||||
"messages": [{"role": "user", "content": prompt}],
|
||||
"stream": False,
|
||||
"think": True,
|
||||
"options": {"temperature": 0.1, "num_predict": 1500},
|
||||
},
|
||||
timeout=120,
|
||||
)
|
||||
|
||||
if not resp.ok:
|
||||
return []
|
||||
|
||||
content = resp.json().get("message", {}).get("content", "")
|
||||
return self._parse_actions_json(content)
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"TaskPlanner: conversion étapes échouée : {e}")
|
||||
return []
|
||||
|
||||
@staticmethod
|
||||
def _parse_actions_json(content: str) -> List[Dict[str, Any]]:
|
||||
"""Parser des actions JSON depuis une réponse VLM.
|
||||
|
||||
Tolère :
|
||||
- Un JSON par ligne
|
||||
- Un tableau JSON [...]
|
||||
- Du texte autour des JSON (markdown, commentaires)
|
||||
- Des objets imbriqués (target_spec)
|
||||
"""
|
||||
import re
|
||||
|
||||
actions = []
|
||||
valid_types = {"click", "type", "key_combo", "wait"}
|
||||
|
||||
# Stratégie 1 : essayer de parser comme un tableau JSON
|
||||
array_match = re.search(r'\[[\s\S]*\]', content)
|
||||
if array_match:
|
||||
try:
|
||||
parsed = json.loads(array_match.group())
|
||||
if isinstance(parsed, list):
|
||||
for item in parsed:
|
||||
if isinstance(item, dict) and item.get("type") in valid_types:
|
||||
if item["type"] == "click":
|
||||
item["visual_mode"] = True
|
||||
actions.append(item)
|
||||
if actions:
|
||||
return actions
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
# Stratégie 2 : extraire les objets JSON individuels (supporte imbrication)
|
||||
# Trouver chaque { ... } en gérant les accolades imbriquées
|
||||
i = 0
|
||||
while i < len(content):
|
||||
if content[i] == '{':
|
||||
depth = 0
|
||||
start = i
|
||||
while i < len(content):
|
||||
if content[i] == '{':
|
||||
depth += 1
|
||||
elif content[i] == '}':
|
||||
depth -= 1
|
||||
if depth == 0:
|
||||
candidate = content[start:i+1]
|
||||
try:
|
||||
action = json.loads(candidate)
|
||||
if isinstance(action, dict) and action.get("type") in valid_types:
|
||||
if action["type"] == "click":
|
||||
action["visual_mode"] = True
|
||||
actions.append(action)
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
break
|
||||
i += 1
|
||||
i += 1
|
||||
|
||||
return actions
|
||||
|
||||
def list_capabilities(
|
||||
self,
|
||||
available_workflows: List[Dict[str, Any]],
|
||||
) -> str:
|
||||
"""Lister ce que Léa sait faire (pour l'interface utilisateur)."""
|
||||
if not available_workflows:
|
||||
return "Léa n'a pas encore appris de workflows. Enregistrez-en un d'abord."
|
||||
|
||||
lines = ["Léa sait faire :"]
|
||||
for wf in available_workflows:
|
||||
name = wf.get("name", "?")
|
||||
desc = wf.get("description", "")
|
||||
lines.append(f" - {name}" + (f" ({desc})" if desc else ""))
|
||||
|
||||
lines.append("")
|
||||
lines.append("Dites-lui ce que vous voulez faire en langage naturel.")
|
||||
return "\n".join(lines)
|
||||
@@ -68,12 +68,19 @@ class TokenManager:
|
||||
logger.info(f"Loading token config. RPA_TOKEN_ADMIN present: {bool(admin_token)}")
|
||||
logger.info(f"Loading token config. RPA_TOKEN_READONLY present: {bool(readonly_token)}")
|
||||
if admin_token:
|
||||
logger.info(f"RPA_TOKEN_ADMIN value: {admin_token[:8]}...")
|
||||
logger.info("RPA_TOKEN_ADMIN configuré")
|
||||
if readonly_token:
|
||||
logger.info(f"RPA_TOKEN_READONLY value: {readonly_token[:8]}...")
|
||||
logger.info("RPA_TOKEN_READONLY configuré")
|
||||
|
||||
# Clé secrète pour signer les tokens
|
||||
self.secret_key = os.getenv("TOKEN_SECRET_KEY", "dev-token-secret-change-in-production")
|
||||
# Clé secrète pour signer les tokens — OBLIGATOIRE en production
|
||||
self.secret_key = os.getenv("TOKEN_SECRET_KEY", "")
|
||||
if not self.secret_key:
|
||||
logger.warning(
|
||||
"TOKEN_SECRET_KEY non défini — utilisation d'une clé aléatoire. "
|
||||
"Définir TOKEN_SECRET_KEY dans .env.local pour la production."
|
||||
)
|
||||
import secrets
|
||||
self.secret_key = secrets.token_hex(32)
|
||||
|
||||
# Tokens statiques pour rétrocompatibilité
|
||||
self.admin_tokens = set()
|
||||
@@ -89,11 +96,13 @@ class TokenManager:
|
||||
self.admin_tokens.add(admin_token)
|
||||
logger.info(f"Added RPA_TOKEN_ADMIN to admin_tokens")
|
||||
|
||||
# Temporary fix: Add production tokens directly
|
||||
prod_admin_token = "73cf0db73f9a5064e79afebba96c85338be65cc2060b9c1d42c3ea5dd7d4e490"
|
||||
prod_readonly_token = "7eea1de415cc69c02381ce09ff63aeebf3e1d9b476d54aa6730ba9de849e3dc6"
|
||||
# Tokens de production : lus EXCLUSIVEMENT depuis les variables d'environnement.
|
||||
# Ne JAMAIS hardcoder de tokens dans le code source.
|
||||
prod_admin_token = os.getenv("RPA_PROD_ADMIN_TOKEN", "")
|
||||
prod_readonly_token = os.getenv("RPA_PROD_READONLY_TOKEN", "")
|
||||
if prod_admin_token:
|
||||
self.admin_tokens.add(prod_admin_token)
|
||||
logger.info(f"Added hardcoded production admin token")
|
||||
logger.info("Added RPA_PROD_ADMIN_TOKEN to admin_tokens")
|
||||
|
||||
self.read_only_tokens = set()
|
||||
if os.getenv("READ_ONLY_TOKENS"):
|
||||
@@ -102,11 +111,11 @@ class TokenManager:
|
||||
# Support tokens RPA Vision V3 (Fiche #23)
|
||||
if readonly_token:
|
||||
self.read_only_tokens.add(readonly_token)
|
||||
logger.info(f"Added RPA_TOKEN_READONLY to read_only_tokens")
|
||||
logger.info("Added RPA_TOKEN_READONLY to read_only_tokens")
|
||||
|
||||
# Temporary fix: Add production tokens directly
|
||||
if prod_readonly_token:
|
||||
self.read_only_tokens.add(prod_readonly_token)
|
||||
logger.info(f"Added hardcoded production readonly token")
|
||||
logger.info("Added RPA_PROD_READONLY_TOKEN to read_only_tokens")
|
||||
|
||||
# Configuration expiration
|
||||
self.default_expiry_hours = int(os.getenv("TOKEN_EXPIRY_HOURS", "24"))
|
||||
|
||||
@@ -85,7 +85,10 @@ echo ""
|
||||
# 4. Copier le package agent_v1 (code Python)
|
||||
# ---------------------------------------------------------------
|
||||
echo "[4/7] Copie du code agent_v1..."
|
||||
# Copier tout le dossier en excluant les fichiers inutiles
|
||||
# Copier tout le dossier en excluant uniquement les artefacts de build/test.
|
||||
# IMPORTANT : ne PAS exclure les modules Python ui/ (shared_state, chat_window,
|
||||
# capture_server) — ils sont requis par main.py et causent un crash au demarrage
|
||||
# s'ils sont absents.
|
||||
rsync -a \
|
||||
--exclude='__pycache__' \
|
||||
--exclude='*.pyc' \
|
||||
@@ -94,9 +97,6 @@ rsync -a \
|
||||
--exclude='logs/*.log' \
|
||||
--exclude='.hypothesis' \
|
||||
--exclude='*.md' \
|
||||
--exclude='ui/chat_window.py' \
|
||||
--exclude='ui/shared_state.py' \
|
||||
--exclude='ui/capture_server.py' \
|
||||
"$PROJECT_ROOT/agent_v0/agent_v1/" \
|
||||
"$PACKAGE_DIR/agent_v1/"
|
||||
|
||||
@@ -132,6 +132,56 @@ echo "[6/7] Configuration des packages Python..."
|
||||
echo " Structure d'imports verifiee"
|
||||
echo ""
|
||||
|
||||
# ---------------------------------------------------------------
# 6b. Verify that every required Python module made it into the
#     package. Guards against rsync --exclude rules silently
#     dropping modules that main.py imports at startup.
# ---------------------------------------------------------------
echo "[6b/7] Verification des modules Python requis..."
MISSING=0
# Files the agent needs at runtime; keep in sync with the imports in main.py.
REQUIRED_FILES=(
    "agent_v1/__init__.py"
    "agent_v1/main.py"
    "agent_v1/config.py"
    "agent_v1/window_info.py"
    "agent_v1/window_info_crossplatform.py"
    "agent_v1/core/__init__.py"
    "agent_v1/core/captor.py"
    "agent_v1/core/executor.py"
    "agent_v1/network/__init__.py"
    "agent_v1/network/streamer.py"
    "agent_v1/session/__init__.py"
    "agent_v1/session/storage.py"
    "agent_v1/ui/__init__.py"
    "agent_v1/ui/shared_state.py"
    "agent_v1/ui/smart_tray.py"
    "agent_v1/ui/chat_window.py"
    "agent_v1/ui/capture_server.py"
    "agent_v1/ui/notifications.py"
    "agent_v1/vision/__init__.py"
    "agent_v1/vision/capturer.py"
    "agent_v1/vision/blur_sensitive.py"
    "agent_v1/vision/system_info.py"
    "agent_v1/monitoring/__init__.py"
    "lea_ui/__init__.py"
    "lea_ui/server_client.py"
    "run_agent_v1.py"
)

# Report EVERY missing file (not just the first) before failing,
# so one build run surfaces all packaging problems at once.
for req_file in "${REQUIRED_FILES[@]}"; do
    if [[ ! -f "$PACKAGE_DIR/$req_file" ]]; then
        echo -e "  ${RED}MANQUANT : $req_file${NC}"
        MISSING=$((MISSING + 1))
    fi
done

# Abort the build: shipping an incomplete package crashes at agent startup.
if [[ $MISSING -gt 0 ]]; then
    echo ""
    echo -e "${RED}  ERREUR : $MISSING fichier(s) requis manquant(s) !${NC}"
    echo -e "${RED}  Le package est INCOMPLET — corrigez build_package.sh avant de deployer.${NC}"
    exit 1
fi
echo -e "  ${GREEN}Tous les ${#REQUIRED_FILES[@]} fichiers requis sont presents.${NC}"
echo ""
|
||||
|
||||
# ---------------------------------------------------------------
|
||||
# 7. Creer le zip
|
||||
# ---------------------------------------------------------------
|
||||
|
||||
@@ -26,6 +26,7 @@ markers =
|
||||
fiche8: Tests Fiche #8 (anti-bugs terrain)
|
||||
fiche9: Tests Fiche #9 (postconditions retry backoff)
|
||||
fiche10: Tests Fiche #10 (precision metrics engine)
|
||||
visual: Tests visuels sur captures réelles (nécessite serveur GPU)
|
||||
|
||||
# Note: Chemins Python gérés par tests/conftest.py
|
||||
|
||||
|
||||
683
tests/unit/test_audit_trail.py
Normal file
683
tests/unit/test_audit_trail.py
Normal file
@@ -0,0 +1,683 @@
|
||||
# tests/unit/test_audit_trail.py
|
||||
"""
|
||||
Tests unitaires du module Audit Trail.
|
||||
|
||||
Vérifie l'enregistrement, la recherche, l'export CSV et le résumé
|
||||
journalier des entrées d'audit.
|
||||
"""
|
||||
|
||||
import csv
|
||||
import io
|
||||
import json
|
||||
import os
|
||||
import tempfile
|
||||
from datetime import date, datetime, timedelta
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
# Importer depuis le bon chemin (agent_v0/server_v1/)
|
||||
import sys
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
|
||||
|
||||
from agent_v0.server_v1.audit_trail import AuditEntry, AuditTrail
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Fixtures
|
||||
# =========================================================================
|
||||
|
||||
@pytest.fixture
def audit_dir(tmp_path):
    """Provide a throwaway directory for audit files (as a plain str)."""
    audit_path = tmp_path / "audit"
    audit_path.mkdir()
    return str(audit_path)
|
||||
|
||||
|
||||
@pytest.fixture
def audit(audit_dir):
    """AuditTrail instance backed by the temporary audit directory."""
    return AuditTrail(audit_dir=audit_dir)
|
||||
|
||||
|
||||
def _make_entry(**kwargs) -> AuditEntry:
    """Build an AuditEntry with realistic defaults; kwargs override fields."""
    defaults = {
        "timestamp": datetime.now().isoformat(),
        "session_id": "sess_test_001",
        "action_id": "act_001",
        "user_id": "tim_dupont",
        "user_name": "Marie Dupont",
        "machine_id": "PC-TIM-01",
        "action_type": "click",
        "action_detail": "Clic sur 'Enregistrer' dans DxCare",
        "target_app": "DxCare",
        "execution_mode": "assisted",
        "result": "success",
        "resolution_method": "som_text_match",
        "critic_result": "semantic_ok",
        "recovery_action": "",
        "domain": "tim_codage",
        "workflow_id": "wf_codage_cim10",
        "workflow_name": "Codage CIM-10 séjour",
        "duration_ms": 234.5,
    }
    # kwargs win over the defaults without mutating the defaults dict.
    return AuditEntry(**{**defaults, **kwargs})
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests AuditEntry
|
||||
# =========================================================================
|
||||
|
||||
class TestAuditEntry:
    """Tests for the AuditEntry data structure."""

    def test_creation_basique(self):
        """Create an entry with every field populated."""
        entry = _make_entry()
        assert entry.user_id == "tim_dupont"
        assert entry.action_type == "click"
        assert entry.result == "success"
        assert entry.duration_ms == 234.5

    def test_to_dict(self):
        """Serialise to a dictionary."""
        entry = _make_entry()
        d = entry.to_dict()
        assert isinstance(d, dict)
        assert d["user_id"] == "tim_dupont"
        assert d["domain"] == "tim_codage"
        assert d["duration_ms"] == 234.5

    def test_from_dict(self):
        """Round-trip: deserialise from a dictionary."""
        entry = _make_entry()
        d = entry.to_dict()
        restored = AuditEntry.from_dict(d)
        assert restored.user_id == entry.user_id
        assert restored.action_detail == entry.action_detail
        assert restored.duration_ms == entry.duration_ms

    def test_from_dict_ignore_unknown_keys(self):
        """Unknown keys are ignored (forward compatibility)."""
        d = {"user_id": "test", "unknown_field": "valeur", "future_key": 42}
        entry = AuditEntry.from_dict(d)
        assert entry.user_id == "test"
        # Unknown fields must not raise an error.

    def test_to_dict_json_serializable(self):
        """The dictionary serialises to JSON, accents included."""
        entry = _make_entry(action_detail="Clic sur 'Validé' — accent français")
        d = entry.to_dict()
        json_str = json.dumps(d, ensure_ascii=False)
        assert "accent français" in json_str

    def test_default_values(self):
        """An empty entry has coherent default values."""
        entry = AuditEntry()
        assert entry.timestamp == ""
        assert entry.user_id == ""
        assert entry.duration_ms == 0.0
        assert entry.result == ""
|
||||
|
||||
# =========================================================================
|
||||
# Tests AuditTrail — enregistrement et lecture
|
||||
# =========================================================================
|
||||
|
||||
class TestAuditTrailRecord:
    """Recording tests: entries are persisted to daily JSONL files."""

    def test_record_and_reload(self, audit, audit_dir):
        """Record one entry, then read it back straight from the file."""
        audit.record(_make_entry())

        # The daily file must exist under the audit directory.
        log_path = Path(audit_dir) / f"audit_{date.today().isoformat()}.jsonl"
        assert log_path.exists()

        # Inspect the raw file contents directly.
        with open(log_path, "r", encoding="utf-8") as fh:
            raw_lines = fh.readlines()
        assert len(raw_lines) == 1

        payload = json.loads(raw_lines[0])
        assert payload["user_id"] == "tim_dupont"
        assert payload["action_detail"] == "Clic sur 'Enregistrer' dans DxCare"

    def test_record_multiple_entries(self, audit, audit_dir):
        """Several entries accumulate in the same daily file."""
        for idx in range(5):
            audit.record(_make_entry(action_id=f"act_{idx:03d}"))

        log_path = Path(audit_dir) / f"audit_{date.today().isoformat()}.jsonl"
        with open(log_path, "r", encoding="utf-8") as fh:
            raw_lines = fh.readlines()
        assert len(raw_lines) == 5

    def test_record_auto_timestamp(self, audit):
        """A missing timestamp is filled in automatically."""
        audit.record(_make_entry(timestamp=""))

        # The stored entry must carry a generated timestamp.
        stored = audit.query()
        assert len(stored) == 1
        assert stored[0]["timestamp"] != ""
        # It must parse as ISO 8601.
        datetime.fromisoformat(stored[0]["timestamp"])

    def test_record_utf8_french(self, audit):
        """French accented characters round-trip intact."""
        audit.record(_make_entry(
            action_detail="Saisie du diagnostic 'Hépatite à cytomégalovirus' — CIM-10: B25.1",
            user_name="François Müller",
            workflow_name="Codage séjour réanimation néonatale",
        ))

        stored = audit.query()
        assert len(stored) == 1
        assert "Hépatite" in stored[0]["action_detail"]
        assert "François Müller" in stored[0]["user_name"]
        assert "néonatale" in stored[0]["workflow_name"]

    def test_record_creates_directory(self, tmp_path):
        """The audit directory tree is created on demand."""
        nested_dir = str(tmp_path / "sub" / "deep" / "audit")
        trail = AuditTrail(audit_dir=nested_dir)
        trail.record(_make_entry())

        assert Path(nested_dir).exists()
        assert len(trail.query()) == 1

    def test_record_different_dates(self, audit, audit_dir):
        """Entries with different dates land in separate daily files."""
        today = date.today()
        yesterday = today - timedelta(days=1)

        audit.record(_make_entry(timestamp=datetime.now().isoformat()))
        audit.record(_make_entry(
            timestamp=datetime.combine(yesterday, datetime.min.time()).isoformat(),
            action_id="act_yesterday",
        ))

        # One file per calendar day.
        assert (Path(audit_dir) / f"audit_{today.isoformat()}.jsonl").exists()
        assert (Path(audit_dir) / f"audit_{yesterday.isoformat()}.jsonl").exists()

    def test_jsonl_format(self, audit, audit_dir):
        """Every line of the file is standalone valid JSON (JSONL)."""
        for idx in range(3):
            audit.record(_make_entry(action_id=f"act_{idx}"))

        log_path = Path(audit_dir) / f"audit_{date.today().isoformat()}.jsonl"
        with open(log_path, "r", encoding="utf-8") as fh:
            for line_num, raw in enumerate(fh, 1):
                raw = raw.strip()
                assert raw, f"Ligne {line_num} vide"
                record = json.loads(raw)  # Must not raise
                assert "action_id" in record
                assert "timestamp" in record
|
||||
|
||||
# =========================================================================
|
||||
# Tests AuditTrail — requêtes avec filtres
|
||||
# =========================================================================
|
||||
|
||||
class TestAuditTrailQuery:
    """Search and filtering tests."""

    def _seed_entries(self, audit):
        """Insert a small varied dataset used by the filter tests."""
        seed_rows = [
            dict(action_id="act_001", user_id="tim_dupont", result="success",
                 action_type="click", workflow_id="wf_01", domain="tim_codage"),
            dict(action_id="act_002", user_id="tim_dupont", result="failed",
                 action_type="type", workflow_id="wf_01", domain="generic"),
            dict(action_id="act_003", user_id="tim_martin", user_name="Jean Martin",
                 result="success", action_type="click", workflow_id="wf_02",
                 domain="generic"),
            dict(action_id="act_004", user_id="tim_martin", user_name="Jean Martin",
                 result="recovered", action_type="key_combo", workflow_id="wf_02",
                 domain="generic"),
            dict(action_id="act_005", user_id="tim_dupont", result="success",
                 action_type="click", workflow_id="wf_01", domain="generic"),
        ]
        for row in seed_rows:
            audit.record(_make_entry(**row))

    def test_query_all(self, audit):
        """No filter returns every entry."""
        self._seed_entries(audit)
        assert len(audit.query()) == 5

    def test_query_by_user(self, audit):
        """Filter on the user identifier."""
        self._seed_entries(audit)
        hits = audit.query(user_id="tim_dupont")
        assert len(hits) == 3
        assert all(h["user_id"] == "tim_dupont" for h in hits)

    def test_query_by_result(self, audit):
        """Filter on the action result."""
        self._seed_entries(audit)
        hits = audit.query(result="success")
        assert len(hits) == 3
        assert all(h["result"] == "success" for h in hits)

    def test_query_by_action_type(self, audit):
        """Filter on the action type."""
        self._seed_entries(audit)
        assert len(audit.query(action_type="click")) == 3

    def test_query_by_workflow(self, audit):
        """Filter on the workflow identifier."""
        self._seed_entries(audit)
        assert len(audit.query(workflow_id="wf_02")) == 2

    def test_query_by_domain(self, audit):
        """Filter on the business domain."""
        self._seed_entries(audit)
        hits = audit.query(domain="tim_codage")
        assert len(hits) == 1
        assert hits[0]["action_id"] == "act_001"

    def test_query_by_session(self, audit):
        """Filter on the session identifier."""
        self._seed_entries(audit)
        # All seeded entries share the same session.
        assert len(audit.query(session_id="sess_test_001")) == 5

    def test_query_combined_filters(self, audit):
        """Multiple filters combine with AND semantics."""
        self._seed_entries(audit)
        assert len(audit.query(user_id="tim_dupont", result="success")) == 2

    def test_query_no_match(self, audit):
        """A filter matching nothing yields an empty list."""
        self._seed_entries(audit)
        assert len(audit.query(user_id="tim_inexistant")) == 0

    def test_query_pagination_limit(self, audit):
        """`limit` caps the number of results."""
        self._seed_entries(audit)
        assert len(audit.query(limit=2)) == 2

    def test_query_pagination_offset(self, audit):
        """`offset` skips over the first results."""
        self._seed_entries(audit)
        everything = audit.query()
        tail = audit.query(offset=3)
        assert len(tail) == 2
        assert tail[0] == everything[3]

    def test_query_sorted_by_timestamp_desc(self, audit):
        """Results come back ordered by timestamp, newest first."""
        base = datetime.now()
        for idx in range(5):
            audit.record(_make_entry(
                timestamp=(base - timedelta(minutes=idx)).isoformat(),
                action_id=f"act_{idx}",
            ))

        stamps = [row["timestamp"] for row in audit.query()]
        assert stamps == sorted(stamps, reverse=True)

    def test_query_date_range(self, audit):
        """Filter on an inclusive date range."""
        today = date.today()
        yesterday = today - timedelta(days=1)

        # One entry dated yesterday…
        audit.record(_make_entry(
            timestamp=datetime.combine(yesterday, datetime.min.time()).isoformat(),
            action_id="act_yesterday",
        ))
        # …and one dated today.
        audit.record(_make_entry(
            timestamp=datetime.now().isoformat(),
            action_id="act_today",
        ))

        # Yesterday only.
        hits = audit.query(
            date_from=yesterday.isoformat(),
            date_to=yesterday.isoformat(),
        )
        assert len(hits) == 1
        assert hits[0]["action_id"] == "act_yesterday"

        # Both days.
        hits = audit.query(
            date_from=yesterday.isoformat(),
            date_to=today.isoformat(),
        )
        assert len(hits) == 2
|
||||
|
||||
# =========================================================================
|
||||
# Tests AuditTrail — résumé journalier
|
||||
# =========================================================================
|
||||
|
||||
class TestAuditTrailSummary:
    """Daily summary tests."""

    def test_summary_empty(self, audit):
        """A day with no data yields an all-zero summary."""
        summary = audit.get_summary("2025-01-01")
        assert summary["total_actions"] == 0
        assert summary["success_rate"] == 0.0
        assert summary["by_user"] == {}

    def test_summary_basic(self, audit):
        """Totals and success rate across a few entries."""
        audit.record(_make_entry(user_id="tim_dupont", result="success"))
        audit.record(_make_entry(user_id="tim_dupont", result="failed"))
        audit.record(_make_entry(user_id="tim_martin", user_name="Jean Martin", result="success"))

        summary = audit.get_summary()
        assert summary["total_actions"] == 3
        assert summary["success_rate"] == round(2 / 3, 3)

    def test_summary_by_user(self, audit):
        """Per-user breakdown with individual success rates."""
        for outcome in ("success", "success", "failed"):
            audit.record(_make_entry(user_id="tim_dupont", result=outcome))
        audit.record(_make_entry(user_id="tim_martin", user_name="Jean Martin", result="success"))

        by_user = audit.get_summary()["by_user"]
        assert "tim_dupont" in by_user
        assert by_user["tim_dupont"]["total"] == 3
        assert by_user["tim_dupont"]["success"] == 2
        assert by_user["tim_dupont"]["success_rate"] == round(2 / 3, 3)
        assert by_user["tim_martin"]["total"] == 1
        assert by_user["tim_martin"]["success_rate"] == 1.0

    def test_summary_by_result(self, audit):
        """Breakdown by action result."""
        for outcome in ("success", "success", "failed", "recovered"):
            audit.record(_make_entry(result=outcome))

        by_result = audit.get_summary()["by_result"]
        assert by_result["success"] == 2
        assert by_result["failed"] == 1
        assert by_result["recovered"] == 1

    def test_summary_by_action_type(self, audit):
        """Breakdown by action type."""
        for kind in ("click", "click", "type"):
            audit.record(_make_entry(action_type=kind))

        by_type = audit.get_summary()["by_action_type"]
        assert by_type["click"] == 2
        assert by_type["type"] == 1

    def test_summary_by_workflow(self, audit):
        """Breakdown by workflow."""
        for wf in ("wf_01", "wf_01", "wf_02"):
            audit.record(_make_entry(workflow_id=wf))

        by_workflow = audit.get_summary()["by_workflow"]
        assert by_workflow["wf_01"] == 2
        assert by_workflow["wf_02"] == 1

    def test_summary_by_execution_mode(self, audit):
        """Breakdown by execution mode."""
        for mode in ("autonomous", "assisted", "assisted"):
            audit.record(_make_entry(execution_mode=mode))

        by_mode = audit.get_summary()["by_execution_mode"]
        assert by_mode["autonomous"] == 1
        assert by_mode["assisted"] == 2

    def test_summary_date_field(self, audit):
        """The summary echoes the requested date."""
        today = date.today().isoformat()
        assert audit.get_summary(today)["date"] == today
|
||||
|
||||
# =========================================================================
|
||||
# Tests AuditTrail — export CSV
|
||||
# =========================================================================
|
||||
|
||||
class TestAuditTrailExportCSV:
    """CSV export tests."""

    def test_export_csv_empty(self, audit):
        """Exporting with no data returns an empty string."""
        assert audit.export_csv(date_from="2025-01-01") == ""

    def test_export_csv_basic(self, audit):
        """The export contains the recorded entries."""
        audit.record(_make_entry(action_id="act_001"))
        audit.record(_make_entry(action_id="act_002"))

        dump = audit.export_csv()
        assert dump
        assert "act_001" in dump
        assert "act_002" in dump

    def test_export_csv_header(self, audit):
        """The CSV header carries the dataclass field names."""
        audit.record(_make_entry())

        header = csv.DictReader(io.StringIO(audit.export_csv())).fieldnames
        assert "timestamp" in header
        assert "user_id" in header
        assert "action_detail" in header
        assert "domain" in header
        assert "duration_ms" in header

    def test_export_csv_parseable(self, audit):
        """The produced CSV parses cleanly despite quotes and commas."""
        for idx in range(5):
            audit.record(_make_entry(
                action_id=f"act_{idx}",
                action_detail=f"Action {idx} — avec des 'guillemets' et des, virgules",
            ))

        rows = list(csv.DictReader(io.StringIO(audit.export_csv())))
        assert len(rows) == 5

        # Values survive the special characters intact.
        for row in rows:
            assert "virgules" in row["action_detail"]

    def test_export_csv_filter_by_user(self, audit):
        """The export honours the user filter."""
        audit.record(_make_entry(user_id="tim_dupont", action_id="act_001"))
        audit.record(_make_entry(user_id="tim_martin", action_id="act_002"))

        rows = list(csv.DictReader(io.StringIO(audit.export_csv(user_id="tim_dupont"))))
        assert len(rows) == 1
        assert rows[0]["user_id"] == "tim_dupont"

    def test_export_csv_utf8(self, audit):
        """French UTF-8 text survives the CSV export."""
        audit.record(_make_entry(
            action_detail="Saisie 'Hépatite à cytomégalovirus' — réanimation néonatale",
            user_name="François Müller",
        ))

        dump = audit.export_csv()
        assert "Hépatite" in dump
        assert "François Müller" in dump
|
||||
|
||||
# =========================================================================
|
||||
# Tests de robustesse
|
||||
# =========================================================================
|
||||
|
||||
class TestAuditTrailRobustness:
    """Robustness and edge-case tests."""

    def test_directory_auto_creation(self, tmp_path):
        """Construction creates a missing audit directory."""
        target_dir = str(tmp_path / "nonexistent" / "deep" / "audit")
        assert not Path(target_dir).exists()

        AuditTrail(audit_dir=target_dir)
        assert Path(target_dir).exists()

    def test_corrupted_jsonl_line(self, audit, audit_dir):
        """A corrupted JSONL line is skipped instead of crashing reads."""
        # Two valid entries first.
        audit.record(_make_entry(action_id="act_001"))
        audit.record(_make_entry(action_id="act_002"))

        # Inject a broken line into today's file.
        log_path = Path(audit_dir) / f"audit_{date.today().isoformat()}.jsonl"
        with open(log_path, "a", encoding="utf-8") as fh:
            fh.write("{invalid json line\n")

        # Then one more valid entry.
        audit.record(_make_entry(action_id="act_003"))

        # Reads must succeed and ignore the broken line.
        assert len(audit.query()) == 3  # 2 valid before + 1 valid after

    def test_empty_file(self, audit, audit_dir):
        """An empty daily file is handled gracefully."""
        log_path = Path(audit_dir) / f"audit_{date.today().isoformat()}.jsonl"
        log_path.touch()  # empty file

        assert len(audit.query()) == 0

    def test_concurrent_writes(self, audit):
        """Concurrent writers are serialized by the internal lock."""
        import threading

        failures = []

        def writer(start):
            try:
                for i in range(20):
                    audit.record(_make_entry(action_id=f"act_{start}_{i}"))
            except Exception as exc:
                failures.append(str(exc))

        workers = [threading.Thread(target=writer, args=(n,)) for n in range(5)]
        for w in workers:
            w.start()
        for w in workers:
            w.join()

        assert not failures, f"Erreurs concurrentes: {failures}"
        assert len(audit.query(limit=200)) == 100  # 5 threads x 20 entries

    def test_query_invalid_date(self, audit):
        """An unparseable date filter must not raise."""
        outcome = audit.query(date_from="not-a-date")
        assert isinstance(outcome, list)

    def test_summary_invalid_date(self, audit):
        """An unparseable date in get_summary must not raise."""
        assert audit.get_summary("not-a-date")["total_actions"] == 0

    def test_entry_all_fields_present_in_export(self, audit):
        """The CSV export exposes every AuditEntry dataclass field."""
        from dataclasses import fields as dc_fields
        audit.record(_make_entry())

        reader = csv.DictReader(io.StringIO(audit.export_csv()))
        first_row = next(reader)

        expected = {f.name for f in dc_fields(AuditEntry)}
        assert expected == set(first_row.keys())

    def test_date_range_reversed(self, audit):
        """A reversed date range (date_to < date_from) still works."""
        today = date.today()
        yesterday = today - timedelta(days=1)

        audit.record(_make_entry(
            timestamp=datetime.combine(yesterday, datetime.min.time()).isoformat(),
        ))

        # date_from > date_to — the implementation swaps the bounds itself.
        outcome = audit.query(
            date_from=today.isoformat(),
            date_to=yesterday.isoformat(),
        )
        assert isinstance(outcome, list)
||||
530
tests/unit/test_policy_grounding_recovery_learning.py
Normal file
530
tests/unit/test_policy_grounding_recovery_learning.py
Normal file
@@ -0,0 +1,530 @@
|
||||
"""
|
||||
Tests fonctionnels pour P2 (Policy/Grounding), P3 (Recovery), P4 (Learning).
|
||||
|
||||
Vérifie que chaque module fait bien son travail :
|
||||
- Grounding : localise ou retourne NOT_FOUND (pas de décision)
|
||||
- Policy : décide RETRY/SKIP/ABORT/SUPERVISE (pas de localisation)
|
||||
- Recovery : exécute Ctrl+Z / Escape / Alt+F4 selon le contexte
|
||||
- Learning : enregistre et requête les résultats structurés
|
||||
"""
|
||||
|
||||
import json
|
||||
import shutil
|
||||
import sys
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from unittest.mock import MagicMock, patch, PropertyMock
|
||||
|
||||
import pytest
|
||||
|
||||
_ROOT = str(Path(__file__).resolve().parents[2])
|
||||
if _ROOT not in sys.path:
|
||||
sys.path.insert(0, _ROOT)
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# P2 : Grounding — localisation pure
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestGroundingEngine:
    """P2 Grounding: pure localization, no decision-making."""

    def _make_engine(self):
        """Build a GroundingEngine around a mocked executor."""
        from agent_v0.agent_v1.core.grounding import GroundingEngine
        mock_exec = MagicMock()
        mock_exec._capture_screenshot_b64.return_value = "fake_b64_data"
        return GroundingEngine(mock_exec), mock_exec

    def test_server_found_retourne_coordonnees(self):
        """When the server resolves the element, its coordinates are returned."""
        engine, mock_exec = self._make_engine()
        mock_exec._server_resolve_target.return_value = {
            "resolved": True, "x_pct": 0.5, "y_pct": 0.3,
            "method": "som_text", "score": 0.95,
            "matched_element": {"label": "Enregistrer"},
        }
        outcome = engine.locate("http://server", {"by_text": "Enregistrer"}, 0.5, 0.3, 1920, 1080)
        assert outcome.found is True
        assert outcome.x_pct == 0.5
        assert outcome.y_pct == 0.3
        assert outcome.method == "som_text"

    def test_server_not_found_cascade_template(self):
        """Server failure cascades down to template matching."""
        engine, mock_exec = self._make_engine()
        mock_exec._server_resolve_target.return_value = None
        mock_exec._template_match_anchor.return_value = {
            "resolved": True, "x_pct": 0.4, "y_pct": 0.6,
            "score": 0.85,
        }
        outcome = engine.locate(
            "http://server",
            {"by_text": "OK", "anchor_image_base64": "abc123"},
            0.5, 0.3, 1920, 1080,
        )
        assert outcome.found is True
        assert outcome.method == "anchor_template"

    def test_toutes_strategies_echouent_retourne_not_found(self):
        """When every strategy fails, NOT_FOUND is returned."""
        engine, mock_exec = self._make_engine()
        mock_exec._server_resolve_target.return_value = None
        mock_exec._template_match_anchor.return_value = None
        mock_exec._hybrid_vlm_resolve.return_value = None
        outcome = engine.locate(
            "http://server",
            {"by_text": "Inexistant", "anchor_image_base64": "abc", "vlm_description": "bouton"},
            0.5, 0.3, 1920, 1080,
        )
        assert outcome.found is False
        assert "échoué" in outcome.detail

    def test_screenshot_echoue_retourne_not_found(self):
        """A failed screenshot capture yields NOT_FOUND immediately."""
        engine, mock_exec = self._make_engine()
        mock_exec._capture_screenshot_b64.return_value = None
        outcome = engine.locate("http://server", {"by_text": "OK"}, 0.5, 0.3, 1920, 1080)
        assert outcome.found is False
        assert "screenshot" in outcome.detail.lower()

    def test_strategies_custom(self):
        """The caller can restrict which strategies are tried."""
        engine, mock_exec = self._make_engine()
        mock_exec._template_match_anchor.return_value = {
            "resolved": True, "x_pct": 0.2, "y_pct": 0.8, "score": 0.9,
        }
        # Template only — the server must never be contacted.
        outcome = engine.locate(
            "", {"anchor_image_base64": "abc"}, 0.5, 0.3, 1920, 1080,
            strategies=["template"],
        )
        assert outcome.found is True
        mock_exec._server_resolve_target.assert_not_called()

    def test_grounding_result_to_dict(self):
        """GroundingResult serializes to a plain dict."""
        from agent_v0.agent_v1.core.grounding import GroundingResult
        payload = GroundingResult(found=True, x_pct=0.5, y_pct=0.3, method="som", score=0.9).to_dict()
        assert payload["found"] is True
        assert payload["x_pct"] == 0.5
        assert payload["method"] == "som"
|
||||
|
||||
# =========================================================================
|
||||
# P2 : Policy — décisions quand grounding échoue
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestPolicyEngine:
    """P2 Policy: decisions when grounding fails."""

    def _make_engine(self):
        """Build a PolicyEngine around a mocked executor."""
        from agent_v0.agent_v1.core.policy import PolicyEngine
        mock_exec = MagicMock()
        return PolicyEngine(mock_exec), mock_exec

    def test_premier_essai_popup_fermee_retry(self):
        """First failure + popup dismissed → RETRY."""
        from agent_v0.agent_v1.core.policy import Decision
        engine, mock_exec = self._make_engine()
        mock_exec._handle_popup_vlm.return_value = True  # popup was dismissed

        verdict = engine.decide(
            action={"type": "click"},
            target_spec={"by_text": "OK"},
            retry_count=0,
        )
        assert verdict.decision == Decision.RETRY
        assert "popup" in verdict.reason.lower()

    def test_premier_essai_pas_de_popup_retry(self):
        """First failure without popup → still RETRY while retries remain."""
        from agent_v0.agent_v1.core.policy import Decision
        engine, mock_exec = self._make_engine()
        mock_exec._handle_popup_vlm.return_value = False

        verdict = engine.decide(
            action={"type": "click"},
            target_spec={"by_text": "OK"},
            retry_count=0,
            max_retries=2,
        )
        assert verdict.decision == Decision.RETRY

    def test_max_retries_acteur_passer_skip(self):
        """Retries exhausted + actor answers PASSER → SKIP."""
        from agent_v0.agent_v1.core.policy import Decision
        engine, mock_exec = self._make_engine()
        mock_exec._actor_decide.return_value = "PASSER"

        verdict = engine.decide(
            action={"type": "click"},
            target_spec={"by_text": "Onglet"},
            retry_count=1,
            max_retries=1,
        )
        assert verdict.decision == Decision.SKIP

    def test_max_retries_acteur_stopper_abort(self):
        """Retries exhausted + actor answers STOPPER → ABORT."""
        from agent_v0.agent_v1.core.policy import Decision
        engine, mock_exec = self._make_engine()
        mock_exec._actor_decide.return_value = "STOPPER"

        verdict = engine.decide(
            action={"type": "click"},
            target_spec={"by_text": "X"},
            retry_count=1,
            max_retries=1,
        )
        assert verdict.decision == Decision.ABORT

    def test_max_retries_acteur_executer_supervise(self):
        """Retries exhausted + actor answers EXECUTER → SUPERVISE (hand control back)."""
        from agent_v0.agent_v1.core.policy import Decision
        engine, mock_exec = self._make_engine()
        mock_exec._actor_decide.return_value = "EXECUTER"

        verdict = engine.decide(
            action={"type": "click"},
            target_spec={"by_text": "X"},
            retry_count=1,
            max_retries=1,
        )
        assert verdict.decision == Decision.SUPERVISE

    def test_policy_decision_to_dict(self):
        """PolicyDecision serializes to a plain dict."""
        from agent_v0.agent_v1.core.policy import PolicyDecision, Decision
        payload = PolicyDecision(decision=Decision.SKIP, reason="État atteint").to_dict()
        assert payload["decision"] == "skip"
        assert payload["reason"] == "État atteint"
|
||||
|
||||
# =========================================================================
|
||||
# P3 : Recovery — rollback après échec
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestRecoveryEngine:
    """P3 Recovery: rollback after a failed action."""

    def _make_engine(self):
        """Build a RecoveryEngine around a mocked executor."""
        from agent_v0.agent_v1.core.recovery import RecoveryEngine
        mock_exec = MagicMock()
        mock_exec.keyboard = MagicMock()
        mock_exec.sct = MagicMock()
        mock_exec.sct.monitors = [{}, {"width": 1920, "height": 1080}]
        mock_exec._click = MagicMock()
        return RecoveryEngine(mock_exec), mock_exec

    def test_popup_detectee_escape(self):
        """Critic mentions a popup → Recovery presses Escape."""
        from agent_v0.agent_v1.core.recovery import RecoveryAction
        engine, mock_exec = self._make_engine()
        outcome = engine.attempt(
            failed_action={"type": "click"},
            critic_detail="Une popup d'erreur est apparue",
        )
        assert outcome.action_taken == RecoveryAction.ESCAPE
        assert outcome.success is True
        # Escape must actually have been pressed.
        mock_exec.keyboard.press.assert_called()

    def test_frappe_incorrecte_undo(self):
        """Mistyped text → Recovery issues Ctrl+Z."""
        from agent_v0.agent_v1.core.recovery import RecoveryAction
        engine, mock_exec = self._make_engine()
        outcome = engine.attempt(
            failed_action={"type": "type"},
            critic_detail="Le texte a été tapé au mauvais endroit",
        )
        assert outcome.action_taken == RecoveryAction.UNDO
        assert outcome.success is True

    def test_mauvaise_fenetre_close(self):
        """Wrong window opened → Recovery issues Alt+F4."""
        from agent_v0.agent_v1.core.recovery import RecoveryAction
        engine, mock_exec = self._make_engine()
        outcome = engine.attempt(
            failed_action={"type": "click"},
            critic_detail="Mauvaise fenêtre ouverte au lieu du bloc-notes",
        )
        assert outcome.action_taken == RecoveryAction.CLOSE_WINDOW
        assert outcome.success is True

    def test_menu_ouvert_escape(self):
        """An open dropdown menu → Recovery presses Escape."""
        from agent_v0.agent_v1.core.recovery import RecoveryAction
        engine, mock_exec = self._make_engine()
        outcome = engine.attempt(
            failed_action={"type": "click"},
            critic_detail="Un menu déroulant s'est ouvert",
        )
        assert outcome.action_taken == RecoveryAction.ESCAPE
        assert outcome.success is True

    def test_aucune_strategie_applicable(self):
        """No recognized failure pattern → NONE."""
        from agent_v0.agent_v1.core.recovery import RecoveryAction
        engine, mock_exec = self._make_engine()
        outcome = engine.attempt(
            failed_action={"type": "wait"},
            critic_detail="Quelque chose d'inattendu",
        )
        assert outcome.action_taken == RecoveryAction.NONE
        assert outcome.success is False

    def test_recovery_result_to_dict(self):
        """RecoveryResult serializes to a plain dict."""
        from agent_v0.agent_v1.core.recovery import RecoveryResult, RecoveryAction
        payload = RecoveryResult(
            action_taken=RecoveryAction.UNDO, success=True, detail="Ctrl+Z"
        ).to_dict()
        assert payload["action_taken"] == "undo"
        assert payload["success"] is True
|
||||
|
||||
# =========================================================================
|
||||
# P4 : Learning — apprentissage runtime
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestReplayLearner:
    """Unit tests for ReplayLearner: recording outcomes, reloading sessions,
    querying similar results and computing aggregate statistics.
    """

    @pytest.fixture
    def learner(self):
        """Yield a ReplayLearner backed by a throw-away learning directory."""
        tmpdir = tempfile.mkdtemp(prefix="test_learning_")
        from agent_v0.server_v1.replay_learner import ReplayLearner
        # Renamed from `l`: a lone lowercase L is ambiguous (PEP 8, ruff E741).
        instance = ReplayLearner(learning_dir=tmpdir)
        yield instance
        shutil.rmtree(tmpdir, ignore_errors=True)

    def test_record_et_load_session(self, learner):
        """Record one outcome and read it back from the session file."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        outcome = ActionOutcome(
            session_id="test_session",
            action_id="act_001",
            action_type="click",
            target_description="Bouton Enregistrer",
            resolution_method="som_text",
            resolution_score=0.95,
            success=True,
        )
        learner.record(outcome)

        # Read it back from disk
        loaded = learner.load_session("test_session")
        assert len(loaded) == 1
        assert loaded[0].action_id == "act_001"
        assert loaded[0].success is True
        assert loaded[0].resolution_method == "som_text"

    def test_record_from_replay_result(self, learner):
        """Convert the replay result format into an ActionOutcome."""
        learner.record_from_replay_result(
            session_id="s1",
            action={"action_id": "a1", "type": "click", "target_spec": {"by_text": "OK", "window_title": "App"}},
            result={"success": True, "resolution_method": "template", "resolution_score": 0.9},
            verification={"verified": True, "semantic_verified": True, "semantic_detail": "OK"},
        )
        loaded = learner.load_session("s1")
        assert len(loaded) == 1
        assert loaded[0].target_description == "OK"
        assert loaded[0].semantic_verified is True

    def test_query_similar(self, learner):
        """Query recorded outcomes by target description."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        # Record several outcomes
        for i, (desc, method, success) in enumerate([
            ("Bouton Enregistrer", "som_text", True),
            ("Bouton Annuler", "template", True),
            ("Bouton Enregistrer", "vlm_direct", False),
            ("Menu Fichier", "som_text", True),
        ]):
            learner.record(ActionOutcome(
                session_id="s1", action_id=f"a{i}",
                action_type="click", target_description=desc,
                resolution_method=method, success=success,
            ))

        # Look up "Enregistrer"
        results = learner.query_similar(target_description="Enregistrer")
        assert len(results) == 2
        # Both hits concern "Enregistrer"
        for r in results:
            assert "enregistrer" in r["outcome"]["target_description"].lower()

    def test_get_stats(self, learner):
        """Global statistics are computed correctly."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        for success, method in [(True, "som"), (True, "som"), (False, "template"), (True, "vlm")]:
            learner.record(ActionOutcome(
                session_id="s1", action_id="a",
                action_type="click", success=success,
                resolution_method=method,
            ))

        stats = learner.get_stats()
        assert stats["total"] == 4
        assert stats["success_rate"] == 0.75
        assert stats["methods"]["som"]["success_rate"] == 1.0
        assert stats["methods"]["template"]["success_rate"] == 0.0

    def test_gemma4_indisponible_pas_de_crash(self, learner):
        """Learning keeps working even without a VLM."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        # No crash expected — just a plain record
        learner.record(ActionOutcome(
            session_id="s1", action_id="a1", action_type="click",
            success=False, error="target_not_found",
        ))
        stats = learner.get_stats()
        assert stats["total"] == 1
        assert stats["success_rate"] == 0.0

    def test_fichier_jsonl_format(self, learner):
        """The JSONL file holds one valid JSON document per line."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        learner.record(ActionOutcome(
            session_id="s1", action_id="a1", action_type="click", success=True,
        ))
        learner.record(ActionOutcome(
            session_id="s1", action_id="a2", action_type="type", success=False,
        ))

        jsonl_file = learner.learning_dir / "s1.jsonl"
        assert jsonl_file.is_file()

        with open(jsonl_file) as f:
            lines = f.readlines()
        assert len(lines) == 2
        for line in lines:
            data = json.loads(line)  # must be valid JSON
            assert "action_id" in data
            assert "success" in data
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Boucle d'apprentissage : consolidation cross-workflow
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestLearningLoop:
    """Tests of the learning loop: past replays improve the following ones."""

    @pytest.fixture
    def learner(self):
        """Yield a ReplayLearner writing into a temporary directory."""
        tmpdir = tempfile.mkdtemp(prefix="test_learning_loop_")
        from agent_v0.server_v1.replay_learner import ReplayLearner
        # Renamed from `l`: a lone lowercase L is ambiguous (PEP 8, ruff E741).
        instance = ReplayLearner(learning_dir=tmpdir)
        yield instance
        shutil.rmtree(tmpdir, ignore_errors=True)

    def test_best_strategy_apprend_du_succes(self, learner):
        """The best strategy is the one with the most successes."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        # anchor_template fails 3 times on "Enregistrer"
        for i in range(3):
            learner.record(ActionOutcome(
                session_id=f"s{i}", action_id=f"a{i}", action_type="click",
                target_description="Enregistrer", resolution_method="anchor_template",
                success=False,
            ))
        # som_text_match succeeds twice on "Enregistrer"
        for i in range(2):
            learner.record(ActionOutcome(
                session_id=f"s{10+i}", action_id=f"a{10+i}", action_type="click",
                target_description="Enregistrer", resolution_method="som_text_match",
                success=True,
            ))

        best = learner.best_strategy_for("Enregistrer")
        assert best == "som_text_match"

    def test_best_strategy_minimum_2_essais(self, learner):
        """At least 2 attempts are required before a strategy is recommended."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        # A single success is not enough to recommend anything
        learner.record(ActionOutcome(
            session_id="s1", action_id="a1", action_type="click",
            target_description="OK", resolution_method="vlm_direct",
            success=True,
        ))
        best = learner.best_strategy_for("OK")
        assert best is None

    def test_best_strategy_rien_si_historique_vide(self, learner):
        """No history means no recommendation."""
        best = learner.best_strategy_for("Inexistant")
        assert best is None

    def test_consolidate_workflow_enrichit_les_actions(self, learner):
        """Consolidation injects _learned_strategy into the target_specs."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        # History: som_text_match works for "Fichier"
        for i in range(3):
            learner.record(ActionOutcome(
                session_id=f"s{i}", action_id=f"a{i}", action_type="click",
                target_description="Fichier", resolution_method="som_text_match",
                success=True,
            ))

        # Workflow containing one "Fichier" action
        actions = [
            {"type": "click", "target_spec": {"by_text": "Fichier", "window_title": "Bloc-notes"}},
            {"type": "type", "text": "bonjour"},
            {"type": "click", "target_spec": {"by_text": "Inconnu"}},
        ]

        enriched = learner.consolidate_workflow(actions)
        assert enriched == 1  # only "Fichier" has history
        assert actions[0]["target_spec"]["_learned_strategy"] == "som_text_match"
        assert "_learned_strategy" not in actions[2].get("target_spec", {})

    def test_consolidation_cross_workflow(self, learner):
        """A success in workflow A improves workflow B."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        # Workflow A: "Enregistrer" succeeds with grounding_vlm
        for i in range(3):
            learner.record(ActionOutcome(
                session_id="workflow_A", action_id=f"a{i}", action_type="click",
                target_description="Enregistrer",
                window_title="Bloc-notes",
                resolution_method="grounding_vlm", success=True,
            ))

        # Workflow B also contains "Enregistrer"
        workflow_b = [
            {"type": "click", "target_spec": {"by_text": "Enregistrer", "window_title": "Bloc-notes"}},
        ]
        enriched = learner.consolidate_workflow(workflow_b, "workflow_B")
        assert enriched == 1
        assert workflow_b[0]["target_spec"]["_learned_strategy"] == "grounding_vlm"

    def test_grounding_reordonne_strategies(self):
        """The GroundingEngine reorders its strategies from _learned_strategy."""
        from agent_v0.agent_v1.core.grounding import GroundingEngine
        executor = MagicMock()
        executor._capture_screenshot_b64.return_value = "fake"
        # Pretend the template strategy succeeds and the others find nothing
        executor._server_resolve_target.return_value = None
        executor._template_match_anchor.return_value = {
            "resolved": True, "x_pct": 0.5, "y_pct": 0.5, "score": 0.9,
        }
        executor._hybrid_vlm_resolve.return_value = None

        engine = GroundingEngine(executor)

        # With _learned_strategy = anchor_template → template is tried first
        result = engine.locate(
            "http://server",
            {"by_text": "OK", "anchor_image_base64": "abc", "_learned_strategy": "anchor_template"},
            0.5, 0.3, 1920, 1080,
        )
        assert result.found is True
        assert result.method == "anchor_template"
        # The server was NOT called (template came first)
        executor._server_resolve_target.assert_not_called()
|
||||
441
tests/unit/test_replay_critic.py
Normal file
441
tests/unit/test_replay_critic.py
Normal file
@@ -0,0 +1,441 @@
|
||||
"""
|
||||
Tests unitaires pour le Critic (ReplayVerifier.verify_with_critic)
|
||||
et l'enrichissement des actions avec intentions.
|
||||
|
||||
Vérifie les FONCTIONNALITÉS, pas juste la non-régression :
|
||||
1. Le Critic fusionne correctement pixel + sémantique
|
||||
2. La matrice de décision (4 cas) est correcte
|
||||
3. L'enrichissement intentions parse bien les réponses gemma4
|
||||
4. Les fallbacks fonctionnent quand le VLM est indisponible
|
||||
"""
|
||||
|
||||
import base64
|
||||
import io
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from unittest.mock import MagicMock, patch, Mock
|
||||
|
||||
import pytest
|
||||
|
||||
_ROOT = str(Path(__file__).resolve().parents[2])
|
||||
if _ROOT not in sys.path:
|
||||
sys.path.insert(0, _ROOT)
|
||||
|
||||
from agent_v0.server_v1.replay_verifier import ReplayVerifier, VerificationResult
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Fixtures
|
||||
# =========================================================================
|
||||
|
||||
|
||||
def _make_screenshot_b64(width=100, height=100, color=(128, 128, 128)):
    """Return a fake screenshot: a solid-color JPEG encoded as base64 text."""
    from PIL import Image
    buffer = io.BytesIO()
    Image.new("RGB", (width, height), color).save(buffer, format="JPEG", quality=50)
    return base64.b64encode(buffer.getvalue()).decode()
|
||||
|
||||
|
||||
@pytest.fixture
def verifier():
    """A fresh ReplayVerifier instance for each test."""
    return ReplayVerifier()


@pytest.fixture
def screenshot_gray():
    """Uniform mid-gray 100x100 screenshot (base64 JPEG)."""
    return _make_screenshot_b64(100, 100, (128, 128, 128))


@pytest.fixture
def screenshot_white():
    """Uniform white 100x100 screenshot (base64 JPEG)."""
    return _make_screenshot_b64(100, 100, (255, 255, 255))
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests VerificationResult — nouveaux champs sémantiques
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestVerificationResult:
    """Serialization of VerificationResult, with and without semantic fields."""

    def test_to_dict_sans_semantique(self):
        """Without semantic verification, the semantic_* keys stay absent."""
        payload = VerificationResult(
            verified=True, confidence=0.8, changes_detected=True,
            change_area_pct=5.0, suggestion="continue", detail="test",
        ).to_dict()

        assert "semantic_verified" not in payload
        assert payload["verified"] is True
        assert payload["confidence"] == 0.8

    def test_to_dict_avec_semantique(self):
        """With semantic verification, the semantic_* keys are present."""
        payload = VerificationResult(
            verified=True, confidence=0.9, changes_detected=True,
            change_area_pct=5.0, suggestion="continue", detail="test",
            semantic_verified=True, semantic_detail="Bouton visible",
            semantic_elapsed_ms=1500.0,
        ).to_dict()

        assert payload["semantic_verified"] is True
        assert payload["semantic_detail"] == "Bouton visible"
        assert payload["semantic_elapsed_ms"] == 1500.0

    def test_to_dict_semantique_false(self):
        """semantic_verified=False must still appear in the dict (not dropped as falsy)."""
        payload = VerificationResult(
            verified=False, confidence=0.7, changes_detected=True,
            change_area_pct=5.0, suggestion="retry",
            semantic_verified=False, semantic_detail="Mauvais écran",
            semantic_elapsed_ms=2000.0,
        ).to_dict()

        assert payload["semantic_verified"] is False
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests verify_with_critic — matrice de décision
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestVerifyWithCritic:
    """Decision matrix of verify_with_critic: pixel diff combined with the
    semantic (VLM) verdict mocked at ReplayVerifier._verify_semantic.
    """

    def test_sans_expected_result_retourne_pixel_seul(self, verifier, screenshot_gray):
        """Without expected_result, verify_with_critic behaves like verify_action (pixel only)."""
        result = verifier.verify_with_critic(
            action={"type": "click", "action_id": "test"},
            result={"success": True},
            screenshot_before=screenshot_gray,
            screenshot_after=screenshot_gray,
            expected_result="",  # no expectation provided
        )
        # Pixel-only path — no semantic field should be set
        assert result.semantic_verified is None

    def test_sans_screenshots_pas_de_semantique(self, verifier):
        """Without screenshots, no semantic verification is possible."""
        result = verifier.verify_with_critic(
            action={"type": "click", "action_id": "test"},
            result={"success": True},
            screenshot_before=None,
            screenshot_after=None,
            expected_result="Le fichier est ouvert",
        )
        # No screenshots → pixel-only fallback (low confidence)
        assert result.verified is True
        assert result.confidence < 0.5

    def test_pixel_pas_change_et_expected_result_skip_vlm(
        self, verifier, screenshot_gray,
    ):
        """Identical pixels + expected_result → VLM skipped (no change means retry)."""
        result = verifier.verify_with_critic(
            action={"type": "click", "action_id": "test", "x_pct": 0.5, "y_pct": 0.5},
            result={"success": True},
            screenshot_before=screenshot_gray,
            screenshot_after=screenshot_gray,  # same image → no change at all
            expected_result="Le menu s'est ouvert",
        )
        # No pixel change → retry, and the VLM was never consulted
        assert result.verified is False
        assert result.suggestion == "retry"
        assert result.semantic_verified is None  # VLM not called

    @patch("agent_v0.server_v1.replay_verifier.ReplayVerifier._verify_semantic")
    def test_pixel_ok_semantic_ok(
        self, mock_semantic, verifier, screenshot_gray, screenshot_white,
    ):
        """Pixel OK + semantic OK → verified with high confidence."""
        mock_semantic.return_value = {
            "verified": True,
            "detail": "Le menu est bien ouvert",
            "elapsed_ms": 2000.0,
        }
        result = verifier.verify_with_critic(
            action={"type": "click", "action_id": "test"},
            result={"success": True},
            screenshot_before=screenshot_gray,
            screenshot_after=screenshot_white,  # different → change detected
            expected_result="Le menu s'est ouvert",
        )
        assert result.verified is True
        assert result.semantic_verified is True
        assert result.confidence >= 0.7
        assert "Critic OK" in result.detail

    @patch("agent_v0.server_v1.replay_verifier.ReplayVerifier._verify_semantic")
    def test_pixel_ok_semantic_non(
        self, mock_semantic, verifier, screenshot_gray, screenshot_white,
    ):
        """Pixel OK + semantic NO → UNEXPECTED (something changed, but not the right thing)."""
        mock_semantic.return_value = {
            "verified": False,
            "detail": "Une erreur est apparue au lieu du menu",
            "elapsed_ms": 2500.0,
        }
        result = verifier.verify_with_critic(
            action={"type": "click", "action_id": "test"},
            result={"success": True},
            screenshot_before=screenshot_gray,
            screenshot_after=screenshot_white,
            expected_result="Le menu s'est ouvert",
        )
        assert result.verified is False
        assert result.semantic_verified is False
        assert result.suggestion == "retry"
        assert "Critic NON" in result.detail

    @patch("agent_v0.server_v1.replay_verifier.ReplayVerifier._verify_semantic")
    def test_vlm_indisponible_fallback_pixel(
        self, mock_semantic, verifier, screenshot_gray, screenshot_white,
    ):
        """VLM unavailable → graceful fallback to the pixel-only verdict."""
        mock_semantic.return_value = None  # VLM down
        result = verifier.verify_with_critic(
            action={"type": "click", "action_id": "test"},
            result={"success": True},
            screenshot_before=screenshot_gray,
            screenshot_after=screenshot_white,
            expected_result="Le menu s'est ouvert",
        )
        # Pixel-only fallback — the change is still detected
        assert result.verified is True
        assert result.semantic_verified is None  # no VLM verdict available
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests _verify_semantic — parsing de la réponse VLM
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestVerifySemantic:
    """Parsing of the raw VLM response inside _verify_semantic."""

    @patch("requests.post")
    def test_parse_verdict_oui(self, mock_post, verifier, screenshot_white):
        """A 'VERDICT: OUI' answer is parsed as verified=True."""
        fake_response = MagicMock()
        fake_response.ok = True
        fake_response.json.return_value = {
            "message": {"content": "VERDICT: OUI\nRAISON: Le fichier est bien ouvert"}
        }
        mock_post.return_value = fake_response

        outcome = verifier._verify_semantic(
            screenshot_before=screenshot_white,
            screenshot_after=screenshot_white,
            expected_result="Le fichier est ouvert",
        )

        assert outcome is not None
        assert outcome["verified"] is True
        assert "ouvert" in outcome["detail"]

    @patch("requests.post")
    def test_parse_verdict_non(self, mock_post, verifier, screenshot_white):
        """A 'VERDICT: NON' answer is parsed as verified=False."""
        fake_response = MagicMock()
        fake_response.ok = True
        fake_response.json.return_value = {
            "message": {"content": "VERDICT: NON\nRAISON: L'écran n'a pas changé"}
        }
        mock_post.return_value = fake_response

        outcome = verifier._verify_semantic(
            screenshot_before=screenshot_white,
            screenshot_after=screenshot_white,
            expected_result="Le menu s'est ouvert",
        )

        assert outcome is not None
        assert outcome["verified"] is False

    @patch("requests.post")
    def test_vlm_timeout_retourne_none(self, mock_post, verifier, screenshot_white):
        """A VLM timeout yields None (graceful fallback)."""
        import requests as _real_requests
        mock_post.side_effect = _real_requests.Timeout("timeout")

        outcome = verifier._verify_semantic(
            screenshot_before=screenshot_white,
            screenshot_after=screenshot_white,
            expected_result="Le fichier est ouvert",
        )

        assert outcome is None

    def test_sans_screenshot_after_retourne_none(self, verifier):
        """Without screenshot_after, no verification is possible."""
        outcome = verifier._verify_semantic(
            screenshot_before=None,
            screenshot_after=None,
            expected_result="Le fichier est ouvert",
        )
        assert outcome is None
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests _merge_results — matrice pixel x sémantique
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestMergeResults:
    """Four-case decision matrix of _merge_results (pixel verdict x semantic verdict)."""

    def test_pixel_ok_sem_ok(self, verifier):
        """Both verdicts positive → verified, confidence at least the pixel one."""
        pixel_verdict = VerificationResult(
            verified=True, confidence=0.7, changes_detected=True,
            change_area_pct=5.0, suggestion="continue",
        )
        merged = verifier._merge_results(
            pixel_verdict, {"verified": True, "detail": "OK", "elapsed_ms": 1000}
        )
        assert merged.verified is True
        assert merged.semantic_verified is True
        assert merged.confidence >= 0.7

    def test_pixel_ok_sem_non(self, verifier):
        """Pixel OK + semantic NO = unexpected change → retry."""
        pixel_verdict = VerificationResult(
            verified=True, confidence=0.7, changes_detected=True,
            change_area_pct=5.0, suggestion="continue",
        )
        merged = verifier._merge_results(
            pixel_verdict, {"verified": False, "detail": "Erreur popup", "elapsed_ms": 2000}
        )
        assert merged.verified is False
        assert merged.semantic_verified is False
        assert merged.suggestion == "retry"

    def test_pixel_non_sem_ok(self, verifier):
        """No pixel change + semantic OK = subtle state change → continue."""
        pixel_verdict = VerificationResult(
            verified=False, confidence=0.5, changes_detected=False,
            change_area_pct=0.1, suggestion="retry",
        )
        merged = verifier._merge_results(
            pixel_verdict, {"verified": True, "detail": "Onglet déjà actif", "elapsed_ms": 1500}
        )
        assert merged.verified is True
        assert merged.semantic_verified is True
        assert merged.suggestion == "continue"

    def test_pixel_non_sem_non(self, verifier):
        """No pixel change + semantic NO = complete failure → retry."""
        pixel_verdict = VerificationResult(
            verified=False, confidence=0.5, changes_detected=False,
            change_area_pct=0.0, suggestion="retry",
        )
        merged = verifier._merge_results(
            pixel_verdict, {"verified": False, "detail": "Rien ne s'est passé", "elapsed_ms": 3000}
        )
        assert merged.verified is False
        assert merged.semantic_verified is False
        assert merged.confidence >= 0.7  # high confidence in the failure
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests enrichissement intentions (stream_processor)
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestEnrichActionsWithIntentions:
    """Enrichment of recorded actions with gemma4-produced intentions:
    parsing of the INTENTION/AVANT/APRÈS answer, and graceful degradation
    when the model is unreachable or answers free-form text.
    """

    @patch("requests.post")
    @patch("requests.get")
    def test_enrichissement_parse_reponse_gemma4(self, mock_get, mock_post):
        """The gemma4 answer is parsed into intention / before-state / after-state."""
        from agent_v0.server_v1.stream_processor import _enrich_actions_with_intentions
        import tempfile, shutil

        # Mock gemma4 as available (the GET availability probe succeeds)
        mock_tags_resp = MagicMock()
        mock_tags_resp.ok = True
        mock_get.return_value = mock_tags_resp

        mock_chat_resp = MagicMock()
        mock_chat_resp.ok = True
        mock_chat_resp.json.return_value = {
            "message": {
                "content": (
                    "INTENTION: Ouvrir le fichier client dans le logiciel\n"
                    "AVANT: Le logiciel est ouvert sur la page d'accueil\n"
                    "APRÈS: Le fichier client est affiché dans la fenêtre"
                )
            }
        }
        mock_post.return_value = mock_chat_resp

        actions = [
            {
                "type": "click",
                "action_id": "act_001",
                "target_spec": {"by_text": "Ouvrir", "window_title": "Logiciel"},
            },
            {
                "type": "wait",
                "action_id": "act_002",
                "duration_ms": 1000,
            },
        ]

        tmpdir = Path(tempfile.mkdtemp())
        try:
            (tmpdir / "shots").mkdir()
            _enrich_actions_with_intentions(actions, tmpdir)

            # The click action must be enriched with all three fields
            assert actions[0].get("intention") == "Ouvrir le fichier client dans le logiciel"
            assert actions[0].get("expected_state") == "Le logiciel est ouvert sur la page d'accueil"
            assert actions[0].get("expected_result") == "Le fichier client est affiché dans la fenêtre"
            # expected_state must also land in target_spec (consumed by the Observer)
            assert actions[0]["target_spec"]["expected_state"] == "Le logiciel est ouvert sur la page d'accueil"

            # The wait action must NOT be enriched
            assert "intention" not in actions[1]
        finally:
            shutil.rmtree(tmpdir)

    @patch("requests.get")
    def test_gemma4_indisponible_pas_de_crash(self, mock_get):
        """If gemma4 is down, enrichment is silently disabled."""
        from agent_v0.server_v1.stream_processor import _enrich_actions_with_intentions
        import tempfile, shutil

        mock_get.side_effect = ConnectionError("gemma4 down")

        actions = [
            {"type": "click", "action_id": "act_001", "target_spec": {"by_text": "OK"}},
        ]

        tmpdir = Path(tempfile.mkdtemp())
        try:
            (tmpdir / "shots").mkdir()
            _enrich_actions_with_intentions(actions, tmpdir)
            # No crash, and no intention added either
            assert "intention" not in actions[0]
        finally:
            shutil.rmtree(tmpdir)

    @patch("requests.post")
    @patch("requests.get")
    def test_reponse_gemma4_malformee(self, mock_get, mock_post):
        """If gemma4 returns unstructured text, nothing crashes."""
        from agent_v0.server_v1.stream_processor import _enrich_actions_with_intentions
        import tempfile, shutil

        mock_tags = MagicMock()
        mock_tags.ok = True
        mock_get.return_value = mock_tags

        # Answer without the INTENTION/AVANT/APRÈS structure
        mock_resp = MagicMock()
        mock_resp.ok = True
        mock_resp.json.return_value = {
            "message": {"content": "Je ne comprends pas cette demande."}
        }
        mock_post.return_value = mock_resp

        actions = [
            {"type": "click", "action_id": "act_001", "target_spec": {"by_text": "OK"}},
        ]

        tmpdir = Path(tempfile.mkdtemp())
        try:
            (tmpdir / "shots").mkdir()
            _enrich_actions_with_intentions(actions, tmpdir)
            # No crash, but no intention either
            assert "intention" not in actions[0]
        finally:
            shutil.rmtree(tmpdir)
|
||||
762
tests/unit/test_task_planner.py
Normal file
762
tests/unit/test_task_planner.py
Normal file
@@ -0,0 +1,762 @@
|
||||
# tests/unit/test_task_planner.py
|
||||
"""
|
||||
Tests unitaires du TaskPlanner (planificateur MACRO).
|
||||
|
||||
Vérifie :
|
||||
1. La compréhension d'ordres simples (understand)
|
||||
2. Le matching de workflows par description sémantique
|
||||
3. La détection de boucles et l'extraction de paramètres
|
||||
4. La conversion étapes → actions JSON (format correct)
|
||||
5. L'extraction de descriptions de session
|
||||
|
||||
Toutes les réponses gemma4 sont mockées pour la reproductibilité.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from unittest.mock import MagicMock, patch, Mock
|
||||
|
||||
import pytest
|
||||
|
||||
_ROOT = str(Path(__file__).resolve().parents[2])
|
||||
if _ROOT not in sys.path:
|
||||
sys.path.insert(0, _ROOT)
|
||||
|
||||
from agent_v0.server_v1.task_planner import TaskPlanner, TaskPlan
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Fixtures
|
||||
# =========================================================================
|
||||
|
||||
@pytest.fixture
def planner():
    """A TaskPlanner wired to a dummy gemma4 port and the generic domain."""
    return TaskPlanner(gemma4_port="11435", domain_id="generic")
|
||||
|
||||
|
||||
@pytest.fixture
def sample_workflows():
    """Workflows made available to the matching tests."""
    return [
        dict(
            session_id="sess_001",
            name="Bloc-notes",
            description="Ouvrir Bloc-notes via Exécuter (Win+R) et écrire du texte",
            machine="PC-01",
            event_count=25,
        ),
        dict(
            session_id="sess_002",
            name="Explorateur de fichiers",
            description="Naviguer dans l'Explorateur de fichiers et ouvrir des images",
            machine="PC-01",
            event_count=40,
        ),
        dict(
            session_id="sess_003",
            name="DxCare, Codage CIM-10",
            description="Ouvrir un dossier patient dans DxCare et coder les diagnostics CIM-10",
            machine="PC-TIM",
            event_count=80,
        ),
    ]
|
||||
|
||||
|
||||
def _mock_gemma4_response(content: str):
|
||||
"""Créer un mock de réponse HTTP gemma4."""
|
||||
mock_resp = MagicMock()
|
||||
mock_resp.ok = True
|
||||
mock_resp.status_code = 200
|
||||
mock_resp.json.return_value = {
|
||||
"message": {"content": content}
|
||||
}
|
||||
return mock_resp
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests : understand — ordre simple
|
||||
# =========================================================================
|
||||
|
||||
class TestUnderstandOrdreSimple:
    """understand() must correctly parse well-formed gemma4 answers."""

    def test_understand_ordre_simple(self, planner, sample_workflows):
        """'Ouvre le bloc-notes' → understood=True."""
        answer = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 1\n"
            "CONFIANCE: 0.9\n"
            "PARAMETRES: AUCUN\n"
            "BOUCLE: NON\n"
            "SOURCE_BOUCLE: aucun\n"
            "PLAN:\n"
            "1. Ouvrir le Bloc-notes via Win+R\n"
            "2. Taper notepad et valider\n"
        )

        with patch("requests.post", return_value=_mock_gemma4_response(answer)):
            plan = planner.understand(
                "Ouvre le bloc-notes",
                available_workflows=sample_workflows,
            )

        assert plan.understood is True
        assert plan.instruction == "Ouvre le bloc-notes"

    def test_understand_instruction_non_comprise(self, planner):
        """An unintelligible instruction → understood=False."""
        answer = "COMPRIS: NON\nWORKFLOW: AUCUN\nBOUCLE: NON\n"

        with patch("requests.post", return_value=_mock_gemma4_response(answer)):
            plan = planner.understand("xyzzy blah blah")

        assert plan.understood is False

    def test_understand_gemma4_erreur_http(self, planner):
        """An HTTP error from gemma4 populates plan.error."""
        failing_response = MagicMock()
        failing_response.ok = False
        failing_response.status_code = 500

        with patch("requests.post", return_value=failing_response):
            plan = planner.understand("Ouvre le bloc-notes")

        assert plan.understood is False
        assert "500" in plan.error

    def test_understand_gemma4_timeout(self, planner):
        """A gemma4 timeout populates plan.error."""
        import requests
        with patch("requests.post", side_effect=requests.Timeout("timeout")):
            plan = planner.understand("Ouvre le bloc-notes")

        assert plan.understood is False
        assert "erreur" in plan.error.lower() or "timeout" in plan.error.lower()
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests : matching workflow
|
||||
# =========================================================================
|
||||
|
||||
class TestUnderstandIdentifieWorkflow:
    """Workflow matching behaviour of TaskPlanner.understand()."""

    def test_understand_identifie_workflow(self, planner, sample_workflows):
        """When a workflow matches, workflow_match must be filled in."""
        canned_reply = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 1\n"
            "CONFIANCE: 0.9\n"
            "PARAMETRES: AUCUN\n"
            "BOUCLE: NON\n"
            "SOURCE_BOUCLE: aucun\n"
            "PLAN:\n"
            "1. Lancer le Bloc-notes\n"
        )
        with patch("requests.post", return_value=_mock_gemma4_response(canned_reply)):
            outcome = planner.understand(
                "Ouvre le bloc-notes",
                available_workflows=sample_workflows,
            )
        assert outcome.workflow_match == "sess_001"
        assert outcome.workflow_name == "Bloc-notes"
        assert outcome.mode == "replay"
        assert outcome.match_confidence >= 0.8

    def test_understand_workflow_aucun_match(self, planner, sample_workflows):
        """No matching workflow → free mode."""
        canned_reply = (
            "COMPRIS: OUI\n"
            "WORKFLOW: AUCUN\n"
            "PARAMETRES: AUCUN\n"
            "BOUCLE: NON\n"
            "SOURCE_BOUCLE: aucun\n"
            "PLAN:\n"
            "1. Ouvrir Chrome\n"
            "2. Aller sur Google\n"
        )
        with patch("requests.post", return_value=_mock_gemma4_response(canned_reply)):
            outcome = planner.understand(
                "Recherche voiture sur Google",
                available_workflows=sample_workflows,
            )
        assert outcome.understood is True
        assert outcome.workflow_match == ""
        assert outcome.mode == "free"

    def test_understand_workflow_second_match(self, planner, sample_workflows):
        """Workflow number 2 is selected correctly."""
        canned_reply = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 2\n"
            "CONFIANCE: 0.85\n"
            "BOUCLE: NON\n"
            "PLAN:\n"
            "1. Ouvrir l'explorateur de fichiers\n"
        )
        with patch("requests.post", return_value=_mock_gemma4_response(canned_reply)):
            outcome = planner.understand(
                "Ouvre mes images",
                available_workflows=sample_workflows,
            )
        assert outcome.workflow_match == "sess_002"
        assert outcome.workflow_name == "Explorateur de fichiers"

    def test_understand_workflow_avec_description_dans_prompt(self, planner, sample_workflows):
        """The prompt sent to gemma4 must embed the workflow descriptions."""
        seen_payload = {}

        def record_post(url, json=None, **kwargs):
            seen_payload.update(json or {})
            return _mock_gemma4_response("COMPRIS: OUI\nWORKFLOW: AUCUN\nBOUCLE: NON\n")

        with patch("requests.post", side_effect=record_post):
            planner.understand(
                "Ouvre le bloc-notes",
                available_workflows=sample_workflows,
            )

        prompt_text = seen_payload["messages"][0]["content"]
        # Workflow descriptions must appear verbatim in the prompt.
        assert "Ouvrir Bloc-notes via Exécuter" in prompt_text
        assert "Naviguer dans l'Explorateur" in prompt_text
|
||||
|
||||
# =========================================================================
|
||||
# Tests : détection de boucle
|
||||
# =========================================================================
|
||||
|
||||
class TestUnderstandDetecteBoucle:
    """Loop-detection behaviour of TaskPlanner.understand()."""

    def test_understand_detecte_boucle(self, planner, sample_workflows):
        """'process ALL the folders' → is_loop=True."""
        canned_reply = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 3\n"
            "CONFIANCE: 0.8\n"
            "PARAMETRES: AUCUN\n"
            "BOUCLE: OUI\n"
            "SOURCE_BOUCLE: écran\n"
            "PLAN:\n"
            "1. Pour chaque dossier dans la liste\n"
            "2. Ouvrir le dossier\n"
            "3. Coder les diagnostics\n"
        )
        with patch("requests.post", return_value=_mock_gemma4_response(canned_reply)):
            outcome = planner.understand(
                "Traite TOUS les dossiers de la liste",
                available_workflows=sample_workflows,
            )
        assert outcome.is_loop is True
        assert outcome.loop_source == "écran"

    def test_understand_pas_de_boucle(self, planner):
        """A simple one-shot order → is_loop=False."""
        canned_reply = (
            "COMPRIS: OUI\n"
            "WORKFLOW: AUCUN\n"
            "BOUCLE: NON\n"
            "SOURCE_BOUCLE: aucun\n"
            "PLAN:\n"
            "1. Ouvrir le navigateur\n"
        )
        with patch("requests.post", return_value=_mock_gemma4_response(canned_reply)):
            outcome = planner.understand("Ouvre le navigateur")
        assert outcome.is_loop is False
|
||||
|
||||
# =========================================================================
|
||||
# Tests : extraction de paramètres
|
||||
# =========================================================================
|
||||
|
||||
class TestUnderstandExtraitParametres:
    """Parameter extraction from the gemma4 reply."""

    def test_understand_extrait_parametres(self, planner, sample_workflows):
        """'dossiers de janvier' → parameters contain mois=janvier."""
        canned_reply = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 3\n"
            "CONFIANCE: 0.85\n"
            "PARAMETRES: mois=janvier\n"
            "BOUCLE: OUI\n"
            "SOURCE_BOUCLE: écran\n"
            "PLAN:\n"
            "1. Filtrer les dossiers de janvier\n"
        )
        with patch("requests.post", return_value=_mock_gemma4_response(canned_reply)):
            outcome = planner.understand(
                "Traite les dossiers de janvier",
                available_workflows=sample_workflows,
            )
        assert "mois" in outcome.parameters
        assert outcome.parameters["mois"] == "janvier"

    def test_understand_parametres_multiples(self, planner):
        """Several parameters given on separate bullet lines."""
        canned_reply = (
            "COMPRIS: OUI\n"
            "WORKFLOW: AUCUN\n"
            "PARAMETRES:\n"
            "- patient=DUPONT\n"
            "- date=2026-01-15\n"
            "BOUCLE: NON\n"
            "PLAN:\n"
            "1. Rechercher le patient DUPONT\n"
        )
        with patch("requests.post", return_value=_mock_gemma4_response(canned_reply)):
            outcome = planner.understand("Cherche le dossier de DUPONT du 15 janvier")
        assert outcome.parameters.get("patient") == "DUPONT"
        assert outcome.parameters.get("date") == "2026-01-15"

    def test_understand_parametres_inline(self, planner):
        """Parameters on the same line as the PARAMETRES: marker."""
        canned_reply = (
            "COMPRIS: OUI\n"
            "WORKFLOW: AUCUN\n"
            "PARAMETRES: nom=Martin, ville=Paris\n"
            "BOUCLE: NON\n"
            "PLAN:\n"
            "1. Chercher Martin à Paris\n"
        )
        with patch("requests.post", return_value=_mock_gemma4_response(canned_reply)):
            outcome = planner.understand("Cherche Martin à Paris")
        assert outcome.parameters.get("nom") == "Martin"
        assert outcome.parameters.get("ville") == "Paris"
|
||||
|
||||
# =========================================================================
|
||||
# Tests : _parse_understanding (parsing tolérant)
|
||||
# =========================================================================
|
||||
|
||||
class TestParseUnderstanding:
    """Tolerant parsing of varied gemma4 reply formats."""

    def test_parse_markdown_gras(self, planner):
        """A reply decorated with **bold** markers still parses."""
        plan = TaskPlan(instruction="test")
        reply = (
            "**COMPRIS:** OUI\n"
            "**WORKFLOW:** AUCUN\n"
            "**BOUCLE:** NON\n"
            "**PLAN:**\n"
            "1. Première étape\n"
        )
        parsed = planner._parse_understanding(plan, reply, [])
        assert parsed.understood is True
        assert parsed.mode == "free"

    def test_parse_confiance_pourcentage(self, planner, sample_workflows):
        """CONFIANCE: 90% → match_confidence=0.9."""
        plan = TaskPlan(instruction="test")
        reply = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 1\n"
            "CONFIANCE: 90%\n"
            "BOUCLE: NON\n"
        )
        parsed = planner._parse_understanding(plan, reply, sample_workflows)
        assert parsed.match_confidence == pytest.approx(0.9)

    def test_parse_confiance_virgule(self, planner, sample_workflows):
        """CONFIANCE: 0,85 (comma decimal) → match_confidence=0.85."""
        plan = TaskPlan(instruction="test")
        reply = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 1\n"
            "CONFIANCE: 0,85\n"
            "BOUCLE: NON\n"
        )
        parsed = planner._parse_understanding(plan, reply, sample_workflows)
        assert parsed.match_confidence == pytest.approx(0.85)

    def test_parse_workflow_avec_parentheses(self, planner, sample_workflows):
        """WORKFLOW: 2 (Explorateur) → index 2 extracted correctly."""
        plan = TaskPlan(instruction="test")
        reply = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 2 (Explorateur de fichiers)\n"
            "BOUCLE: NON\n"
        )
        parsed = planner._parse_understanding(plan, reply, sample_workflows)
        assert parsed.workflow_match == "sess_002"

    def test_parse_workflow_aucun_variantes(self, planner, sample_workflows):
        """Every spelling of 'none' is recognised as no-match."""
        for val in ("AUCUN", "None", "N/A", "-", "NON"):
            plan = TaskPlan(instruction="test")
            reply = f"COMPRIS: OUI\nWORKFLOW: {val}\nBOUCLE: NON\n"
            parsed = planner._parse_understanding(plan, reply, sample_workflows)
            assert parsed.workflow_match == "", f"Devrait être vide pour '{val}'"

    def test_parse_etapes_tirets(self, planner):
        """Dash-prefixed steps are added to the plan."""
        plan = TaskPlan(instruction="test")
        reply = (
            "COMPRIS: OUI\n"
            "WORKFLOW: AUCUN\n"
            "BOUCLE: NON\n"
            "PLAN:\n"
            "- Ouvrir l'application\n"
            "- Cliquer sur Fichier\n"
            "- Sauvegarder\n"
        )
        parsed = planner._parse_understanding(plan, reply, [])
        assert len(parsed.steps) == 3
|
||||
|
||||
# =========================================================================
|
||||
# Tests : _steps_to_actions
|
||||
# =========================================================================
|
||||
|
||||
class TestStepsToActions:
    """Conversion of plan steps into JSON action dicts."""

    def test_steps_to_actions_format(self, planner):
        """Generated actions carry the expected shape (type, target_spec, ...)."""
        canned_reply = (
            '{"type": "click", "target_spec": {"by_text": "Rechercher"}}\n'
            '{"type": "type", "text": "bloc-notes"}\n'
            '{"type": "key_combo", "keys": ["enter"]}\n'
            '{"type": "wait", "duration_ms": 2000}\n'
        )
        with patch("requests.post", return_value=_mock_gemma4_response(canned_reply)):
            actions = planner._steps_to_actions(
                [{"description": "1. Ouvrir le bloc-notes"}],
                {},
            )
        assert len(actions) == 4
        assert actions[0]["type"] == "click"
        assert actions[0]["visual_mode"] is True  # injected automatically
        assert actions[0]["target_spec"]["by_text"] == "Rechercher"
        assert actions[1]["type"] == "type"
        assert actions[1]["text"] == "bloc-notes"
        assert actions[2]["type"] == "key_combo"
        assert actions[2]["keys"] == ["enter"]
        assert actions[3]["type"] == "wait"
        assert actions[3]["duration_ms"] == 2000

    def test_steps_to_actions_json_array(self, planner):
        """gemma4 returns a JSON array → parsed correctly."""
        canned_reply = (
            'Voici les actions :\n'
            '```json\n'
            '[\n'
            ' {"type": "click", "target_spec": {"by_text": "Fichier"}},\n'
            ' {"type": "click", "target_spec": {"by_text": "Ouvrir"}}\n'
            ']\n'
            '```\n'
        )
        with patch("requests.post", return_value=_mock_gemma4_response(canned_reply)):
            actions = planner._steps_to_actions(
                [{"description": "1. Ouvrir un fichier"}],
                {},
            )
        assert len(actions) == 2
        assert actions[0]["target_spec"]["by_text"] == "Fichier"
        assert actions[1]["target_spec"]["by_text"] == "Ouvrir"

    def test_steps_to_actions_nested_json(self, planner):
        """Nested JSON (target_spec) is parsed correctly."""
        canned_reply = (
            '{"type": "click", "target_spec": {"by_text": "OK", "window_title": "Confirmation"}}\n'
        )
        with patch("requests.post", return_value=_mock_gemma4_response(canned_reply)):
            actions = planner._steps_to_actions(
                [{"description": "1. Confirmer"}],
                {},
            )
        assert len(actions) == 1
        assert actions[0]["target_spec"]["window_title"] == "Confirmation"

    def test_steps_to_actions_gemma4_erreur(self, planner):
        """A gemma4 error yields an empty action list."""
        failing_resp = MagicMock()
        failing_resp.ok = False
        with patch("requests.post", return_value=failing_resp):
            actions = planner._steps_to_actions(
                [{"description": "1. Faire quelque chose"}],
                {},
            )
        assert actions == []

    def test_steps_to_actions_filtre_types_invalides(self, planner):
        """Only valid types (click, type, key_combo, wait) are kept."""
        canned_reply = (
            '{"type": "click", "target_spec": {"by_text": "OK"}}\n'
            '{"type": "invalid_action", "foo": "bar"}\n'
            '{"type": "wait", "duration_ms": 500}\n'
            '{"not_a_type": "test"}\n'
        )
        with patch("requests.post", return_value=_mock_gemma4_response(canned_reply)):
            actions = planner._steps_to_actions(
                [{"description": "1. Test"}],
                {},
            )
        assert len(actions) == 2
        assert actions[0]["type"] == "click"
        assert actions[1]["type"] == "wait"
|
||||
|
||||
# =========================================================================
|
||||
# Tests : _parse_actions_json (parsing robuste)
|
||||
# =========================================================================
|
||||
|
||||
class TestParseActionsJson:
    """Robust parsing of JSON action payloads."""

    def test_parse_json_une_par_ligne(self):
        """One JSON action per line."""
        payload = (
            '{"type": "click", "target_spec": {"by_text": "A"}}\n'
            '{"type": "type", "text": "hello"}\n'
        )
        parsed = TaskPlanner._parse_actions_json(payload)
        assert len(parsed) == 2

    def test_parse_json_array(self):
        """A single JSON array of actions."""
        payload = '[{"type": "click", "target_spec": {"by_text": "A"}}, {"type": "wait", "duration_ms": 1000}]'
        parsed = TaskPlanner._parse_actions_json(payload)
        assert len(parsed) == 2

    def test_parse_json_avec_texte_autour(self):
        """JSON wrapped in free-text commentary."""
        payload = (
            "Voici les actions RPA :\n\n"
            '{"type": "click", "target_spec": {"by_text": "Envoyer"}}\n'
            "\n"
            "C'est tout.\n"
        )
        parsed = TaskPlanner._parse_actions_json(payload)
        assert len(parsed) == 1
        assert parsed[0]["target_spec"]["by_text"] == "Envoyer"

    def test_parse_json_vide(self):
        """Empty or JSON-free content → empty list."""
        assert TaskPlanner._parse_actions_json("") == []
        assert TaskPlanner._parse_actions_json("Pas de JSON ici") == []

    def test_parse_json_markdown_code_block(self):
        """JSON inside a markdown code fence."""
        payload = (
            "```json\n"
            '{"type": "type", "text": "bonjour"}\n'
            "```\n"
        )
        parsed = TaskPlanner._parse_actions_json(payload)
        assert len(parsed) == 1
        assert parsed[0]["text"] == "bonjour"
|
||||
|
||||
# =========================================================================
|
||||
# Tests : _extract_session_description
|
||||
# =========================================================================
|
||||
|
||||
class TestExtractSessionDescription:
    """Session descriptions must be readable and semantic."""

    def _write_events(self, tmp_path, events):
        """Dump events to a temporary JSONL file and return its path."""
        jsonl_path = tmp_path / "live_events.jsonl"
        payload = "".join(json.dumps(evt, ensure_ascii=False) + "\n" for evt in events)
        jsonl_path.write_text(payload)
        return jsonl_path

    def test_extract_session_description_bloc_notes(self, tmp_path):
        """A Notepad-via-Win+R session gets a semantic description."""
        events = [
            {"event": {"type": "key_combo", "keys": ["win", "r"],
                       "window": {"title": "Bureau"}}},
            {"event": {"type": "window_focus_change",
                       "from": {"title": "Bureau"},
                       "to": {"title": "Exécuter"}}},
            {"event": {"type": "text_input", "text": "notepad",
                       "window": {"title": "Exécuter"}}},
            {"event": {"type": "mouse_click", "button": "left",
                       "window": {"title": "Exécuter"}}},
            {"event": {"type": "window_focus_change",
                       "from": {"title": "Exécuter"},
                       "to": {"title": "Sans titre – Bloc-notes"}}},
            {"event": {"type": "text_input", "text": "Bonjour le monde",
                       "window": {"title": "Sans titre – Bloc-notes"}}},
        ]
        jsonl_path = self._write_events(tmp_path, events)

        # Import from api_stream (the helper lives at module level).
        from agent_v0.server_v1.api_stream import _extract_session_description
        summary = _extract_session_description(jsonl_path)

        assert summary["event_count"] == 6
        # The description must be readable, not just "Bloc-notes, Exécuter".
        description = summary["description"]
        assert "Bloc-notes" in description or "bloc-notes" in description.lower()
        # The session name must mention the application.
        assert "Bloc-notes" in summary["name"]

    def test_extract_session_description_explorateur(self, tmp_path):
        """A File Explorer session gets a relevant description."""
        events = [
            {"event": {"type": "window_focus_change",
                       "from": {"title": "Bureau"},
                       "to": {"title": "Images – Explorateur de fichiers"}}},
            {"event": {"type": "mouse_click", "button": "left",
                       "window": {"title": "Images – Explorateur de fichiers"}}},
            {"event": {"type": "mouse_click", "button": "left",
                       "window": {"title": "Images – Explorateur de fichiers"}}},
            {"event": {"type": "mouse_click", "button": "left",
                       "window": {"title": "Images – Explorateur de fichiers"}}},
        ]
        jsonl_path = self._write_events(tmp_path, events)

        from agent_v0.server_v1.api_stream import _extract_session_description
        summary = _extract_session_description(jsonl_path)

        assert "Explorateur" in summary["name"] or "Explorateur" in summary["description"]

    def test_extract_session_description_vide(self, tmp_path):
        """An empty events file yields the default description."""
        jsonl_path = self._write_events(tmp_path, [])

        from agent_v0.server_v1.api_stream import _extract_session_description
        summary = _extract_session_description(jsonl_path)

        assert summary["event_count"] == 0
        assert summary["name"] == "Session sans nom"

    def test_extract_session_description_cmd(self, tmp_path):
        """A cmd.exe session is described as such."""
        events = [
            {"event": {"type": "window_focus_change",
                       "from": {"title": "Bureau"},
                       "to": {"title": "C:\\Windows\\system32\\cmd.exe"}}},
            {"event": {"type": "text_input", "text": "dir",
                       "window": {"title": "C:\\Windows\\system32\\cmd.exe"}}},
            {"event": {"type": "text_input", "text": "cd documents",
                       "window": {"title": "C:\\Windows\\system32\\cmd.exe"}}},
        ]
        jsonl_path = self._write_events(tmp_path, events)

        from agent_v0.server_v1.api_stream import _extract_session_description
        summary = _extract_session_description(jsonl_path)

        assert summary["event_count"] == 3
        # Either the name or the description must mention cmd.
        combined = f"{summary['name']} {summary['description']}"
        assert "cmd" in combined.lower()

    def test_extract_session_description_recherche_windows(self, tmp_path):
        """A Windows-search (Win+S) session mentions the search feature."""
        events = [
            {"event": {"type": "key_combo", "keys": ["win", "s"],
                       "window": {"title": "Bureau"}}},
            {"event": {"type": "window_focus_change",
                       "from": {"title": "Bureau"},
                       "to": {"title": "Rechercher"}}},
            {"event": {"type": "text_input", "text": "calculator",
                       "window": {"title": "Rechercher"}}},
        ]
        jsonl_path = self._write_events(tmp_path, events)

        from agent_v0.server_v1.api_stream import _extract_session_description
        summary = _extract_session_description(jsonl_path)

        # The description must mention the Windows search.
        assert "recherche" in summary["description"].lower()
|
||||
|
||||
# =========================================================================
|
||||
# Tests : list_capabilities
|
||||
# =========================================================================
|
||||
|
||||
class TestListCapabilities:
    """Rendering of the capability listing."""

    def test_list_capabilities_avec_workflows(self, planner, sample_workflows):
        """With workflows → human-readable text including descriptions."""
        rendered = planner.list_capabilities(sample_workflows)
        assert "Léa sait faire" in rendered
        assert "Bloc-notes" in rendered

    def test_list_capabilities_sans_workflows(self, planner):
        """Without any learned workflow → a help message."""
        rendered = planner.list_capabilities([])
        assert "pas encore appris" in rendered
|
||||
|
||||
# =========================================================================
|
||||
# Tests : execute (mode replay et free)
|
||||
# =========================================================================
|
||||
|
||||
class TestExecute:
    """Execution of task plans (replay and free modes)."""

    def test_execute_replay(self, planner):
        """Replay mode → the callback is invoked with the matched session_id."""
        plan = TaskPlan(
            instruction="Ouvre le bloc-notes",
            understood=True,
            workflow_match="sess_001",
            workflow_name="Bloc-notes",
            mode="replay",
        )

        callback = MagicMock(return_value="replay_123")
        result = planner.execute(plan, replay_callback=callback)

        assert result.success is True
        callback.assert_called_once_with(
            session_id="sess_001",
            machine_id="default",
            params={},
        )

    def test_execute_non_compris(self, planner):
        """A plan that was not understood must fail."""
        plan = TaskPlan(instruction="blah", understood=False)
        result = planner.execute(plan)
        assert result.success is False
        # "non comprise" is already all-lowercase, so checking the lowered
        # summary alone covers every capitalisation of the message (the
        # previous `or "non comprise" in result.summary` clause was
        # redundant: it could never succeed when the first check failed).
        assert "non comprise" in result.summary.lower()

    def test_execute_sans_callback(self, planner):
        """Replay mode without a callback must fail."""
        plan = TaskPlan(
            instruction="test",
            understood=True,
            workflow_match="sess_001",
            mode="replay",
        )
        result = planner.execute(plan, replay_callback=None)
        assert result.success is False
||||
419
tests/visual/test_grounding_benchmark.py
Normal file
419
tests/visual/test_grounding_benchmark.py
Normal file
@@ -0,0 +1,419 @@
|
||||
"""
|
||||
Benchmark de grounding — 3 approches testées en boucle.
|
||||
|
||||
Compare la robustesse et la précision de :
|
||||
1. Baseline : qwen2.5vl direct
|
||||
2. Zoom progressif : 2 passes (full → crop → re-grounding)
|
||||
3. OCR-first : docTR localise le texte, VLM seulement pour les icônes
|
||||
|
||||
Chaque approche est testée N fois sur les mêmes cibles.
|
||||
Mesure : taux de détection, variance des coordonnées, temps moyen.
|
||||
"""
|
||||
|
||||
import base64
|
||||
import io
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
|
||||
import pytest
|
||||
|
||||
_ROOT = str(Path(__file__).resolve().parents[2])
|
||||
if _ROOT not in sys.path:
|
||||
sys.path.insert(0, _ROOT)
|
||||
|
||||
_SHOTS_DIR = Path(_ROOT) / "data/training/live_sessions/DESKTOP-ST3VBSD_windows/sess_20260404T135010_cec5c8/shots"
|
||||
|
||||
# Nombre d'itérations par test
|
||||
N_ITERATIONS = 5
|
||||
|
||||
|
||||
def _load_screenshot(name: str) -> str:
    """Return the screenshot *name* base64-encoded, or skip the test."""
    shot = _SHOTS_DIR / name
    if not shot.is_file():
        pytest.skip(f"Screenshot {name} non disponible")
    raw = shot.read_bytes()
    return base64.b64encode(raw).decode()
|
||||
|
||||
|
||||
def _load_screenshot_pil(name: str):
    """Open the screenshot *name* as a PIL image, or skip the test."""
    from PIL import Image
    shot = _SHOTS_DIR / name
    if not shot.is_file():
        pytest.skip(f"Screenshot {name} non disponible")
    return Image.open(shot)
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Approche 1 : Baseline qwen2.5vl direct
|
||||
# =========================================================================
|
||||
|
||||
|
||||
def _parse_bbox_2d(content: str) -> Optional[Tuple[int, int, int, int]]:
|
||||
"""Parser les coordonnées bbox_2d depuis une réponse qwen2.5vl.
|
||||
|
||||
qwen2.5vl retourne du JSON :
|
||||
```json
|
||||
[{"bbox_2d": [x1, y1, x2, y2], "label": "..."}]
|
||||
```
|
||||
Les coordonnées sont en pixels relatifs à l'image envoyée.
|
||||
"""
|
||||
# Stratégie 1 : parser le JSON complet (le plus fiable)
|
||||
# Nettoyer les fences markdown
|
||||
cleaned = re.sub(r'```(?:json)?\s*', '', content).strip()
|
||||
try:
|
||||
data = json.loads(cleaned)
|
||||
if isinstance(data, list) and len(data) > 0:
|
||||
bbox = data[0].get("bbox_2d")
|
||||
if bbox and len(bbox) >= 4:
|
||||
return (int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))
|
||||
elif isinstance(data, dict):
|
||||
bbox = data.get("bbox_2d")
|
||||
if bbox and len(bbox) >= 4:
|
||||
return (int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))
|
||||
except (json.JSONDecodeError, ValueError, TypeError):
|
||||
pass
|
||||
|
||||
# Stratégie 2 : regex ciblé sur "bbox_2d": [x1, y1, x2, y2]
|
||||
bbox_match = re.search(
|
||||
r'"bbox_2d"\s*:\s*\[\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\]',
|
||||
content,
|
||||
)
|
||||
if bbox_match:
|
||||
return tuple(int(bbox_match.group(i)) for i in range(1, 5))
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def grounding_baseline(screenshot_b64: str, description: str, img_width: int = 1280, img_height: int = 800) -> Optional[Tuple[float, float]]:
    """Direct qwen2.5vl grounding — returns normalised (x_pct, y_pct).

    qwen2.5vl answers with pixel coordinates relative to the submitted
    image; they are normalised here by dividing by the image dimensions.
    Returns None on any failure (HTTP error, unparsable reply, point
    outside the image).
    """
    import requests

    payload = {
        "model": "qwen2.5vl:7b",
        "messages": [{"role": "user", "content": f"Detect '{description}' with a bounding box.", "images": [screenshot_b64]}],
        "stream": False,
        "options": {"temperature": 0.0, "num_predict": 100},
    }
    try:
        resp = requests.post(
            "http://localhost:11434/api/chat",
            json=payload,
            timeout=30,
        )
        if not resp.ok:
            return None
        reply_text = resp.json().get("message", {}).get("content", "")
        box = _parse_bbox_2d(reply_text)
        if box is not None:
            left, top, right, bottom = box
            # Box centre in pixels → fraction of the image (0-1).
            cx = (left + right) / 2 / img_width
            cy = (top + bottom) / 2 / img_height
            if 0.0 <= cx <= 1.0 and 0.0 <= cy <= 1.0:
                return (cx, cy)
    except Exception:
        pass  # best effort: any network/parse failure means "not found"
    return None
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Approche 2 : Zoom progressif (2 passes)
|
||||
# =========================================================================
|
||||
|
||||
|
||||
def grounding_zoom(screenshot_b64: str, description: str, img_width: int = 1280, img_height: int = 800) -> Optional[Tuple[float, float]]:
    """Progressive zoom — pass 1 on the full image, pass 2 on a 2x crop."""
    import requests
    from PIL import Image

    # Pass 1: ground on the full image.
    coarse = grounding_baseline(screenshot_b64, description, img_width, img_height)
    if coarse is None:
        return None

    coarse_x, coarse_y = coarse

    # Pass 2: crop around the detected point, then re-ground.
    try:
        raw = base64.b64decode(screenshot_b64)
        full_img = Image.open(io.BytesIO(raw))
        full_w, full_h = full_img.size

        # 2x crop centred on the pass-1 point (25% of the image each side).
        margin = 0.25
        centre_x = int(coarse_x * full_w)
        centre_y = int(coarse_y * full_h)
        left = max(0, centre_x - int(margin * full_w))
        top = max(0, centre_y - int(margin * full_h))
        right = min(full_w, centre_x + int(margin * full_w))
        bottom = min(full_h, centre_y + int(margin * full_h))

        region = full_img.crop((left, top, right, bottom))
        region_w, region_h = region.size

        # Re-encode the crop as base64 JPEG.
        buffer = io.BytesIO()
        region.save(buffer, format="JPEG", quality=85)
        region_b64 = base64.b64encode(buffer.getvalue()).decode()

        # Re-ground inside the crop (using the crop's own dimensions).
        fine = grounding_baseline(region_b64, description, region_w, region_h)
        if fine is None:
            return coarse  # fall back to the pass-1 result

        # Map the crop-local fractions back to full-image coordinates.
        fine_x, fine_y = fine
        return (
            (left + fine_x * region_w) / full_w,
            (top + fine_y * region_h) / full_h,
        )

    except Exception:
        return coarse  # best effort: keep the pass-1 result
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Approche 3 : OCR-first (docTR)
|
||||
# =========================================================================
|
||||
|
||||
|
||||
def grounding_ocr_first(screenshot_b64: str, description: str) -> Optional[Tuple[float, float]]:
    """OCR-first — docTR locates text, the VLM only handles icons."""
    try:
        from doctr.io import DocumentFile
        from doctr.models import ocr_predictor

        raw = base64.b64decode(screenshot_b64)

        # Run OCR over the screenshot.
        engine = ocr_predictor(det_arch='db_resnet50', reco_arch='crnn_vgg16_bn', pretrained=True)
        document = DocumentFile.from_images([raw])
        ocr_out = engine(document)

        # Scan every recognised word for an exact or partial match.
        wanted = description.lower()
        best_point = None
        best_overlap = 0

        for page in ocr_out.pages:
            for text_block in page.blocks:
                for text_line in text_block.lines:
                    for word in text_line.words:
                        found = word.value.lower()
                        if wanted in found or found in wanted:
                            overlap = len(found) / max(len(wanted), 1)
                            if overlap > best_overlap:
                                # docTR geometry is already normalised 0-1:
                                # ((x1, y1), (x2, y2))
                                geom = word.geometry
                                best_point = (
                                    (geom[0][0] + geom[1][0]) / 2,
                                    (geom[0][1] + geom[1][1]) / 2,
                                )
                                best_overlap = overlap

        if best_point and best_overlap > 0.5:
            return best_point

    except ImportError:
        pass  # docTR not installed
    except Exception:
        pass  # best effort: fall through to the VLM

    # VLM fallback for elements without any text.
    return grounding_baseline(screenshot_b64, description)
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Framework de benchmark
|
||||
# =========================================================================
|
||||
|
||||
|
||||
def run_benchmark(
    approach_fn,
    approach_name: str,
    screenshot_b64: str,
    description: str,
    n_iterations: int = N_ITERATIONS,
) -> Dict:
    """Run a grounding benchmark: N iterations, measuring spread and timing.

    Args:
        approach_fn: Callable (screenshot_b64, description) -> (x, y) or None.
        approach_name: Label stored under "approach" in the returned stats.
        screenshot_b64: Screenshot to ground on (base64).
        description: Target element description passed to approach_fn.
        n_iterations: Number of repetitions; must be >= 1.

    Returns:
        Stats dict with detection_rate, avg_time_ms, and — when at least one
        detection occurred — mean position plus spread. NOTE: the
        "x_variance"/"y_variance" keys hold the max-min *range* (kept for
        backward compatibility), not a statistical variance. "stable" means
        range < 5% of the screen on both axes.

    Raises:
        ValueError: if n_iterations < 1 (previously crashed with
            ZeroDivisionError).
    """
    if n_iterations < 1:
        raise ValueError(f"n_iterations must be >= 1, got {n_iterations}")

    results = []
    times = []

    for _ in range(n_iterations):
        t_start = time.time()
        result = approach_fn(screenshot_b64, description)
        times.append(time.time() - t_start)
        if result is not None:
            results.append(result)

    # Aggregate statistics.
    n_found = len(results)
    detection_rate = n_found / n_iterations

    stats = {
        "approach": approach_name,
        "target": description,
        "iterations": n_iterations,
        "detection_rate": round(detection_rate, 2),
        "avg_time_ms": round(sum(times) / len(times) * 1000, 0),
    }

    if n_found >= 2:
        xs = [r[0] for r in results]
        ys = [r[1] for r in results]
        stats["x_mean"] = round(sum(xs) / len(xs), 4)
        stats["y_mean"] = round(sum(ys) / len(ys), 4)
        stats["x_variance"] = round(max(xs) - min(xs), 4)
        stats["y_variance"] = round(max(ys) - min(ys), 4)
        stats["stable"] = stats["x_variance"] < 0.05 and stats["y_variance"] < 0.05
    elif n_found == 1:
        # A single detection has zero spread by definition.
        stats["x_mean"] = round(results[0][0], 4)
        stats["y_mean"] = round(results[0][1], 4)
        stats["x_variance"] = 0
        stats["y_variance"] = 0
        stats["stable"] = True
    else:
        # Nothing detected: no position stats, definitely not stable.
        stats["stable"] = False

    return stats
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests de benchmark comparatif
|
||||
# =========================================================================
|
||||
|
||||
|
||||
# Benchmark targets: (screenshot file, element description, human-readable name).
# Screenshots come from _SHOTS_DIR; the description is the text each grounding
# approach is asked to locate on the image.
_TARGETS = [
    ("shot_0001_full.png", "Rechercher", "Rechercher taskbar"),
    ("shot_0001_full.png", "agent_v1", "Dossier agent_v1"),
    ("shot_0004_full.png", "Fichier", "Menu Fichier"),
    ("shot_0004_full.png", "Modifier", "Menu Modifier"),
    ("shot_0004_full.png", "Ceci est un test.txt", "Onglet fichier"),
    ("shot_0014_full.png", "Rechercher sur Google ou saisir une URL", "Recherche Google"),
    ("shot_0014_full.png", "Gmail", "Lien Gmail"),
]
|
||||
|
||||
|
||||
@pytest.mark.visual
class TestBenchmarkBaseline:
    """Benchmark of the baseline approach (direct qwen2.5vl grounding)."""

    @pytest.mark.parametrize("shot,desc,name", _TARGETS)
    def test_baseline_robustesse(self, shot, desc, name):
        shot_b64 = _load_screenshot(shot)
        bench = run_benchmark(grounding_baseline, "baseline", shot_b64, desc, N_ITERATIONS)

        detected = int(bench['detection_rate'] * N_ITERATIONS)
        print(f"\n [{bench['approach']}] {name}:")
        print(f" Détection: {bench['detection_rate']*100:.0f}% ({detected}/{N_ITERATIONS})")
        print(f" Temps moyen: {bench['avg_time_ms']:.0f}ms")
        if bench.get("x_mean") is not None:
            print(f" Position: ({bench['x_mean']:.3f}, {bench['y_mean']:.3f})")
            print(f" Variance: X={bench['x_variance']:.4f} Y={bench['y_variance']:.4f}")
        print(f" Stable: {'OUI' if bench['stable'] else 'NON'}")

        assert bench["detection_rate"] >= 0.6, f"{name}: détection trop faible ({bench['detection_rate']})"
|
||||
|
||||
|
||||
@pytest.mark.visual
class TestBenchmarkZoom:
    """Benchmark of the progressive-zoom approach."""

    @pytest.mark.parametrize("shot,desc,name", _TARGETS)
    def test_zoom_robustesse(self, shot, desc, name):
        shot_b64 = _load_screenshot(shot)
        bench = run_benchmark(grounding_zoom, "zoom", shot_b64, desc, N_ITERATIONS)

        detected = int(bench['detection_rate'] * N_ITERATIONS)
        print(f"\n [{bench['approach']}] {name}:")
        print(f" Détection: {bench['detection_rate']*100:.0f}% ({detected}/{N_ITERATIONS})")
        print(f" Temps moyen: {bench['avg_time_ms']:.0f}ms")
        if bench.get("x_mean") is not None:
            print(f" Position: ({bench['x_mean']:.3f}, {bench['y_mean']:.3f})")
            print(f" Variance: X={bench['x_variance']:.4f} Y={bench['y_variance']:.4f}")
        print(f" Stable: {'OUI' if bench['stable'] else 'NON'}")

        assert bench["detection_rate"] >= 0.6, f"{name}: détection trop faible ({bench['detection_rate']})"
|
||||
|
||||
|
||||
@pytest.mark.visual
class TestBenchmarkCitrix:
    """Baseline benchmark on degraded images (Citrix JPEG Q20 simulation)."""

    def _degrade_citrix(self, screenshot_b64: str) -> str:
        """Simuler compression Citrix (JPEG qualité 20)."""
        from PIL import Image
        source = Image.open(io.BytesIO(base64.b64decode(screenshot_b64)))
        out = io.BytesIO()
        source.save(out, "JPEG", quality=20)
        return base64.b64encode(out.getvalue()).decode()

    @pytest.mark.parametrize("shot,desc,name", _TARGETS)
    def test_citrix_robustesse(self, shot, desc, name):
        shot_b64 = _load_screenshot(shot)
        degraded_b64 = self._degrade_citrix(shot_b64)
        bench = run_benchmark(grounding_baseline, "citrix_q20", degraded_b64, desc, N_ITERATIONS)

        print(f"\n [{bench['approach']}] {name}:")
        print(f" Détection: {bench['detection_rate']*100:.0f}%")
        print(f" Temps moyen: {bench['avg_time_ms']:.0f}ms")
        if bench.get("x_mean") is not None:
            print(f" Position: ({bench['x_mean']:.3f}, {bench['y_mean']:.3f})")
            print(f" Variance: X={bench['x_variance']:.4f} Y={bench['y_variance']:.4f}")
        print(f" Stable: {'OUI' if bench['stable'] else 'NON'}")

        # Citrix can be less reliable — lower acceptance threshold.
        assert bench["detection_rate"] >= 0.4, f"{name} Citrix: détection trop faible ({bench['detection_rate']})"
|
||||
|
||||
|
||||
@pytest.mark.visual
class TestRapportComparatif:
    """Generates a comparative report of the 3 grounding approaches."""

    def test_rapport_complet(self):
        """Run the 3 approaches on every target and print a comparison table."""
        from PIL import Image

        all_results = []

        for shot, desc, name in _TARGETS:
            screenshot = _load_screenshot(shot)

            # Build the Citrix-degraded variant: JPEG quality-20 round-trip.
            img_bytes = base64.b64decode(screenshot)
            img = Image.open(io.BytesIO(img_bytes))
            buf = io.BytesIO()
            img.save(buf, "JPEG", quality=20)
            citrix = base64.b64encode(buf.getvalue()).decode()

            # 3 iterations per (target, approach) pair — lighter than the
            # dedicated per-approach benchmark classes.
            for approach_fn, approach_name, img_b64 in [
                (grounding_baseline, "baseline", screenshot),
                (grounding_zoom, "zoom", screenshot),
                (grounding_baseline, "citrix_q20", citrix),
            ]:
                stats = run_benchmark(approach_fn, approach_name, img_b64, desc, 3)
                stats["target_name"] = name
                all_results.append(stats)

        # Report: one fixed-width row per (target, approach) result.
        print("\n" + "=" * 80)
        print("RAPPORT COMPARATIF — GROUNDING BENCHMARK")
        print("=" * 80)
        print(f"{'Cible':<25s} {'Approche':<12s} {'Détect.':<8s} {'Temps':<8s} {'Position':<20s} {'Var X':<8s} {'Var Y':<8s} {'Stable'}")
        print("-" * 80)
        for r in all_results:
            # Position/variance keys are absent when nothing was detected.
            pos = f"({r.get('x_mean',0):.3f}, {r.get('y_mean',0):.3f})" if r.get('x_mean') is not None else "N/A"
            var_x = f"{r.get('x_variance',0):.4f}" if r.get('x_variance') is not None else "N/A"
            var_y = f"{r.get('y_variance',0):.4f}" if r.get('y_variance') is not None else "N/A"
            stable = "OUI" if r.get('stable') else "NON"
            print(f"{r['target_name']:<25s} {r['approach']:<12s} {r['detection_rate']*100:5.0f}% {r['avg_time_ms']:5.0f}ms {pos:<20s} {var_x:<8s} {var_y:<8s} {stable}")
        print("=" * 80)
|
||||
445
tests/visual/test_visual_grounding.py
Normal file
445
tests/visual/test_visual_grounding.py
Normal file
@@ -0,0 +1,445 @@
|
||||
"""
|
||||
Tests visuels sur captures d'écran réelles — Grounding benchmark.
|
||||
|
||||
Vérifie que le système trouve les bons éléments UI sur des screenshots
|
||||
Windows réels. Pas besoin de VM — juste les images et le serveur.
|
||||
|
||||
Chaque test :
|
||||
1. Charge un screenshot réel (sessions enregistrées)
|
||||
2. Demande au serveur de localiser un élément (via /resolve_target)
|
||||
3. Vérifie que les coordonnées retournées sont dans la zone attendue
|
||||
|
||||
C'est l'apprentissage de l'environnement Windows :
|
||||
- Rechercher un programme
|
||||
- Fermer/réduire/agrandir une fenêtre
|
||||
- Naviguer dans les onglets
|
||||
- Utiliser les menus
|
||||
"""
|
||||
|
||||
import base64
|
||||
import io
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Optional, Tuple
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
_ROOT = str(Path(__file__).resolve().parents[2])
|
||||
if _ROOT not in sys.path:
|
||||
sys.path.insert(0, _ROOT)
|
||||
|
||||
# Directory of recorded test screenshots (one real captured Windows session).
_SHOTS_DIR = Path(_ROOT) / "data/training/live_sessions/DESKTOP-ST3VBSD_windows/sess_20260404T135010_cec5c8/shots"

# Screenshot resolution — used to normalize qwen2.5vl pixel bboxes to 0-1.
_SCREEN_W = 1280
_SCREEN_H = 800
|
||||
|
||||
|
||||
def _load_screenshot(name: str) -> Optional[str]:
    """Load a test screenshot and return it base64-encoded; skip if absent."""
    shot_path = _SHOTS_DIR / name
    if not shot_path.is_file():
        pytest.skip(f"Screenshot {name} non disponible")
    raw = shot_path.read_bytes()
    return base64.b64encode(raw).decode()
|
||||
|
||||
|
||||
def _in_zone(x_pct: float, y_pct: float, zone: dict) -> bool:
|
||||
"""Vérifier si un point est dans une zone attendue.
|
||||
|
||||
zone = {"x_min": 0.3, "x_max": 0.5, "y_min": 0.9, "y_max": 1.0}
|
||||
"""
|
||||
return (
|
||||
zone["x_min"] <= x_pct <= zone["x_max"]
|
||||
and zone["y_min"] <= y_pct <= zone["y_max"]
|
||||
)
|
||||
|
||||
|
||||
def _resolve_via_server(
    screenshot_b64: str,
    target_spec: dict,
    strict: bool = True,
) -> Optional[dict]:
    """Resolve a target visually via the VLM (direct qwen2.5vl grounding).

    Strategy 1 calls qwen2.5vl directly for grounding (bbox_2d); if that
    yields nothing, strategy 2 falls back to the server's resolve_target
    endpoint. Returns a dict with resolved/method/x_pct/y_pct/score, or
    None when neither strategy finds the target.
    """
    import requests
    import re

    # ── Strategy 1: direct VLM grounding (qwen2.5vl via local Ollama) ──
    by_text = target_spec.get("by_text", "")
    vlm_desc = target_spec.get("vlm_description", "")
    search_text = by_text or vlm_desc

    if search_text:
        try:
            prompt = f"Detect the element '{search_text}' with a bounding box."
            resp = requests.post(
                "http://localhost:11434/api/chat",
                json={
                    "model": "qwen2.5vl:7b",
                    "messages": [{"role": "user", "content": prompt, "images": [screenshot_b64]}],
                    "stream": False,
                    "options": {"temperature": 0.0, "num_predict": 100},
                },
                timeout=30,
            )
            if resp.ok:
                content = resp.json().get("message", {}).get("content", "")
                # Parse bbox_2d — qwen2.5vl returns pixels relative to the
                # submitted image, NOT a 1000x1000 grid.
                bbox_match = re.search(
                    r'"bbox_2d"\s*:\s*\[\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\]',
                    content,
                )
                if bbox_match:
                    x1, y1, x2, y2 = [int(bbox_match.group(i)) for i in range(1, 5)]
                    # Normalize by the image dimensions (pixels -> 0-1).
                    # NOTE(review): assumes the screenshot is _SCREEN_W x _SCREEN_H;
                    # a differently-sized capture would be mis-normalized.
                    cx = (x1 + x2) / 2 / _SCREEN_W
                    cy = (y1 + y2) / 2 / _SCREEN_H
                    if 0.0 <= cx <= 1.0 and 0.0 <= cy <= 1.0:
                        return {
                            "resolved": True,
                            "method": "vlm_grounding",
                            "x_pct": cx,
                            "y_pct": cy,
                            # Fixed confidence: the VLM gives no usable score here.
                            "score": 0.8,
                            "raw_bbox": [x1, y1, x2, y2],
                        }
        except requests.Timeout:
            pytest.skip("qwen2.5vl timeout — premier chargement ?")
        except requests.ConnectionError:
            pytest.skip("Ollama non disponible (localhost:11434)")

    # ── Strategy 2: server endpoint (fallback) ──
    # Token from the environment, else from .env.local at the repo root.
    token = os.environ.get("RPA_API_TOKEN", "")
    if not token:
        env_file = Path(_ROOT) / ".env.local"
        if env_file.is_file():
            for line in env_file.read_text().splitlines():
                if line.startswith("RPA_API_TOKEN="):
                    # NOTE(review): no break — the last matching line wins,
                    # and quoted values are not unquoted. Confirm intended.
                    token = line.split("=", 1)[1].strip()

    headers = {"Content-Type": "application/json"}
    if token:
        headers["Authorization"] = f"Bearer {token}"

    try:
        resp = requests.post(
            "http://localhost:5005/api/v1/traces/stream/replay/resolve_target",
            json={
                "session_id": "visual_test",
                "screenshot_b64": screenshot_b64,
                "target_spec": target_spec,
                "screen_width": _SCREEN_W,
                "screen_height": _SCREEN_H,
                "fallback_x_pct": 0.5,
                "fallback_y_pct": 0.5,
                "strict_mode": strict,
            },
            headers=headers,
            timeout=30,
        )
        if resp.ok:
            data = resp.json()
            if data.get("resolved"):
                return data
    except Exception:
        pass  # Server unavailable: deliberate best-effort fallback.

    return None
|
||||
|
||||
|
||||
def _assert_found_in_zone(result: dict, zone: dict, element_name: str):
    """Assert that the element was resolved and lies in the expected zone."""
    assert result is not None, f"{element_name}: pas de réponse du serveur"
    resolved = result.get("resolved")
    assert resolved, (
        f"{element_name}: non trouvé (reason={result.get('reason', '?')})"
    )
    x = result.get("x_pct", 0)
    y = result.get("y_pct", 0)
    inside = _in_zone(x, y, zone)
    assert inside, (
        f"{element_name}: trouvé à ({x:.3f}, {y:.3f}) "
        f"mais attendu dans zone x=[{zone['x_min']:.2f}-{zone['x_max']:.2f}] "
        f"y=[{zone['y_min']:.2f}-{zone['y_max']:.2f}]"
    )
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# shot_0001 : Explorateur de fichiers Windows
|
||||
# =========================================================================
|
||||
|
||||
|
||||
@pytest.mark.visual
class TestExplorateurFichiers:
    """Tests on the Windows File Explorer screenshot (shot_0001)."""

    @pytest.fixture
    def screenshot(self):
        # Base64 screenshot of the Explorer session; skips if the file is missing.
        return _load_screenshot("shot_0001_full.png")

    def test_trouver_rechercher_taskbar(self, screenshot):
        """Find 'Rechercher' (Search) in the taskbar."""
        result = _resolve_via_server(screenshot, {
            "by_text": "Rechercher",
            "vlm_description": "La barre de recherche Windows dans la barre des tâches, en bas de l'écran",
        })
        # Expected zone: bottom strip of the screen.
        _assert_found_in_zone(result, {
            "x_min": 0.20, "x_max": 0.50,
            "y_min": 0.90, "y_max": 1.00,
        }, "Rechercher (taskbar)")

    def test_trouver_bouton_fermer_explorateur(self, screenshot):
        """Find the close (X) button of the Explorer window."""
        result = _resolve_via_server(screenshot, {
            "by_text": "",
            "vlm_description": "Le bouton fermer (X) de la fenêtre Explorateur de fichiers, en haut à droite",
        })
        _assert_found_in_zone(result, {
            "x_min": 0.90, "x_max": 1.00,
            "y_min": 0.00, "y_max": 0.05,
        }, "Bouton fermer (X)")

    def test_trouver_bouton_reduire(self, screenshot):
        """Find the minimize (-) button of the Explorer window."""
        result = _resolve_via_server(screenshot, {
            "by_text": "",
            "vlm_description": "Le bouton réduire (minimize, -) de la fenêtre, en haut à droite à gauche du X",
        })
        _assert_found_in_zone(result, {
            "x_min": 0.85, "x_max": 0.95,
            "y_min": 0.00, "y_max": 0.05,
        }, "Bouton réduire (-)")

    def test_trouver_dossier_agent_v1(self, screenshot):
        """Find the 'agent_v1' folder in the file list."""
        result = _resolve_via_server(screenshot, {
            "by_text": "agent_v1",
            "vlm_description": "Le dossier agent_v1 dans la liste des fichiers de l'Explorateur",
        })
        _assert_found_in_zone(result, {
            "x_min": 0.05, "x_max": 0.50,
            "y_min": 0.10, "y_max": 0.30,
        }, "Dossier agent_v1")

    def test_trouver_bouton_demarrer(self, screenshot):
        """Find the Start button (Windows logo) in the taskbar."""
        result = _resolve_via_server(screenshot, {
            "by_text": "",
            "vlm_description": "Le bouton Démarrer (logo Windows) dans la barre des tâches, en bas",
        })
        _assert_found_in_zone(result, {
            "x_min": 0.18, "x_max": 0.30,
            "y_min": 0.90, "y_max": 1.00,
        }, "Bouton Démarrer")

    def test_trouver_ce_pc(self, screenshot):
        """Find 'Ce PC' (This PC) in the Explorer's side navigation pane."""
        result = _resolve_via_server(screenshot, {
            "by_text": "Ce PC",
            "vlm_description": "L'élément 'Ce PC' dans le panneau de navigation gauche de l'Explorateur",
        })
        _assert_found_in_zone(result, {
            "x_min": 0.00, "x_max": 0.12,
            "y_min": 0.40, "y_max": 0.55,
        }, "Ce PC")
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# shot_0004 : Bloc-notes avec onglets + Explorateur derrière
|
||||
# =========================================================================
|
||||
|
||||
|
||||
@pytest.mark.visual
class TestBlocNotesOnglets:
    """Tests on Notepad with several tabs open (shot_0004)."""

    @pytest.fixture
    def screenshot(self):
        # Base64 screenshot of the Notepad session; skips if the file is missing.
        return _load_screenshot("shot_0004_full.png")

    def test_trouver_menu_fichier(self, screenshot):
        """Find the 'Fichier' (File) menu in Notepad."""
        result = _resolve_via_server(screenshot, {
            "by_text": "Fichier",
            "vlm_description": "Le menu Fichier dans la barre de menus du Bloc-notes",
        })
        _assert_found_in_zone(result, {
            "x_min": 0.02, "x_max": 0.10,
            "y_min": 0.08, "y_max": 0.15,
        }, "Menu Fichier")

    def test_trouver_onglet_ceci_est_un_test(self, screenshot):
        """Find the 'Ceci est un test.txt' tab in Notepad."""
        result = _resolve_via_server(screenshot, {
            "by_text": "Ceci est un test",
            "vlm_description": "L'onglet 'Ceci est un test.txt' dans le Bloc-notes",
        })
        _assert_found_in_zone(result, {
            "x_min": 0.40, "x_max": 0.70,
            "y_min": 0.03, "y_max": 0.10,
        }, "Onglet 'Ceci est un test.txt'")

    def test_trouver_nouvel_onglet_plus(self, screenshot):
        """Find the '+' button that opens a new tab."""
        result = _resolve_via_server(screenshot, {
            "by_text": "",
            "vlm_description": "Le bouton + (plus) pour ajouter un nouvel onglet dans le Bloc-notes, à droite des onglets",
        })
        _assert_found_in_zone(result, {
            "x_min": 0.55, "x_max": 0.70,
            "y_min": 0.03, "y_max": 0.10,
        }, "Bouton + (nouvel onglet)")

    def test_trouver_bouton_fermer_onglet(self, screenshot):
        """Find the X that closes the active tab."""
        result = _resolve_via_server(screenshot, {
            "by_text": "",
            "vlm_description": "Le bouton X pour fermer l'onglet actif 'Ceci est un test.txt' dans le Bloc-notes",
        })
        _assert_found_in_zone(result, {
            "x_min": 0.50, "x_max": 0.65,
            "y_min": 0.03, "y_max": 0.10,
        }, "Fermer onglet (X)")

    def test_trouver_menu_modifier(self, screenshot):
        """Find the 'Modifier' (Edit) menu in Notepad."""
        result = _resolve_via_server(screenshot, {
            "by_text": "Modifier",
            "vlm_description": "Le menu Modifier dans la barre de menus du Bloc-notes",
        })
        _assert_found_in_zone(result, {
            "x_min": 0.07, "x_max": 0.16,
            "y_min": 0.08, "y_max": 0.15,
        }, "Menu Modifier")

    def test_trouver_encodage_utf8(self, screenshot):
        """Find the UTF-8 encoding indicator in the status bar."""
        result = _resolve_via_server(screenshot, {
            "by_text": "UTF-8",
            "vlm_description": "L'indicateur d'encodage UTF-8 dans la barre de statut en bas du Bloc-notes",
        })
        _assert_found_in_zone(result, {
            "x_min": 0.60, "x_max": 0.80,
            "y_min": 0.90, "y_max": 1.00,
        }, "UTF-8 (barre de statut)")
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# shot_0014 : Google Chrome page d'accueil
|
||||
# =========================================================================
|
||||
|
||||
|
||||
@pytest.mark.visual
class TestGoogleChrome:
    """Tests on Google Chrome showing the Google home page (shot_0014)."""

    @pytest.fixture
    def screenshot(self):
        # Base64 screenshot of the Chrome session; skips if the file is missing.
        return _load_screenshot("shot_0014_full.png")

    def test_trouver_barre_recherche_google(self, screenshot):
        """Find the Google search bar in the center of the page."""
        result = _resolve_via_server(screenshot, {
            "by_text": "Rechercher sur Google",
            "vlm_description": "La barre de recherche Google au centre de la page d'accueil",
        })
        _assert_found_in_zone(result, {
            "x_min": 0.10, "x_max": 0.60,
            "y_min": 0.30, "y_max": 0.50,
        }, "Barre recherche Google")

    def test_trouver_barre_adresse_chrome(self, screenshot):
        """Find Chrome's address bar at the top."""
        result = _resolve_via_server(screenshot, {
            "by_text": "",
            "vlm_description": "La barre d'adresse URL de Google Chrome, en haut du navigateur",
        })
        _assert_found_in_zone(result, {
            "x_min": 0.10, "x_max": 0.60,
            "y_min": 0.05, "y_max": 0.15,
        }, "Barre d'adresse Chrome")

    def test_trouver_nouvel_onglet_chrome(self, screenshot):
        """Find the '+' button for a new Chrome tab."""
        result = _resolve_via_server(screenshot, {
            "by_text": "",
            "vlm_description": "Le bouton + pour ouvrir un nouvel onglet dans Google Chrome",
        })
        _assert_found_in_zone(result, {
            "x_min": 0.15, "x_max": 0.25,
            "y_min": 0.00, "y_max": 0.06,
        }, "Nouvel onglet (+) Chrome")

    def test_trouver_fermer_chrome(self, screenshot):
        """Find the X button that closes the Chrome window."""
        result = _resolve_via_server(screenshot, {
            "by_text": "",
            "vlm_description": "Le bouton fermer (X) de la fenêtre Google Chrome, en haut à droite",
        })
        _assert_found_in_zone(result, {
            "x_min": 0.90, "x_max": 1.00,
            "y_min": 0.00, "y_max": 0.06,
        }, "Fermer Chrome (X)")

    def test_trouver_gmail(self, screenshot):
        """Find the Gmail link on the Google home page."""
        result = _resolve_via_server(screenshot, {
            "by_text": "Gmail",
            "vlm_description": "Le lien Gmail en haut à droite de la page Google",
        })
        _assert_found_in_zone(result, {
            "x_min": 0.50, "x_max": 0.80,
            "y_min": 0.10, "y_max": 0.20,
        }, "Gmail")
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests transversaux (connaissances de base Windows)
|
||||
# =========================================================================
|
||||
|
||||
|
||||
@pytest.mark.visual
class TestConnaissancesWindowsBase:
    """Basic Windows knowledge that any user has (presence checks only —
    no zone assertion, unlike the per-application test classes)."""

    def test_rechercher_programme_depuis_explorateur(self):
        """From the Explorer, find the Windows search bar."""
        screenshot = _load_screenshot("shot_0001_full.png")
        result = _resolve_via_server(screenshot, {
            "by_text": "Rechercher",
            "vlm_description": "La barre de recherche dans la barre des tâches Windows en bas de l'écran",
        })
        assert result and result.get("resolved"), "Rechercher non trouvé"

    def test_fermer_programme_depuis_blocnotes(self):
        """From Notepad, find the close button."""
        screenshot = _load_screenshot("shot_0004_full.png")
        result = _resolve_via_server(screenshot, {
            "by_text": "",
            "vlm_description": "Le bouton X pour fermer la fenêtre du Bloc-notes, en haut à droite",
        })
        assert result and result.get("resolved"), "Bouton fermer non trouvé"

    def test_ajouter_onglet_blocnotes(self):
        """Find the control to add a new tab in Notepad."""
        screenshot = _load_screenshot("shot_0004_full.png")
        result = _resolve_via_server(screenshot, {
            "by_text": "",
            "vlm_description": "Le bouton + pour ajouter un nouvel onglet dans le Bloc-notes",
        })
        assert result and result.get("resolved"), "Bouton + non trouvé"

    def test_rechercher_sur_google(self):
        """Find the Google search field to type into."""
        screenshot = _load_screenshot("shot_0014_full.png")
        result = _resolve_via_server(screenshot, {
            "by_text": "Rechercher sur Google",
            "vlm_description": "Le champ de recherche Google",
        })
        assert result and result.get("resolved"), "Recherche Google non trouvée"
|
||||
864
tests/visual/test_visual_robustness.py
Normal file
864
tests/visual/test_visual_robustness.py
Normal file
@@ -0,0 +1,864 @@
|
||||
"""
|
||||
Tests de robustesse visuelle — Grounding VLM qwen2.5vl:7b.
|
||||
|
||||
Objectifs :
|
||||
1. Reproductibilité : même screenshot + même cible → même résultat 10 fois
|
||||
2. Robustesse Citrix : screenshots compressés JPEG qualité 15-25 → ça marche
|
||||
3. Mesure de variance : coordonnées stables à < 5% de l'écran
|
||||
|
||||
Architecture des coordonnées qwen2.5vl :
|
||||
- Format bbox_2d : [x1, y1, x2, y2] en pixels relatifs à l'image envoyée
|
||||
- Pour une image 1280x800, X va de 0 à 1280 et Y de 0 à 800
|
||||
- Normalisation : diviser par les dimensions de l'image (pas par 1000)
|
||||
|
||||
Calibration mesurée (5 avril 2026) sur screenshots 1280x800 :
|
||||
- shot_0001/Rechercher (taskbar) : cx=0.458, cy=0.789
|
||||
- shot_0001/agent_v1 (dossier) : cx=0.247, cy=0.201
|
||||
- shot_0004/Fichier (menu) : cx=0.095, cy=0.086
|
||||
- shot_0004/Modifier (menu) : cx=0.142, cy=0.085
|
||||
- shot_0004/Ceci est un test.txt (onglet): cx=0.694, cy=0.053
|
||||
- shot_0004/Close X (Bloc-notes) : cx=0.990, cy=0.041
|
||||
- shot_0014/Google search (centre) : cx=0.539, cy=0.389
|
||||
- shot_0014/Gmail (haut-droite) : cx=0.913, cy=0.130
|
||||
"""
|
||||
|
||||
import base64
|
||||
import io
|
||||
import json
|
||||
import re
|
||||
import statistics
|
||||
import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
|
||||
import pytest
|
||||
|
||||
_ROOT = str(Path(__file__).resolve().parents[2])
|
||||
if _ROOT not in sys.path:
|
||||
sys.path.insert(0, _ROOT)
|
||||
|
||||
# Directory of recorded test screenshots (one real captured Windows session).
_SHOTS_DIR = (
    Path(_ROOT)
    / "data/training/live_sessions/DESKTOP-ST3VBSD_windows"
    / "sess_20260404T135010_cec5c8/shots"
)

# Screenshot resolution — used to normalize qwen2.5vl pixel bboxes to 0-1.
_SCREEN_W = 1280
_SCREEN_H = 800

# Number of repetitions for the reproducibility tests.
_N_REPEATS = 10

# Maximum coordinate spread tolerated (fraction of the screen, 0.05 = 5%).
_MAX_VARIANCE = 0.05

# Minimum number of detections required (X out of _N_REPEATS).
_MIN_DETECTION_RATE = 8
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Utilitaires
|
||||
# =========================================================================
|
||||
|
||||
|
||||
def _load_screenshot(name: str) -> Optional[str]:
    """Load a test screenshot and return it base64-encoded; skip if absent."""
    shot_path = _SHOTS_DIR / name
    if not shot_path.is_file():
        pytest.skip(f"Screenshot {name} non disponible")
    raw = shot_path.read_bytes()
    return base64.b64encode(raw).decode()
|
||||
|
||||
|
||||
def _degrade_citrix(screenshot_b64: str, quality: int = 20) -> str:
    """Simulate Citrix compression: low-quality JPEG pass, then back to PNG b64."""
    from PIL import Image

    original = Image.open(io.BytesIO(base64.b64decode(screenshot_b64)))

    # Lossy JPEG round-trip (the Citrix artifact simulation).
    jpg_buf = io.BytesIO()
    original.save(jpg_buf, "JPEG", quality=quality)
    jpg_buf.seek(0)
    degraded = Image.open(jpg_buf)

    # Re-encode as PNG, the format sent to the VLM.
    png_buf = io.BytesIO()
    degraded.save(png_buf, "PNG")
    return base64.b64encode(png_buf.getvalue()).decode()
|
||||
|
||||
|
||||
def _grounding_vlm(
    screenshot_b64: str,
    element_description: str,
    timeout: int = 60,
) -> Tuple[Optional[float], Optional[float], Optional[List[int]], str]:
    """Call qwen2.5vl to locate a UI element on a screenshot.

    Returns (cx, cy, [x1, y1, x2, y2], raw_content).
    cx and cy are bbox centers normalized to 0-1 by the screenshot
    dimensions (_SCREEN_W x _SCREEN_H) — qwen2.5vl emits pixel
    coordinates relative to the submitted image, not a 1000x1000 grid.
    The first three slots are None when no bbox could be parsed;
    raw_content is always the model's raw text response.

    Skips the test when Ollama is unreachable or times out.
    """
    import requests

    try:
        resp = requests.post(
            "http://localhost:11434/api/chat",
            json={
                "model": "qwen2.5vl:7b",
                "messages": [
                    {
                        "role": "user",
                        "content": (
                            f"Detect the element '{element_description}' "
                            f"with a bounding box."
                        ),
                        "images": [screenshot_b64],
                    }
                ],
                "stream": False,
                "options": {"temperature": 0.1, "num_predict": 100},
            },
            timeout=timeout,
        )
    except requests.ConnectionError:
        pytest.skip("Ollama non disponible (localhost:11434)")
    except requests.Timeout:
        pytest.skip("qwen2.5vl timeout — modèle en cours de chargement ?")

    content = resp.json().get("message", {}).get("content", "")

    # Parse bbox_2d from the response.
    # qwen2.5vl returns coordinates in pixels relative to the submitted image,
    # NOT on a 1000x1000 grid.
    bbox_match = re.search(
        r'"bbox_2d"\s*:\s*\[(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\]',
        content,
    )
    if bbox_match:
        x1, y1, x2, y2 = [int(bbox_match.group(i)) for i in range(1, 5)]
        # Normalize by the image dimensions (pixels -> 0-1).
        # NOTE(review): assumes the screenshot is _SCREEN_W x _SCREEN_H.
        cx = (x1 + x2) / 2 / _SCREEN_W
        cy = (y1 + y2) / 2 / _SCREEN_H
        return cx, cy, [x1, y1, x2, y2], content

    return None, None, None, content
|
||||
|
||||
|
||||
def _run_n_times(
    screenshot_b64: str,
    description: str,
    n: int = _N_REPEATS,
    delay: float = 0.2,
) -> List[Dict]:
    """Run the grounding N times and collect one result dict per run."""
    runs: List[Dict] = []
    for idx in range(n):
        cx, cy, bbox, raw = _grounding_vlm(screenshot_b64, description)
        runs.append({
            "run": idx + 1,
            "cx": cx,
            "cy": cy,
            "bbox": bbox,
            "detected": cx is not None,
            "raw": raw,
        })
        # Short pause between calls — skipped after the final run.
        if idx < n - 1:
            time.sleep(delay)
    return runs
|
||||
|
||||
|
||||
def _compute_stats(results: List[Dict]) -> Dict:
|
||||
"""Calculer les statistiques de détection et de variance."""
|
||||
detected = [r for r in results if r["detected"]]
|
||||
n_total = len(results)
|
||||
n_detected = len(detected)
|
||||
|
||||
stats = {
|
||||
"total": n_total,
|
||||
"detected": n_detected,
|
||||
"rate": n_detected / n_total if n_total > 0 else 0,
|
||||
"rate_str": f"{n_detected}/{n_total}",
|
||||
}
|
||||
|
||||
if n_detected >= 2:
|
||||
xs = [r["cx"] for r in detected]
|
||||
ys = [r["cy"] for r in detected]
|
||||
stats.update({
|
||||
"x_min": min(xs),
|
||||
"x_max": max(xs),
|
||||
"x_mean": statistics.mean(xs),
|
||||
"x_range": max(xs) - min(xs),
|
||||
"x_stdev": statistics.stdev(xs) if n_detected >= 2 else 0,
|
||||
"y_min": min(ys),
|
||||
"y_max": max(ys),
|
||||
"y_mean": statistics.mean(ys),
|
||||
"y_range": max(ys) - min(ys),
|
||||
"y_stdev": statistics.stdev(ys) if n_detected >= 2 else 0,
|
||||
})
|
||||
elif n_detected == 1:
|
||||
stats.update({
|
||||
"x_min": detected[0]["cx"],
|
||||
"x_max": detected[0]["cx"],
|
||||
"x_mean": detected[0]["cx"],
|
||||
"x_range": 0,
|
||||
"x_stdev": 0,
|
||||
"y_min": detected[0]["cy"],
|
||||
"y_max": detected[0]["cy"],
|
||||
"y_mean": detected[0]["cy"],
|
||||
"y_range": 0,
|
||||
"y_stdev": 0,
|
||||
})
|
||||
|
||||
return stats
|
||||
|
||||
|
||||
def _assert_reproducible(
    stats: Dict,
    element_name: str,
    min_rate: int = _MIN_DETECTION_RATE,
    max_var: float = _MAX_VARIANCE,
):
    """Assert the element was detected often enough and at a stable position.

    Args:
        stats: Output of ``_compute_stats``.
        element_name: Human-readable label used in failure messages.
        min_rate: Minimum number of successful detections required.
        max_var: Maximum allowed coordinate spread (max - min) per axis.
    """
    n_found = stats["detected"]
    assert n_found >= min_rate, (
        f"{element_name}: seulement {stats['rate_str']} détections "
        f"(minimum requis: {min_rate}/{stats['total']})"
    )

    # Spread is only meaningful with at least two detected positions.
    if n_found < 2:
        return
    for axis in ("x", "y"):
        spread = stats[f"{axis}_range"]
        assert spread < max_var, (
            f"{element_name}: variance {axis.upper()} trop élevée: "
            f"{spread:.4f} (max={max_var})"
        )
|
||||
|
||||
|
||||
def _assert_in_zone(
|
||||
stats: Dict,
|
||||
zone: Dict[str, float],
|
||||
element_name: str,
|
||||
):
|
||||
"""Vérifier que la position moyenne est dans la zone attendue."""
|
||||
assert stats["detected"] >= 1, f"{element_name}: aucune détection"
|
||||
cx = stats["x_mean"]
|
||||
cy = stats["y_mean"]
|
||||
assert zone["x_min"] <= cx <= zone["x_max"], (
|
||||
f"{element_name}: X moyen {cx:.4f} hors zone "
|
||||
f"[{zone['x_min']:.2f}-{zone['x_max']:.2f}]"
|
||||
)
|
||||
assert zone["y_min"] <= cy <= zone["y_max"], (
|
||||
f"{element_name}: Y moyen {cy:.4f} hors zone "
|
||||
f"[{zone['y_min']:.2f}-{zone['y_max']:.2f}]"
|
||||
)
|
||||
|
||||
|
||||
# =========================================================================
# Calibrated zones (measured on 5 April 2026)
#
# Each zone is an allowed interval for the MEAN detected center of one UI
# element, expressed as screen-normalized coordinates (0.0-1.0 per axis,
# based on the 1280x800 reference screenshots used by these tests).
# =========================================================================

CALIBRATED_ZONES = {
    # shot_0001 — Windows File Explorer
    "rechercher_taskbar": {
        "x_min": 0.40, "x_max": 0.60,
        "y_min": 0.74, "y_max": 0.84,
    },
    "agent_v1_folder": {
        "x_min": 0.18, "x_max": 0.30,
        "y_min": 0.16, "y_max": 0.26,
    },
    # shot_0004 — Notepad with tabs
    "fichier_menu": {
        "x_min": 0.06, "x_max": 0.13,
        "y_min": 0.06, "y_max": 0.12,
    },
    "modifier_menu": {
        "x_min": 0.11, "x_max": 0.18,
        "y_min": 0.06, "y_max": 0.12,
    },
    "ceci_est_un_test_tab": {
        "x_min": 0.65, "x_max": 0.75,
        "y_min": 0.03, "y_max": 0.08,
    },
    # x_max > 1.0 is deliberate: on maximized windows the VLM's X
    # coordinate can overflow past the normalized grid (see the
    # Close-X overflow test in TestReproductibilite).
    "close_x_notepad": {
        "x_min": 0.95, "x_max": 1.02,
        "y_min": 0.02, "y_max": 0.06,
    },
    # shot_0014 — Google Chrome
    "google_search_bar": {
        "x_min": 0.48, "x_max": 0.60,
        "y_min": 0.35, "y_max": 0.43,
    },
    "gmail_link": {
        "x_min": 0.87, "x_max": 0.95,
        "y_min": 0.10, "y_max": 0.16,
    },
}
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests de reproductibilité — 10 appels consécutifs
|
||||
# =========================================================================
|
||||
|
||||
|
||||
@pytest.mark.visual
class TestReproductibilite:
    """Each test calls the VLM 10 times and checks result consistency.

    Success criteria:
    - At least 8/10 detections
    - Coordinate variance < 5% of the screen on each axis
    - Mean position inside the calibrated zone
    """

    # -- shot_0001: Windows File Explorer --

    @pytest.fixture(scope="class")
    def shot_0001(self):
        # Loaded once per class: every test reuses the same base64 image.
        return _load_screenshot("shot_0001_full.png")

    def test_rechercher_10_fois(self, shot_0001):
        """The VLM finds 'Rechercher' in the same place 10 times in a row."""
        results = _run_n_times(
            shot_0001,
            "the 'Rechercher' search text in the Windows taskbar at the bottom",
        )
        stats = _compute_stats(results)
        _assert_reproducible(stats, "Rechercher (taskbar)")
        _assert_in_zone(stats, CALIBRATED_ZONES["rechercher_taskbar"], "Rechercher")
        # Print the summary for the report
        print(f"\n [Rechercher] {stats['rate_str']} détections, "
              f"X=[{stats.get('x_min', 0):.4f}-{stats.get('x_max', 0):.4f}], "
              f"Y=[{stats.get('y_min', 0):.4f}-{stats.get('y_max', 0):.4f}]")

    def test_agent_v1_10_fois(self, shot_0001):
        """The VLM finds the 'agent_v1' folder in the same place 10 times."""
        results = _run_n_times(
            shot_0001,
            "the folder named 'agent_v1' in the file list",
        )
        stats = _compute_stats(results)
        _assert_reproducible(stats, "agent_v1 (dossier)")
        _assert_in_zone(stats, CALIBRATED_ZONES["agent_v1_folder"], "agent_v1")
        print(f"\n [agent_v1] {stats['rate_str']} détections, "
              f"X=[{stats.get('x_min', 0):.4f}-{stats.get('x_max', 0):.4f}], "
              f"Y=[{stats.get('y_min', 0):.4f}-{stats.get('y_max', 0):.4f}]")

    def test_close_x_explorateur_10_fois(self, shot_0001):
        """The maximized window's X button: X-axis overflow expected.

        Checks that the VLM detects the X button consistently. On maximized
        windows (1280 px wide), the X coordinates overflow the normalized
        1000-unit grid (cx > 1.0).

        Note: the VLM may sometimes confuse the window's X button with the
        tab's close button (multiple-close-buttons ambiguity). We check
        that the majority of detections target the right button.
        """
        results = _run_n_times(
            shot_0001,
            "the X close button of the 'Lea' window",
        )
        # Make sure the VLM detects something at all
        detected = [r for r in results if r["detected"]]
        assert len(detected) >= _MIN_DETECTION_RATE, (
            f"Close X: seulement {len(detected)}/{len(results)} détections"
        )

        # Classify detections: overflow (window button) vs non-overflow (tab button)
        overflows = [r for r in detected if r["cx"] > 1.0]
        non_overflows = [r for r in detected if r["cx"] <= 1.0]

        # At least 60% of the detections must target the window button (overflow)
        assert len(overflows) >= len(detected) * 0.6, (
            f"Close X: seulement {len(overflows)}/{len(detected)} en overflow. "
            f"Ambiguïté avec bouton onglet ({len(non_overflows)} non-overflow)."
        )

        # Check the consistency of the overflow detections (the main cluster)
        if len(overflows) >= 2:
            bboxes = [r["bbox"] for r in overflows]
            x1s = [b[0] for b in bboxes]
            y1s = [b[1] for b in bboxes]
            assert max(x1s) - min(x1s) < 20, (
                f"Close X overflow: x1 trop variable: {min(x1s)}-{max(x1s)}"
            )
            assert max(y1s) - min(y1s) < 20, (
                f"Close X overflow: y1 trop variable: {min(y1s)}-{max(y1s)}"
            )

        # NOTE(review): the trailing `if overflows else ""` conditions the
        # WHOLE print argument (all three concatenated f-strings), not just
        # the last segment. Harmless in practice: the 60%-overflow assert
        # above guarantees `overflows` is non-empty by this point.
        print(f"\n [Close X Explorer] {len(detected)}/{len(results)} détections, "
              f"{len(overflows)} overflow (fenêtre), {len(non_overflows)} non-overflow (onglet). "
              f"cx_mean_overflow={statistics.mean([r['cx'] for r in overflows]):.4f}" if overflows else "")

    # -- shot_0004: Notepad --

    @pytest.fixture(scope="class")
    def shot_0004(self):
        return _load_screenshot("shot_0004_full.png")

    def test_fichier_10_fois(self, shot_0004):
        """The VLM finds the 'Fichier' menu in the same place 10 times."""
        results = _run_n_times(
            shot_0004,
            "the 'Fichier' menu item in the menu bar",
        )
        stats = _compute_stats(results)
        _assert_reproducible(stats, "Fichier (menu)")
        _assert_in_zone(stats, CALIBRATED_ZONES["fichier_menu"], "Fichier")
        print(f"\n [Fichier] {stats['rate_str']} détections, "
              f"X=[{stats.get('x_min', 0):.4f}-{stats.get('x_max', 0):.4f}], "
              f"Y=[{stats.get('y_min', 0):.4f}-{stats.get('y_max', 0):.4f}]")

    def test_modifier_10_fois(self, shot_0004):
        """The VLM finds the 'Modifier' menu in the same place 10 times."""
        results = _run_n_times(
            shot_0004,
            "the 'Modifier' menu item in the menu bar",
        )
        stats = _compute_stats(results)
        _assert_reproducible(stats, "Modifier (menu)")
        _assert_in_zone(stats, CALIBRATED_ZONES["modifier_menu"], "Modifier")
        print(f"\n [Modifier] {stats['rate_str']} détections, "
              f"X=[{stats.get('x_min', 0):.4f}-{stats.get('x_max', 0):.4f}], "
              f"Y=[{stats.get('y_min', 0):.4f}-{stats.get('y_max', 0):.4f}]")

    def test_ceci_est_un_test_10_fois(self, shot_0004):
        """The VLM finds the 'Ceci est un test.txt' tab in the same place 10 times."""
        results = _run_n_times(
            shot_0004,
            "the tab labeled 'Ceci est un test.txt'",
        )
        stats = _compute_stats(results)
        _assert_reproducible(stats, "Ceci est un test.txt (onglet)")
        _assert_in_zone(stats, CALIBRATED_ZONES["ceci_est_un_test_tab"], "Ceci est un test.txt")
        print(f"\n [Ceci est un test.txt] {stats['rate_str']} détections, "
              f"X=[{stats.get('x_min', 0):.4f}-{stats.get('x_max', 0):.4f}], "
              f"Y=[{stats.get('y_min', 0):.4f}-{stats.get('y_max', 0):.4f}]")

    # -- shot_0014: Google Chrome --

    @pytest.fixture(scope="class")
    def shot_0014(self):
        return _load_screenshot("shot_0014_full.png")

    def test_google_search_10_fois(self, shot_0014):
        """The VLM finds the Google search bar in the same place 10 times."""
        results = _run_n_times(
            shot_0014,
            "the Google search bar 'Rechercher sur Google ou saisir une URL'",
        )
        stats = _compute_stats(results)
        _assert_reproducible(stats, "Recherche Google")
        _assert_in_zone(stats, CALIBRATED_ZONES["google_search_bar"], "Recherche Google")
        print(f"\n [Google search] {stats['rate_str']} détections, "
              f"X=[{stats.get('x_min', 0):.4f}-{stats.get('x_max', 0):.4f}], "
              f"Y=[{stats.get('y_min', 0):.4f}-{stats.get('y_max', 0):.4f}]")

    def test_gmail_10_fois(self, shot_0014):
        """The VLM finds the Gmail link in the same place 10 times."""
        results = _run_n_times(
            shot_0014,
            "the 'Gmail' link at the top of the page",
        )
        stats = _compute_stats(results)
        _assert_reproducible(stats, "Gmail")
        _assert_in_zone(stats, CALIBRATED_ZONES["gmail_link"], "Gmail")
        print(f"\n [Gmail] {stats['rate_str']} détections, "
              f"X=[{stats.get('x_min', 0):.4f}-{stats.get('x_max', 0):.4f}], "
              f"Y=[{stats.get('y_min', 0):.4f}-{stats.get('y_max', 0):.4f}]")
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests de robustesse Citrix — JPEG dégradé
|
||||
# =========================================================================
|
||||
|
||||
|
||||
@pytest.mark.visual
class TestCitrixRobustesse:
    """Check that grounding still works on compressed images.

    Simulates a Citrix/RDP environment with JPEG compression at quality
    15-25, and compares original vs degraded results.
    """

    @pytest.fixture(scope="class")
    def shots_original(self):
        # Pristine PNG screenshots, loaded once for the whole class.
        return {
            "shot_0001": _load_screenshot("shot_0001_full.png"),
            "shot_0004": _load_screenshot("shot_0004_full.png"),
            "shot_0014": _load_screenshot("shot_0014_full.png"),
        }

    @pytest.fixture(scope="class")
    def shots_citrix(self, shots_original):
        # Same screenshots re-encoded at JPEG quality 20 to mimic Citrix.
        return {
            name: _degrade_citrix(b64, quality=20)
            for name, b64 in shots_original.items()
        }

    def _compare_original_vs_citrix(
        self,
        original_b64: str,
        citrix_b64: str,
        description: str,
        element_name: str,
        zone: Dict,
        n_runs: int = 5,
    ) -> Dict:
        """Run the same grounding on both images and return both stat dicts.

        NOTE(review): `element_name` and `zone` are accepted but never used
        in this helper — callers perform their own zone assertions. Confirm
        whether they were meant to be used here before removing them.
        """
        # n_runs passes on the original image
        results_orig = _run_n_times(original_b64, description, n=n_runs, delay=0.2)
        stats_orig = _compute_stats(results_orig)

        # n_runs passes on the Citrix-degraded image
        results_citrix = _run_n_times(citrix_b64, description, n=n_runs, delay=0.2)
        stats_citrix = _compute_stats(results_citrix)

        return {
            "original": stats_orig,
            "citrix": stats_citrix,
        }

    def test_rechercher_citrix(self, shots_original, shots_citrix):
        """'Rechercher' detected despite JPEG Q20 compression."""
        comp = self._compare_original_vs_citrix(
            shots_original["shot_0001"],
            shots_citrix["shot_0001"],
            "the 'Rechercher' search text in the Windows taskbar at the bottom",
            "Rechercher",
            CALIBRATED_ZONES["rechercher_taskbar"],
        )
        # At least 3/5 detections on the Citrix image
        assert comp["citrix"]["detected"] >= 3, (
            f"Citrix Rechercher: seulement {comp['citrix']['rate_str']} détections"
        )
        # Position must fall inside the calibrated zone
        if comp["citrix"]["detected"] >= 1:
            _assert_in_zone(comp["citrix"], CALIBRATED_ZONES["rechercher_taskbar"], "Rechercher (Citrix)")
        print(f"\n [Rechercher Citrix] orig={comp['original']['rate_str']}, "
              f"citrix={comp['citrix']['rate_str']}")

    def test_fichier_citrix(self, shots_original, shots_citrix):
        """'Fichier' menu detected despite JPEG Q20 compression."""
        comp = self._compare_original_vs_citrix(
            shots_original["shot_0004"],
            shots_citrix["shot_0004"],
            "the 'Fichier' menu item in the menu bar",
            "Fichier",
            CALIBRATED_ZONES["fichier_menu"],
        )
        assert comp["citrix"]["detected"] >= 3, (
            f"Citrix Fichier: seulement {comp['citrix']['rate_str']} détections"
        )
        if comp["citrix"]["detected"] >= 1:
            _assert_in_zone(comp["citrix"], CALIBRATED_ZONES["fichier_menu"], "Fichier (Citrix)")
        print(f"\n [Fichier Citrix] orig={comp['original']['rate_str']}, "
              f"citrix={comp['citrix']['rate_str']}")

    def test_ceci_est_un_test_citrix(self, shots_original, shots_citrix):
        """'Ceci est un test.txt' tab detected despite JPEG Q20 compression."""
        comp = self._compare_original_vs_citrix(
            shots_original["shot_0004"],
            shots_citrix["shot_0004"],
            "the tab labeled 'Ceci est un test.txt'",
            "Ceci est un test.txt",
            CALIBRATED_ZONES["ceci_est_un_test_tab"],
        )
        assert comp["citrix"]["detected"] >= 3, (
            f"Citrix tab: seulement {comp['citrix']['rate_str']} détections"
        )
        if comp["citrix"]["detected"] >= 1:
            _assert_in_zone(
                comp["citrix"],
                CALIBRATED_ZONES["ceci_est_un_test_tab"],
                "Ceci est un test.txt (Citrix)",
            )
        print(f"\n [Ceci est un test.txt Citrix] orig={comp['original']['rate_str']}, "
              f"citrix={comp['citrix']['rate_str']}")

    def test_google_search_citrix(self, shots_original, shots_citrix):
        """Google search bar detected despite JPEG Q20 compression."""
        comp = self._compare_original_vs_citrix(
            shots_original["shot_0014"],
            shots_citrix["shot_0014"],
            "the Google search bar 'Rechercher sur Google ou saisir une URL'",
            "Recherche Google",
            CALIBRATED_ZONES["google_search_bar"],
        )
        assert comp["citrix"]["detected"] >= 3, (
            f"Citrix Google: seulement {comp['citrix']['rate_str']} détections"
        )
        if comp["citrix"]["detected"] >= 1:
            _assert_in_zone(
                comp["citrix"],
                CALIBRATED_ZONES["google_search_bar"],
                "Recherche Google (Citrix)",
            )
        print(f"\n [Google search Citrix] orig={comp['original']['rate_str']}, "
              f"citrix={comp['citrix']['rate_str']}")

    def test_gmail_citrix(self, shots_original, shots_citrix):
        """Gmail link detected despite JPEG Q20 compression."""
        comp = self._compare_original_vs_citrix(
            shots_original["shot_0014"],
            shots_citrix["shot_0014"],
            "the 'Gmail' link at the top of the page",
            "Gmail",
            CALIBRATED_ZONES["gmail_link"],
        )
        assert comp["citrix"]["detected"] >= 3, (
            f"Citrix Gmail: seulement {comp['citrix']['rate_str']} détections"
        )
        if comp["citrix"]["detected"] >= 1:
            _assert_in_zone(comp["citrix"], CALIBRATED_ZONES["gmail_link"], "Gmail (Citrix)")
        print(f"\n [Gmail Citrix] orig={comp['original']['rate_str']}, "
              f"citrix={comp['citrix']['rate_str']}")
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests de dégradation progressive — qualité JPEG 50 → 15 → 5
|
||||
# =========================================================================
|
||||
|
||||
|
||||
@pytest.mark.visual
class TestDegradationProgressive:
    """Measure at which JPEG quality level the grounding starts to fail."""

    @pytest.fixture(scope="class")
    def shot_0004(self):
        return _load_screenshot("shot_0004_full.png")

    def test_fichier_degradation_progressive(self, shot_0004):
        """'Fichier' menu: try JPEG Q50, Q25, Q15, Q10, Q5."""
        # Decreasing quality levels; 3 runs each keeps total VLM calls low.
        qualities = [50, 25, 15, 10, 5]
        results_by_quality = {}

        for q in qualities:
            degraded = _degrade_citrix(shot_0004, quality=q)
            results = _run_n_times(
                degraded,
                "the 'Fichier' menu item in the menu bar",
                n=3,
                delay=0.2,
            )
            stats = _compute_stats(results)
            results_by_quality[q] = stats

        # Print the degradation report
        print("\n === Dégradation progressive : Fichier menu ===")
        for q in qualities:
            s = results_by_quality[q]
            zone_ok = ""
            if s["detected"] >= 1:
                cx = s["x_mean"]
                cy = s["y_mean"]
                z = CALIBRATED_ZONES["fichier_menu"]
                in_zone = z["x_min"] <= cx <= z["x_max"] and z["y_min"] <= cy <= z["y_max"]
                zone_ok = " (in zone)" if in_zone else f" (HORS zone: {cx:.3f},{cy:.3f})"
            print(f" Q{q:>2}: {s['rate_str']} détections{zone_ok}")

        # At least Q50 and Q25 must still work
        assert results_by_quality[50]["detected"] >= 2, "Q50 devrait fonctionner"
        assert results_by_quality[25]["detected"] >= 2, "Q25 devrait fonctionner"
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Rapport final — exécuté en dernier, résume tout
|
||||
# =========================================================================
|
||||
|
||||
|
||||
@pytest.mark.visual
class TestRapportFinal:
    """Full report of the VLM grounding capabilities.

    This test runs a battery of detections and produces a structured
    report with detection rates, variance, and a Citrix comparison.
    """

    def test_rapport_complet(self):
        """Generate the final VLM grounding robustness report."""
        # NOTE(review): this import is never used in the method — candidate
        # for removal (confirm it is not needed for a Pillow side effect).
        from PIL import Image

        shots = {
            "shot_0001": _load_screenshot("shot_0001_full.png"),
            "shot_0004": _load_screenshot("shot_0004_full.png"),
            "shot_0014": _load_screenshot("shot_0014_full.png"),
        }

        # (screenshot key, report label, VLM prompt, calibrated zone)
        targets = [
            ("shot_0001", "Rechercher (taskbar)",
             "the 'Rechercher' search text in the Windows taskbar at the bottom",
             CALIBRATED_ZONES["rechercher_taskbar"]),
            ("shot_0001", "agent_v1 (dossier)",
             "the folder named 'agent_v1' in the file list",
             CALIBRATED_ZONES["agent_v1_folder"]),
            ("shot_0004", "Fichier (menu)",
             "the 'Fichier' menu item in the menu bar",
             CALIBRATED_ZONES["fichier_menu"]),
            ("shot_0004", "Modifier (menu)",
             "the 'Modifier' menu item in the menu bar",
             CALIBRATED_ZONES["modifier_menu"]),
            ("shot_0004", "Ceci est un test.txt (onglet)",
             "the tab labeled 'Ceci est un test.txt'",
             CALIBRATED_ZONES["ceci_est_un_test_tab"]),
            ("shot_0004", "Close X (Bloc-notes)",
             "the close button X of the Notepad window at the top right",
             CALIBRATED_ZONES["close_x_notepad"]),
            ("shot_0014", "Recherche Google (barre)",
             "the Google search bar 'Rechercher sur Google ou saisir une URL'",
             CALIBRATED_ZONES["google_search_bar"]),
            ("shot_0014", "Gmail (lien)",
             "the 'Gmail' link at the top of the page",
             CALIBRATED_ZONES["gmail_link"]),
        ]

        # Report header
        report_lines = [
            "",
            "=" * 80,
            "RAPPORT DE ROBUSTESSE — Grounding VLM qwen2.5vl:7b",
            f"Date: {time.strftime('%Y-%m-%d %H:%M:%S')}",
            f"Screenshots: 1280x800 (3 images, {len(targets)} cibles)",
            f"Répétitions: 5 par cible (original + Citrix Q20)",
            "=" * 80,
            "",
            "--- ORIGINAL (PNG) ---",
            f"{'Élément':<35} {'Taux':>6} {'X moy':>8} {'Y moy':>8} "
            f"{'Var X':>8} {'Var Y':>8} {'Zone':>6}",
            "-" * 80,
        ]

        all_original_stats = []
        all_citrix_stats = []

        for shot_name, label, desc, zone in targets:
            # Original: 5 runs per target
            results_orig = _run_n_times(shots[shot_name], desc, n=5, delay=0.2)
            stats_orig = _compute_stats(results_orig)
            all_original_stats.append((label, stats_orig, zone))

            in_zone = "?"
            if stats_orig["detected"] >= 1:
                cx, cy = stats_orig["x_mean"], stats_orig["y_mean"]
                ok = (zone["x_min"] <= cx <= zone["x_max"]
                      and zone["y_min"] <= cy <= zone["y_max"])
                in_zone = "OK" if ok else "HORS"

            report_lines.append(
                f"{label:<35} {stats_orig['rate_str']:>6} "
                f"{stats_orig.get('x_mean', 0):>8.4f} "
                f"{stats_orig.get('y_mean', 0):>8.4f} "
                f"{stats_orig.get('x_range', 0):>8.4f} "
                f"{stats_orig.get('y_range', 0):>8.4f} "
                f"{in_zone:>6}"
            )

        report_lines.extend([
            "",
            "--- CITRIX (JPEG Q20) ---",
            f"{'Élément':<35} {'Taux':>6} {'X moy':>8} {'Y moy':>8} "
            f"{'Var X':>8} {'Var Y':>8} {'Zone':>6} {'Écart orig':>10}",
            "-" * 90,
        ])

        # Same battery on the degraded images; enumerate() pairs each Citrix
        # result with its original stats for the deviation column.
        for i, (shot_name, label, desc, zone) in enumerate(targets):
            citrix_b64 = _degrade_citrix(shots[shot_name], quality=20)
            results_citrix = _run_n_times(citrix_b64, desc, n=5, delay=0.2)
            stats_citrix = _compute_stats(results_citrix)
            all_citrix_stats.append((label, stats_citrix, zone))

            in_zone = "?"
            ecart = "N/A"
            if stats_citrix["detected"] >= 1:
                cx, cy = stats_citrix["x_mean"], stats_citrix["y_mean"]
                ok = (zone["x_min"] <= cx <= zone["x_max"]
                      and zone["y_min"] <= cy <= zone["y_max"])
                in_zone = "OK" if ok else "HORS"

                # Compute the deviation from the original image
                orig_stats = all_original_stats[i][1]
                if orig_stats["detected"] >= 1:
                    dx = abs(cx - orig_stats["x_mean"])
                    dy = abs(cy - orig_stats["y_mean"])
                    ecart = f"{dx:.4f}/{dy:.4f}"

            report_lines.append(
                f"{label:<35} {stats_citrix['rate_str']:>6} "
                f"{stats_citrix.get('x_mean', 0):>8.4f} "
                f"{stats_citrix.get('y_mean', 0):>8.4f} "
                f"{stats_citrix.get('x_range', 0):>8.4f} "
                f"{stats_citrix.get('y_range', 0):>8.4f} "
                f"{in_zone:>6} {ecart:>10}"
            )

        # Summary totals
        orig_total = sum(s["detected"] for _, s, _ in all_original_stats)
        orig_max = sum(s["total"] for _, s, _ in all_original_stats)
        citrix_total = sum(s["detected"] for _, s, _ in all_citrix_stats)
        citrix_max = sum(s["total"] for _, s, _ in all_citrix_stats)

        # Count targets whose mean position falls inside the calibrated zone
        orig_in_zone = sum(
            1 for _, s, z in all_original_stats
            if s["detected"] >= 1
            and z["x_min"] <= s["x_mean"] <= z["x_max"]
            and z["y_min"] <= s["y_mean"] <= z["y_max"]
        )
        citrix_in_zone = sum(
            1 for _, s, z in all_citrix_stats
            if s["detected"] >= 1
            and z["x_min"] <= s["x_mean"] <= z["x_max"]
            and z["y_min"] <= s["y_mean"] <= z["y_max"]
        )

        # Unreliable elements: low detection rate or excessive variance
        unreliable = []
        for label, s, _ in all_original_stats:
            if s["detected"] < 3:
                unreliable.append(f"{label} (taux {s['rate_str']})")
            elif s.get("x_range", 0) >= _MAX_VARIANCE or s.get("y_range", 0) >= _MAX_VARIANCE:
                unreliable.append(
                    f"{label} (variance X={s.get('x_range', 0):.4f} "
                    f"Y={s.get('y_range', 0):.4f})"
                )

        report_lines.extend([
            "",
            "=" * 80,
            "RÉSUMÉ",
            "=" * 80,
            f" Détection original : {orig_total}/{orig_max} "
            f"({orig_total/orig_max*100:.0f}%)",
            f" Détection Citrix Q20: {citrix_total}/{citrix_max} "
            f"({citrix_total/citrix_max*100:.0f}%)",
            f" Positionnement correct (original) : {orig_in_zone}/{len(all_original_stats)}",
            f" Positionnement correct (Citrix) : {citrix_in_zone}/{len(all_citrix_stats)}",
            "",
        ])

        if unreliable:
            report_lines.append(" ÉLÉMENTS NON FIABLES :")
            for u in unreliable:
                report_lines.append(f" - {u}")
        else:
            report_lines.append(" Tous les éléments sont fiables.")

        report_lines.extend([
            "",
            " NOTES TECHNIQUES :",
            " - qwen2.5vl bbox_2d retourne des pixels relatifs à l'image envoyée",
            " - Normalisation : diviser par les dimensions de l'image (W, H)",
            " - temperature=0.1 donne une variance < 0.003 typiquement",
            "=" * 80,
        ])

        report = "\n".join(report_lines)
        print(report)

        # The test passes if at least 80% of the original detections succeed
        assert orig_total / orig_max >= 0.80, (
            f"Taux de détection global trop bas: {orig_total}/{orig_max}"
        )
|
||||
Reference in New Issue
Block a user