Compare commits
33 Commits
a7de6a488b
...
v3.0
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
99041f0117 | ||
|
|
72a9651b94 | ||
|
|
8589e87a13 | ||
|
|
8a1dfc6e8b | ||
|
|
3bcf59e16f | ||
|
|
46206d9396 | ||
|
|
d3e928bebe | ||
|
|
a679fbb62b | ||
|
|
f0b311306d | ||
|
|
1c5ff42006 | ||
|
|
b09a3df054 | ||
|
|
fceb76de1f | ||
|
|
6d4ff4f215 | ||
|
|
2486e43def | ||
|
|
20b74286f7 | ||
|
|
a1c97504ab | ||
|
|
d6c7346898 | ||
|
|
90ee8ca8f4 | ||
|
|
84a91630e9 | ||
|
|
91614fbff0 | ||
|
|
c1ce6a3964 | ||
|
|
0bd0fbb8c5 | ||
|
|
394342be7e | ||
|
|
6724f43950 | ||
|
|
d99b17394a | ||
|
|
875367dea9 | ||
|
|
a74056ca22 | ||
|
|
6937b94f2a | ||
|
|
4f5c518d3a | ||
|
|
7dec3ab63a | ||
|
|
68d5bb7dd1 | ||
|
|
ef5d595d98 | ||
|
|
5ceee9c393 |
@@ -52,8 +52,9 @@ API_TOKEN = os.environ.get("RPA_API_TOKEN", "")
|
|||||||
MAX_SESSION_DURATION_S = 60 * 60 # 1 heure
|
MAX_SESSION_DURATION_S = 60 * 60 # 1 heure
|
||||||
SESSIONS_ROOT = BASE_DIR / "sessions"
|
SESSIONS_ROOT = BASE_DIR / "sessions"
|
||||||
|
|
||||||
# Paramètres Vision (Crops pour qwen3-vl)
|
# Paramètres Vision (Crops pour la résolution visuelle)
|
||||||
TARGETED_CROP_SIZE = (150, 150)
|
# 80x80 : assez petit pour être discriminant (icônes), assez grand pour le contexte
|
||||||
|
TARGETED_CROP_SIZE = (80, 80)
|
||||||
SCREENSHOT_QUALITY = 85
|
SCREENSHOT_QUALITY = 85
|
||||||
|
|
||||||
# Floutage des données sensibles (conformité AI Act)
|
# Floutage des données sensibles (conformité AI Act)
|
||||||
|
|||||||
@@ -79,6 +79,8 @@ class ActionExecutorV1:
|
|||||||
self._poll_backoff_factor = 1.5 # Multiplicateur en cas d'echec
|
self._poll_backoff_factor = 1.5 # Multiplicateur en cas d'echec
|
||||||
# Token d'authentification API
|
# Token d'authentification API
|
||||||
self._api_token = os.environ.get("RPA_API_TOKEN", "")
|
self._api_token = os.environ.get("RPA_API_TOKEN", "")
|
||||||
|
# Gestionnaire de notifications toast (pour les messages utilisateur)
|
||||||
|
self._notification_manager = None
|
||||||
# Log de la resolution physique pour le diagnostic DPI
|
# Log de la resolution physique pour le diagnostic DPI
|
||||||
self._log_screen_info()
|
self._log_screen_info()
|
||||||
|
|
||||||
@@ -94,6 +96,22 @@ class ActionExecutorV1:
|
|||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.debug(f"Impossible de lire la resolution ecran : {e}")
|
logger.debug(f"Impossible de lire la resolution ecran : {e}")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def notifier(self):
|
||||||
|
"""Instance NotificationManager paresseuse."""
|
||||||
|
if self._notification_manager is None:
|
||||||
|
try:
|
||||||
|
from ..ui.notifications import NotificationManager
|
||||||
|
self._notification_manager = NotificationManager()
|
||||||
|
except Exception as e:
|
||||||
|
logger.debug(f"NotificationManager indisponible : {e}")
|
||||||
|
# Retourner un objet factice qui ne fait rien
|
||||||
|
class _Noop:
|
||||||
|
def replay_target_not_found(self, *a, **kw):
|
||||||
|
return False
|
||||||
|
self._notification_manager = _Noop()
|
||||||
|
return self._notification_manager
|
||||||
|
|
||||||
def _auth_headers(self) -> dict:
|
def _auth_headers(self) -> dict:
|
||||||
"""Headers d'authentification Bearer pour les requetes au serveur."""
|
"""Headers d'authentification Bearer pour les requetes au serveur."""
|
||||||
if self._api_token:
|
if self._api_token:
|
||||||
@@ -107,6 +125,30 @@ class ActionExecutorV1:
|
|||||||
self._sct = mss.mss()
|
self._sct = mss.mss()
|
||||||
return self._sct
|
return self._sct
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _describe_target(target_spec: dict) -> str:
|
||||||
|
"""Construire une description humaine de la cible depuis target_spec.
|
||||||
|
|
||||||
|
Utilisé pour les notifications et le logging quand la cible n'est pas trouvée.
|
||||||
|
"""
|
||||||
|
parts = []
|
||||||
|
by_text = target_spec.get("by_text", "").strip()
|
||||||
|
window = target_spec.get("window_title", "").strip()
|
||||||
|
if by_text:
|
||||||
|
parts.append(f"'{by_text}'")
|
||||||
|
if window:
|
||||||
|
parts.append(f"dans {window}")
|
||||||
|
if not parts:
|
||||||
|
# Fallback sur la vlm_description
|
||||||
|
vlm = target_spec.get("vlm_description", "")
|
||||||
|
if vlm:
|
||||||
|
parts.append(vlm[:60])
|
||||||
|
else:
|
||||||
|
parts.append("un élément")
|
||||||
|
if parts:
|
||||||
|
return " ".join(parts)
|
||||||
|
return "élément inconnu"
|
||||||
|
|
||||||
# =========================================================================
|
# =========================================================================
|
||||||
# Execution legacy (watchdog command.json)
|
# Execution legacy (watchdog command.json)
|
||||||
# =========================================================================
|
# =========================================================================
|
||||||
@@ -135,6 +177,166 @@ class ActionExecutorV1:
|
|||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(f"Echec de l'ordre {action} : {e}")
|
logger.error(f"Echec de l'ordre {action} : {e}")
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Acteur intelligent — décision gemma4 quand le magnétoscope bloque
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
def _actor_decide(self, action: dict, target_spec: dict) -> str:
|
||||||
|
"""Demander à gemma4 de décider quand le magnétoscope ne trouve pas la cible.
|
||||||
|
|
||||||
|
gemma4 reçoit le contexte (action prévue, état de l'écran) et décide :
|
||||||
|
- PASSER : l'état est déjà atteint (ex: onglet déjà actif)
|
||||||
|
- EXECUTER : l'action est nécessaire mais pas trouvable automatiquement
|
||||||
|
- STOPPER : l'état est incohérent, impossible de continuer
|
||||||
|
|
||||||
|
Appelle gemma4 en mode texte avec thinking (Docker port 11435).
|
||||||
|
Fallback : EXECUTER (pause supervisée) si gemma4 indisponible.
|
||||||
|
"""
|
||||||
|
import requests as _requests
|
||||||
|
|
||||||
|
gemma4_port = os.environ.get("GEMMA4_PORT", "11435")
|
||||||
|
by_text = target_spec.get("by_text", "")
|
||||||
|
window_title = target_spec.get("window_title", "")
|
||||||
|
|
||||||
|
# Récupérer le titre de la fenêtre ACTUELLE
|
||||||
|
try:
|
||||||
|
from ..window_info_crossplatform import get_active_window_info
|
||||||
|
current_info = get_active_window_info()
|
||||||
|
current_title = current_info.get("title", "")
|
||||||
|
except Exception:
|
||||||
|
current_title = ""
|
||||||
|
|
||||||
|
prompt = (
|
||||||
|
f"Tu es un robot RPA. L'action suivante est : cliquer sur '{by_text or 'un élément'}' "
|
||||||
|
f"dans '{window_title}'.\n"
|
||||||
|
f"La fenêtre active est \"{current_title}\".\n"
|
||||||
|
f"Dois-je faire cette action ?\n"
|
||||||
|
f"- EXECUTER : l'action est nécessaire\n"
|
||||||
|
f"- PASSER : le résultat est déjà atteint\n"
|
||||||
|
f"- STOPPER : état incohérent\n"
|
||||||
|
f"Réponds UN SEUL MOT."
|
||||||
|
)
|
||||||
|
|
||||||
|
try:
|
||||||
|
resp = _requests.post(
|
||||||
|
f"http://localhost:{gemma4_port}/api/chat",
|
||||||
|
json={
|
||||||
|
"model": "gemma4:e4b",
|
||||||
|
"messages": [{"role": "user", "content": prompt}],
|
||||||
|
"stream": False,
|
||||||
|
"think": True,
|
||||||
|
"options": {"temperature": 0.1, "num_predict": 500},
|
||||||
|
},
|
||||||
|
timeout=30,
|
||||||
|
)
|
||||||
|
content = resp.json().get("message", {}).get("content", "").strip().upper()
|
||||||
|
# Extraire le mot clé
|
||||||
|
for keyword in ("PASSER", "EXECUTER", "STOPPER"):
|
||||||
|
if keyword in content:
|
||||||
|
logger.info(f"Acteur gemma4 décide : {keyword}")
|
||||||
|
return keyword
|
||||||
|
logger.warning(f"Acteur gemma4 réponse inattendue : {content[:50]}")
|
||||||
|
return "EXECUTER"
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Acteur gemma4 indisponible : {e}")
|
||||||
|
return "EXECUTER"
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Observer — pré-analyse écran avant chaque action
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
def _observe_screen(
|
||||||
|
self, server_url: str, target_spec: dict,
|
||||||
|
screen_width: int, screen_height: int,
|
||||||
|
) -> dict:
|
||||||
|
"""Observer : analyser l'écran AVANT de résoudre la cible.
|
||||||
|
|
||||||
|
Détecte les popups, dialogues, et états inattendus AVANT de tenter
|
||||||
|
la résolution visuelle. C'est la "pre-exploration" qui améliore
|
||||||
|
dramatiquement les performances (cf. benchmarks Claude Computer Use).
|
||||||
|
|
||||||
|
Stratégie en 2 temps (rapide puis intelligent) :
|
||||||
|
1. Vérification rapide locale : titre fenêtre, popup connue
|
||||||
|
2. Si serveur disponible : envoi du screenshot pour pré-analyse VLM
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
None si écran OK (pas de problème détecté)
|
||||||
|
Dict avec screen_state ("ok"|"popup"|"unexpected"), détails, coords popup
|
||||||
|
"""
|
||||||
|
import requests as _requests
|
||||||
|
|
||||||
|
# Étape 1 : vérification rapide locale (titre fenêtre)
|
||||||
|
try:
|
||||||
|
from ..window_info_crossplatform import get_active_window_info
|
||||||
|
current_info = get_active_window_info()
|
||||||
|
current_title = current_info.get("title", "").lower()
|
||||||
|
|
||||||
|
# Patterns de popup/dialogue courants (Windows FR + EN)
|
||||||
|
popup_patterns = [
|
||||||
|
"enregistrer", "sauvegarder", "voulez-vous",
|
||||||
|
"confirmer", "confirmation", "avertissement",
|
||||||
|
"erreur", "error", "warning", "alert",
|
||||||
|
"do you want", "save as", "are you sure",
|
||||||
|
]
|
||||||
|
for pattern in popup_patterns:
|
||||||
|
if pattern in current_title:
|
||||||
|
logger.info(f"Observer : popup détectée par titre — '{current_title}'")
|
||||||
|
# On ne peut pas résoudre les coords juste par le titre
|
||||||
|
# → retourner popup sans coords, le caller fera handle_popup_vlm()
|
||||||
|
return {
|
||||||
|
"screen_state": "popup",
|
||||||
|
"popup_label": current_title,
|
||||||
|
"popup_coords": None,
|
||||||
|
"detail": f"Popup détectée par titre : {current_title}",
|
||||||
|
}
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# Étape 2 : pré-analyse serveur (si disponible)
|
||||||
|
if not server_url:
|
||||||
|
return None # Pas de serveur → pas de pré-analyse avancée
|
||||||
|
|
||||||
|
# Envoyer le screenshot au serveur pour détection popup via VLM
|
||||||
|
screenshot_b64 = self._capture_screenshot_b64(max_width=0, quality=60)
|
||||||
|
if not screenshot_b64:
|
||||||
|
return None
|
||||||
|
|
||||||
|
try:
|
||||||
|
url = f"{server_url}/traces/stream/replay/pre_analyze"
|
||||||
|
from ..config import API_TOKEN
|
||||||
|
headers = {"Content-Type": "application/json"}
|
||||||
|
if API_TOKEN:
|
||||||
|
headers["Authorization"] = f"Bearer {API_TOKEN}"
|
||||||
|
|
||||||
|
resp = _requests.post(
|
||||||
|
url,
|
||||||
|
json={
|
||||||
|
"screenshot_b64": screenshot_b64,
|
||||||
|
"expected_state": target_spec.get("expected_state", ""),
|
||||||
|
"window_title": target_spec.get("window_title", ""),
|
||||||
|
"screen_width": screen_width,
|
||||||
|
"screen_height": screen_height,
|
||||||
|
},
|
||||||
|
headers=headers,
|
||||||
|
timeout=10,
|
||||||
|
)
|
||||||
|
|
||||||
|
if resp.ok:
|
||||||
|
data = resp.json()
|
||||||
|
state = data.get("screen_state", "ok")
|
||||||
|
if state != "ok":
|
||||||
|
logger.info(f"Observer serveur : {state} — {data.get('detail', '')}")
|
||||||
|
return data
|
||||||
|
# Serveur ne supporte pas encore /pre_analyze → silencieux
|
||||||
|
except _requests.Timeout:
|
||||||
|
logger.debug("Observer : serveur timeout (10s)")
|
||||||
|
except _requests.ConnectionError:
|
||||||
|
pass # Serveur indisponible — pas grave, on continue sans
|
||||||
|
except Exception as e:
|
||||||
|
logger.debug(f"Observer : erreur serveur — {e}")
|
||||||
|
|
||||||
|
return None # Écran OK ou pas de pré-analyse possible
|
||||||
|
|
||||||
# =========================================================================
|
# =========================================================================
|
||||||
# Execution replay (polling serveur)
|
# Execution replay (polling serveur)
|
||||||
# =========================================================================
|
# =========================================================================
|
||||||
@@ -191,47 +393,146 @@ class ActionExecutorV1:
|
|||||||
x_pct = action.get("x_pct", 0.0)
|
x_pct = action.get("x_pct", 0.0)
|
||||||
y_pct = action.get("y_pct", 0.0)
|
y_pct = action.get("y_pct", 0.0)
|
||||||
|
|
||||||
if visual_mode and target_spec and server_url:
|
# Extraire le nom de l'application depuis un titre de fenêtre
|
||||||
resolved = self._resolve_target_visual(
|
def _app_name(title):
|
||||||
server_url, target_spec, x_pct, y_pct, width, height
|
for sep in [" – ", " - ", " — "]:
|
||||||
)
|
if sep in title:
|
||||||
if resolved:
|
return title.split(sep)[-1].strip().lower()
|
||||||
x_pct = resolved["x_pct"]
|
return title.strip().lower()
|
||||||
y_pct = resolved["y_pct"]
|
|
||||||
result["visual_resolved"] = resolved.get("resolved", False)
|
|
||||||
# Métriques de résolution
|
|
||||||
result["resolution_method"] = resolved.get("resolution_method", "")
|
|
||||||
result["resolution_score"] = resolved.get("resolution_score", 0.0)
|
|
||||||
result["resolution_elapsed_ms"] = resolved.get("resolution_elapsed_ms", 0.0)
|
|
||||||
if resolved.get("resolved"):
|
|
||||||
logger.info(
|
|
||||||
f"Visual resolve OK [{result['resolution_method']}] "
|
|
||||||
f"{result['resolution_elapsed_ms']:.0f}ms : "
|
|
||||||
f"{resolved.get('matched_element', {}).get('label', '?')} "
|
|
||||||
f"-> ({x_pct:.4f}, {y_pct:.4f})"
|
|
||||||
)
|
|
||||||
|
|
||||||
# ---- Hash AVANT l'action (pour verification post-action) ----
|
# ── Pré-vérification : titre fenêtre ──
|
||||||
# Seules les actions click et key_combo sont verifiees : elles
|
# Vérifier que l'écran est dans l'état attendu AVANT de cliquer.
|
||||||
# provoquent un changement visible de l'ecran (ouverture de fenetre,
|
if visual_mode and target_spec:
|
||||||
# focus, etc.). Les actions type/wait/scroll ne sont pas verifiees.
|
expected_title = target_spec.get("window_title", "")
|
||||||
|
if expected_title and expected_title != "unknown_window":
|
||||||
|
from ..window_info_crossplatform import get_active_window_info
|
||||||
|
current_info = get_active_window_info()
|
||||||
|
current_title = current_info.get("title", "")
|
||||||
|
|
||||||
|
current_app = _app_name(current_title)
|
||||||
|
expected_app = _app_name(expected_title)
|
||||||
|
title_match = (
|
||||||
|
current_app == expected_app
|
||||||
|
or expected_title.lower() in current_title.lower()
|
||||||
|
or current_title.lower() in expected_title.lower()
|
||||||
|
)
|
||||||
|
# Ignorer la fenêtre de Léa elle-même (overlay agent)
|
||||||
|
_lea_windows = ("léa", "lea —", "léa —", "lea -", "léa -", "lea assistante", "léa assistante")
|
||||||
|
is_lea_window = any(p in current_title.lower() for p in _lea_windows)
|
||||||
|
|
||||||
|
if not title_match and not is_lea_window:
|
||||||
|
logger.warning(
|
||||||
|
f"PRÉ-VÉRIF ÉCHOUÉE : attendu '{expected_title}', "
|
||||||
|
f"actuel '{current_title}' — STOP"
|
||||||
|
)
|
||||||
|
print(f" [PRÉ-VÉRIF] STOP — fenêtre '{current_title}' ≠ attendu '{expected_title}'")
|
||||||
|
result["success"] = False
|
||||||
|
result["error"] = f"Fenêtre incorrecte: '{current_title}' (attendu: '{expected_title}')"
|
||||||
|
return result
|
||||||
|
elif is_lea_window:
|
||||||
|
logger.info(f"PRÉ-VÉRIF : fenêtre Léa détectée, ignorée — on continue")
|
||||||
|
else:
|
||||||
|
logger.info(f"PRÉ-VÉRIF OK : '{current_title}'")
|
||||||
|
|
||||||
|
# ── OBSERVER : pré-analyse écran avant résolution ──
|
||||||
|
# Détecte popups, dialogues, états inattendus AVANT de chercher la cible.
|
||||||
|
# Si un problème est détecté, on le gère tout de suite (pas après l'échec).
|
||||||
|
# Ref: docs/VISION_RPA_INTELLIGENT.md — "Il observe"
|
||||||
|
if visual_mode and target_spec and action_type == "click":
|
||||||
|
observation = self._observe_screen(server_url, target_spec, width, height)
|
||||||
|
if observation:
|
||||||
|
obs_state = observation.get("screen_state", "ok")
|
||||||
|
|
||||||
|
if obs_state == "popup":
|
||||||
|
# Popup détectée AVANT la résolution — la fermer
|
||||||
|
popup_label = observation.get("popup_label", "popup")
|
||||||
|
popup_coords = observation.get("popup_coords")
|
||||||
|
print(f" [OBSERVER] Popup détectée : '{popup_label}' — fermeture")
|
||||||
|
logger.info(f"Observer : popup '{popup_label}' détectée avant résolution")
|
||||||
|
if popup_coords:
|
||||||
|
real_x = int(popup_coords["x_pct"] * width)
|
||||||
|
real_y = int(popup_coords["y_pct"] * height)
|
||||||
|
self._click((real_x, real_y), "left")
|
||||||
|
time.sleep(1.0)
|
||||||
|
print(f" [OBSERVER] Popup fermée — reprise du flow normal")
|
||||||
|
else:
|
||||||
|
# Pas de coordonnées → fallback sur handle_popup_vlm classique
|
||||||
|
self._handle_popup_vlm()
|
||||||
|
|
||||||
|
elif obs_state == "unexpected":
|
||||||
|
# État inattendu (pas la bonne page/écran)
|
||||||
|
detail = observation.get("detail", "état inattendu")
|
||||||
|
print(f" [OBSERVER] État inattendu : {detail}")
|
||||||
|
logger.warning(f"Observer : état inattendu — {detail}")
|
||||||
|
# Demander à l'acteur (gemma4) de décider
|
||||||
|
decision = self._actor_decide(action, target_spec)
|
||||||
|
if decision == "STOPPER":
|
||||||
|
result["success"] = False
|
||||||
|
result["error"] = f"observer_unexpected:{detail}"
|
||||||
|
return result
|
||||||
|
elif decision == "PASSER":
|
||||||
|
result["success"] = True
|
||||||
|
result["warning"] = "observer_skip"
|
||||||
|
return result
|
||||||
|
# EXECUTER → continuer normalement
|
||||||
|
|
||||||
|
if visual_mode and target_spec and server_url:
|
||||||
|
# ── GROUNDING : localisation pure via GroundingEngine ──
|
||||||
|
from .grounding import GroundingEngine
|
||||||
|
grounding = GroundingEngine(self)
|
||||||
|
grounding_result = grounding.locate(
|
||||||
|
server_url, target_spec, x_pct, y_pct, width, height,
|
||||||
|
)
|
||||||
|
if grounding_result.found:
|
||||||
|
x_pct = grounding_result.x_pct
|
||||||
|
y_pct = grounding_result.y_pct
|
||||||
|
result["visual_resolved"] = True
|
||||||
|
result["resolution_method"] = grounding_result.method
|
||||||
|
result["resolution_score"] = grounding_result.score
|
||||||
|
result["resolution_elapsed_ms"] = grounding_result.elapsed_ms
|
||||||
|
logger.info(
|
||||||
|
f"Grounding OK [{grounding_result.method}] "
|
||||||
|
f"{grounding_result.elapsed_ms:.0f}ms : "
|
||||||
|
f"{grounding_result.detail or '?'} "
|
||||||
|
f"-> ({x_pct:.4f}, {y_pct:.4f})"
|
||||||
|
)
|
||||||
|
|
||||||
|
# ---- Screenshot + hash AVANT l'action (pour le Critic post-action) ----
|
||||||
|
# Le serveur utilise screenshot_before + screenshot_after pour évaluer
|
||||||
|
# si l'action a eu l'effet attendu (Critic sémantique VLM).
|
||||||
needs_screen_check = action_type in ("click", "key_combo")
|
needs_screen_check = action_type in ("click", "key_combo")
|
||||||
hash_before = ""
|
hash_before = ""
|
||||||
|
screenshot_before_b64 = ""
|
||||||
if needs_screen_check:
|
if needs_screen_check:
|
||||||
hash_before = self._quick_screenshot_hash()
|
hash_before = self._quick_screenshot_hash()
|
||||||
|
screenshot_before_b64 = self._capture_screenshot_b64()
|
||||||
|
|
||||||
if action_type == "click":
|
if action_type == "click":
|
||||||
# Si visual_mode est activé, le resolve DOIT réussir.
|
# Si visual_mode est activé, le resolve DOIT réussir.
|
||||||
# Pas de fallback blind — on arrête le replay si la cible
|
# Pas de fallback blind — on arrête le replay si la cible
|
||||||
# n'est pas trouvée visuellement. C'est un RPA VISUEL.
|
# n'est pas trouvée visuellement. C'est un RPA VISUEL.
|
||||||
if visual_mode and not result.get("visual_resolved"):
|
if visual_mode and not result.get("visual_resolved"):
|
||||||
# Avant de STOP, vérifier s'il y a une popup imprévue via le VLM
|
# ── Policy : décider quoi faire quand grounding échoue ──
|
||||||
print(f" [POPUP-VLM] Cible non trouvée — vérification popup imprévue...")
|
from .policy import PolicyEngine, Decision
|
||||||
logger.info(f"Action {action_id} : cible non trouvée, tentative gestion popup VLM")
|
policy = PolicyEngine(self)
|
||||||
popup_handled = self._handle_popup_vlm()
|
target_desc = self._describe_target(target_spec)
|
||||||
if popup_handled:
|
retry_count = action.get("_retry_count", 0)
|
||||||
# Popup fermée — re-tenter le resolve
|
|
||||||
print(f" [POPUP-VLM] Popup gérée, re-tentative du resolve visuel...")
|
policy_decision = policy.decide(
|
||||||
|
action=action, target_spec=target_spec,
|
||||||
|
retry_count=retry_count, max_retries=1,
|
||||||
|
)
|
||||||
|
print(
|
||||||
|
f" [POLICY] {policy_decision.decision.value} — "
|
||||||
|
f"{policy_decision.reason}"
|
||||||
|
)
|
||||||
|
logger.info(
|
||||||
|
f"Action {action_id} : Policy → {policy_decision.decision.value} "
|
||||||
|
f"({policy_decision.reason})"
|
||||||
|
)
|
||||||
|
|
||||||
|
if policy_decision.decision == Decision.RETRY:
|
||||||
|
# Re-tenter le grounding après correction (popup fermée, etc.)
|
||||||
resolved2 = self._resolve_target_visual(
|
resolved2 = self._resolve_target_visual(
|
||||||
server_url, target_spec, x_pct, y_pct, width, height
|
server_url, target_spec, x_pct, y_pct, width, height
|
||||||
)
|
)
|
||||||
@@ -239,25 +540,37 @@ class ActionExecutorV1:
|
|||||||
x_pct = resolved2["x_pct"]
|
x_pct = resolved2["x_pct"]
|
||||||
y_pct = resolved2["y_pct"]
|
y_pct = resolved2["y_pct"]
|
||||||
result["visual_resolved"] = True
|
result["visual_resolved"] = True
|
||||||
print(
|
print(f" [POLICY] Re-resolve OK après {policy_decision.action_taken}")
|
||||||
f" [POPUP-VLM] Re-resolve OK après popup : "
|
|
||||||
f"({x_pct:.3f}, {y_pct:.3f})"
|
|
||||||
)
|
|
||||||
logger.info(
|
|
||||||
f"Action {action_id} : re-resolve OK après popup "
|
|
||||||
f"({x_pct:.3f}, {y_pct:.3f})"
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
|
# Re-resolve échoué — SUPERVISE (rendre la main)
|
||||||
result["success"] = False
|
result["success"] = False
|
||||||
result["error"] = "Élément non trouvé même après gestion popup"
|
result["error"] = "target_not_found"
|
||||||
print(f" [ERREUR] Élément toujours non trouvé après gestion popup — STOP")
|
result["target_description"] = target_desc
|
||||||
logger.error(f"Action {action_id} : élément non trouvé après popup, replay stoppé")
|
result["target_spec"] = target_spec
|
||||||
|
result["screenshot"] = self._capture_screenshot_b64()
|
||||||
|
result["warning"] = "visual_resolve_failed"
|
||||||
|
self.notifier.replay_target_not_found(target_desc)
|
||||||
return result
|
return result
|
||||||
else:
|
|
||||||
|
elif policy_decision.decision == Decision.SKIP:
|
||||||
|
result["success"] = True
|
||||||
|
result["warning"] = "policy_skip"
|
||||||
|
return result
|
||||||
|
|
||||||
|
elif policy_decision.decision == Decision.ABORT:
|
||||||
result["success"] = False
|
result["success"] = False
|
||||||
result["error"] = "Visual resolve échoué — cible non trouvée à l'écran"
|
result["error"] = f"policy_abort:{target_desc}"
|
||||||
print(f" [ERREUR] Visual resolve échoué, pas de popup détectée — STOP")
|
self.notifier.replay_target_not_found(target_desc)
|
||||||
logger.error(f"Action {action_id} : visual resolve échoué, pas de popup, replay stoppé")
|
return result
|
||||||
|
|
||||||
|
else: # SUPERVISE ou CONTINUE
|
||||||
|
result["success"] = False
|
||||||
|
result["error"] = "target_not_found"
|
||||||
|
result["target_description"] = target_desc
|
||||||
|
result["target_spec"] = target_spec
|
||||||
|
result["screenshot"] = self._capture_screenshot_b64()
|
||||||
|
result["warning"] = "visual_resolve_failed"
|
||||||
|
self.notifier.replay_target_not_found(target_desc)
|
||||||
return result
|
return result
|
||||||
|
|
||||||
real_x = int(x_pct * width)
|
real_x = int(x_pct * width)
|
||||||
@@ -269,12 +582,43 @@ class ActionExecutorV1:
|
|||||||
f"({real_x}, {real_y}) sur ({width}x{height}), bouton={button}"
|
f"({real_x}, {real_y}) sur ({width}x{height}), bouton={button}"
|
||||||
)
|
)
|
||||||
self._click((real_x, real_y), button)
|
self._click((real_x, real_y), button)
|
||||||
print(f" [CLICK] Termine.")
|
|
||||||
logger.info(
|
logger.info(
|
||||||
f"Replay click [{mode}] : ({x_pct:.3f}, {y_pct:.3f}) -> "
|
f"Replay click [{mode}] : ({x_pct:.3f}, {y_pct:.3f}) -> "
|
||||||
f"({real_x}, {real_y}) sur ({width}x{height})"
|
f"({real_x}, {real_y}) sur ({width}x{height})"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# ── Post-vérification : polling du titre fenêtre ──
|
||||||
|
# On attend que le titre change vers celui attendu (max 10s)
|
||||||
|
# C'est 100% visuel — pas de wait fixe arbitraire
|
||||||
|
expected_after = action.get("expected_window_title", "")
|
||||||
|
if expected_after:
|
||||||
|
from ..window_info_crossplatform import get_active_window_info
|
||||||
|
max_wait = 10.0
|
||||||
|
poll_interval = 0.3
|
||||||
|
elapsed_wait = 0.0
|
||||||
|
matched = False
|
||||||
|
while elapsed_wait < max_wait:
|
||||||
|
time.sleep(poll_interval)
|
||||||
|
elapsed_wait += poll_interval
|
||||||
|
post_info = get_active_window_info()
|
||||||
|
post_title = post_info.get("title", "")
|
||||||
|
post_app = _app_name(post_title)
|
||||||
|
expected_app_after = _app_name(expected_after)
|
||||||
|
if (post_app == expected_app_after
|
||||||
|
or expected_after.lower() in post_title.lower()
|
||||||
|
or post_title.lower() in expected_after.lower()):
|
||||||
|
matched = True
|
||||||
|
break
|
||||||
|
if matched:
|
||||||
|
print(f" [POST-VÉRIF] OK en {elapsed_wait:.1f}s — '{post_title}'")
|
||||||
|
logger.info(f"POST-VÉRIF OK en {elapsed_wait:.1f}s : '{post_title}'")
|
||||||
|
else:
|
||||||
|
print(f" [POST-VÉRIF] TIMEOUT {max_wait}s — '{post_title}' ≠ '{expected_after}'")
|
||||||
|
logger.warning(f"POST-VÉRIF TIMEOUT : '{post_title}' ≠ '{expected_after}'")
|
||||||
|
result["warning"] = f"post_verif_timeout:{post_title}"
|
||||||
|
else:
|
||||||
|
print(f" [CLICK] Terminé.")
|
||||||
|
|
||||||
elif action_type == "type":
|
elif action_type == "type":
|
||||||
text = action.get("text", "")
|
text = action.get("text", "")
|
||||||
raw_keys = action.get("raw_keys")
|
raw_keys = action.get("raw_keys")
|
||||||
@@ -353,6 +697,10 @@ class ActionExecutorV1:
|
|||||||
|
|
||||||
result["success"] = True
|
result["success"] = True
|
||||||
|
|
||||||
|
# Stocker le screenshot_before pour le Critic côté serveur
|
||||||
|
if screenshot_before_b64:
|
||||||
|
result["screenshot_before"] = screenshot_before_b64
|
||||||
|
|
||||||
# ---- Verification post-action : l'ecran a-t-il change ? ----
|
# ---- Verification post-action : l'ecran a-t-il change ? ----
|
||||||
# Verifie UNIQUEMENT, ne tente PAS de gerer les popups
|
# Verifie UNIQUEMENT, ne tente PAS de gerer les popups
|
||||||
# (Enter/Escape perturbent l'application).
|
# (Enter/Escape perturbent l'application).
|
||||||
@@ -362,6 +710,17 @@ class ActionExecutorV1:
|
|||||||
hash_before, timeout_ms=3000
|
hash_before, timeout_ms=3000
|
||||||
)
|
)
|
||||||
if not screen_changed:
|
if not screen_changed:
|
||||||
|
# ── Recovery : tenter un rollback si l'action n'a pas eu d'effet ──
|
||||||
|
from .recovery import RecoveryEngine
|
||||||
|
recovery = RecoveryEngine(self)
|
||||||
|
recovery_result = recovery.attempt(
|
||||||
|
failed_action=action,
|
||||||
|
critic_detail="L'écran n'a pas changé après l'action",
|
||||||
|
)
|
||||||
|
if recovery_result.success:
|
||||||
|
print(f" [RECOVERY] {recovery_result.detail}")
|
||||||
|
result["recovery"] = recovery_result.to_dict()
|
||||||
|
|
||||||
result["success"] = False
|
result["success"] = False
|
||||||
result["warning"] = "no_screen_change"
|
result["warning"] = "no_screen_change"
|
||||||
result["error"] = "Ecran inchange apres l'action"
|
result["error"] = "Ecran inchange apres l'action"
|
||||||
@@ -395,10 +754,15 @@ class ActionExecutorV1:
|
|||||||
) -> dict:
|
) -> dict:
|
||||||
"""Résoudre la position d'un clic visuellement.
|
"""Résoudre la position d'un clic visuellement.
|
||||||
|
|
||||||
Stratégie hybride en cascade :
|
Stratégie en cascade — compréhension sémantique d'abord :
|
||||||
1. Template matching avec le crop anchor (rapide, fiable si l'UI n'a pas changé)
|
1. Serveur resolve_target (SomEngine + VLM) — comprend CE QU'ON CHERCHE
|
||||||
2. Serveur resolve_target (SomEngine + VLM, si serveur accessible)
|
2. Template matching local (fallback rapide si serveur indisponible)
|
||||||
3. VLM local (fallback pour dev/test Linux)
|
3. VLM local (fallback dev/test Linux)
|
||||||
|
|
||||||
|
Le template matching compare des pixels et donne des faux positifs quand
|
||||||
|
l'écran n'est pas dans le même état que l'enregistrement. Le SomEngine
|
||||||
|
comprend sémantiquement les éléments UI (bouton, menu, texte) et trouve
|
||||||
|
le bon élément peu importe l'état de l'écran.
|
||||||
"""
|
"""
|
||||||
import time as _time
|
import time as _time
|
||||||
t_start = _time.time()
|
t_start = _time.time()
|
||||||
@@ -418,14 +782,8 @@ class ActionExecutorV1:
|
|||||||
result["resolution_elapsed_ms"] = round(elapsed_ms, 1)
|
result["resolution_elapsed_ms"] = round(elapsed_ms, 1)
|
||||||
return result
|
return result
|
||||||
|
|
||||||
# ---- ÉTAPE 1 : Template matching avec le crop anchor ----
|
# ---- ÉTAPE 1 : Résolution serveur (SomEngine + VLM) ----
|
||||||
anchor_b64 = target_spec.get("anchor_image_base64", "")
|
# Le serveur comprend sémantiquement ce qu'on cherche. Pas de faux positifs.
|
||||||
if anchor_b64:
|
|
||||||
tm_result = self._template_match_anchor(screenshot_b64, anchor_b64, screen_width, screen_height)
|
|
||||||
if tm_result and tm_result.get("resolved"):
|
|
||||||
return _with_metrics(tm_result)
|
|
||||||
|
|
||||||
# ---- ÉTAPE 2 : Résolution serveur (SomEngine + VLM) ----
|
|
||||||
if server_url:
|
if server_url:
|
||||||
server_result = self._server_resolve_target(
|
server_result = self._server_resolve_target(
|
||||||
server_url, screenshot_b64, target_spec,
|
server_url, screenshot_b64, target_spec,
|
||||||
@@ -434,7 +792,14 @@ class ActionExecutorV1:
|
|||||||
if server_result and server_result.get("resolved"):
|
if server_result and server_result.get("resolved"):
|
||||||
return _with_metrics(server_result)
|
return _with_metrics(server_result)
|
||||||
|
|
||||||
# ---- ÉTAPE 3 : VLM local (fallback dev/test, si Ollama accessible) ----
|
# ---- ÉTAPE 2 : Template matching local (fallback si serveur down) ----
|
||||||
|
anchor_b64 = target_spec.get("anchor_image_base64", "")
|
||||||
|
if anchor_b64:
|
||||||
|
tm_result = self._template_match_anchor(screenshot_b64, anchor_b64, screen_width, screen_height)
|
||||||
|
if tm_result and tm_result.get("resolved"):
|
||||||
|
return _with_metrics(tm_result)
|
||||||
|
|
||||||
|
# ---- ÉTAPE 3 : VLM local (fallback dev/test Linux) ----
|
||||||
by_text = target_spec.get("by_text", "")
|
by_text = target_spec.get("by_text", "")
|
||||||
vlm_description = target_spec.get("vlm_description", "")
|
vlm_description = target_spec.get("vlm_description", "")
|
||||||
if vlm_description or by_text:
|
if vlm_description or by_text:
|
||||||
@@ -444,10 +809,6 @@ class ActionExecutorV1:
|
|||||||
if hybrid_result and hybrid_result.get("resolved"):
|
if hybrid_result and hybrid_result.get("resolved"):
|
||||||
return _with_metrics(hybrid_result)
|
return _with_metrics(hybrid_result)
|
||||||
|
|
||||||
vlm_result = self._vlm_direct_resolve(screenshot_b64, target_spec)
|
|
||||||
if vlm_result and vlm_result.get("resolved"):
|
|
||||||
return _with_metrics(vlm_result)
|
|
||||||
|
|
||||||
print(" [VISUAL] Toutes les méthodes ont échoué")
|
print(" [VISUAL] Toutes les méthodes ont échoué")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
@@ -634,18 +995,23 @@ class ActionExecutorV1:
|
|||||||
"What is the exact text label of this element? "
|
"What is the exact text label of this element? "
|
||||||
"Answer ONLY the text visible on the element (button text, label, menu item)."
|
"Answer ONLY the text visible on the element (button text, label, menu item)."
|
||||||
)
|
)
|
||||||
prefill = "The text is: "
|
# Prefill pour les modèles thinking (qwen3) — skip la phase de réflexion
|
||||||
|
_vlm_model_ident = os.environ.get("RPA_VLM_MODEL", "gemma4:e4b")
|
||||||
|
_is_thinking_ident = "qwen3" in _vlm_model_ident.lower()
|
||||||
|
|
||||||
|
messages_ident = [
|
||||||
|
{
|
||||||
|
"role": "system",
|
||||||
|
"content": "You read text from UI screenshots. Answer briefly with just the text.",
|
||||||
|
},
|
||||||
|
{"role": "user", "content": prompt, "images": [screenshot_b64]},
|
||||||
|
]
|
||||||
|
if _is_thinking_ident:
|
||||||
|
messages_ident.append({"role": "assistant", "content": "The text is: "})
|
||||||
|
|
||||||
payload = {
|
payload = {
|
||||||
"model": os.environ.get("RPA_VLM_MODEL", "qwen3-vl:8b"),
|
"model": _vlm_model_ident,
|
||||||
"messages": [
|
"messages": messages_ident,
|
||||||
{
|
|
||||||
"role": "system",
|
|
||||||
"content": "You read text from UI screenshots. Answer briefly with just the text.",
|
|
||||||
},
|
|
||||||
{"role": "user", "content": prompt, "images": [screenshot_b64]},
|
|
||||||
{"role": "assistant", "content": prefill},
|
|
||||||
],
|
|
||||||
"stream": False,
|
"stream": False,
|
||||||
"think": False,
|
"think": False,
|
||||||
"options": {"temperature": 0.1, "num_predict": 30, "num_ctx": 8192},
|
"options": {"temperature": 0.1, "num_predict": 30, "num_ctx": 8192},
|
||||||
@@ -760,16 +1126,21 @@ Example: x_pct=0.50, y_pct=0.30"""
|
|||||||
ollama_host = os.environ.get("RPA_SERVER_HOST", "localhost")
|
ollama_host = os.environ.get("RPA_SERVER_HOST", "localhost")
|
||||||
ollama_url = f"http://{ollama_host}:11434/api/chat"
|
ollama_url = f"http://{ollama_host}:11434/api/chat"
|
||||||
|
|
||||||
# Prefill plus explicite pour guider la réponse
|
# Prefill pour les modèles thinking (qwen3) — évite le mode réflexion >180s
|
||||||
prefill = '{"x_pct": 0.'
|
_vlm_model = os.environ.get("RPA_VLM_MODEL", "gemma4:e4b")
|
||||||
|
_is_thinking = "qwen3" in _vlm_model.lower()
|
||||||
|
prefill = '{"x_pct": 0.' if _is_thinking else ""
|
||||||
|
|
||||||
|
messages = [
|
||||||
|
{"role": "system", "content": "You locate UI elements on screenshots. Reply with JSON only: {\"x_pct\": 0.XX, \"y_pct\": 0.XX, \"confidence\": 0.XX}"},
|
||||||
|
{"role": "user", "content": prompt, "images": images},
|
||||||
|
]
|
||||||
|
if prefill:
|
||||||
|
messages.append({"role": "assistant", "content": prefill})
|
||||||
|
|
||||||
payload = {
|
payload = {
|
||||||
"model": os.environ.get("RPA_VLM_MODEL", "qwen3-vl:8b"),
|
"model": _vlm_model,
|
||||||
"messages": [
|
"messages": messages,
|
||||||
{"role": "system", "content": "You locate UI elements on screenshots. Reply with JSON only: {\"x_pct\": 0.XX, \"y_pct\": 0.XX, \"confidence\": 0.XX}"},
|
|
||||||
{"role": "user", "content": prompt, "images": images},
|
|
||||||
{"role": "assistant", "content": prefill},
|
|
||||||
],
|
|
||||||
"stream": False,
|
"stream": False,
|
||||||
"think": False,
|
"think": False,
|
||||||
"options": {"temperature": 0.1, "num_predict": 60, "num_ctx": 8192},
|
"options": {"temperature": 0.1, "num_predict": 60, "num_ctx": 8192},
|
||||||
@@ -922,9 +1293,14 @@ Example: x_pct=0.50, y_pct=0.30"""
|
|||||||
"error": result.get("error"),
|
"error": result.get("error"),
|
||||||
"warning": result.get("warning"),
|
"warning": result.get("warning"),
|
||||||
"screenshot": result.get("screenshot"),
|
"screenshot": result.get("screenshot"),
|
||||||
|
"screenshot_after": result.get("screenshot"),
|
||||||
|
"screenshot_before": result.get("screenshot_before"),
|
||||||
"resolution_method": result.get("resolution_method"),
|
"resolution_method": result.get("resolution_method"),
|
||||||
"resolution_score": result.get("resolution_score"),
|
"resolution_score": result.get("resolution_score"),
|
||||||
"resolution_elapsed_ms": result.get("resolution_elapsed_ms"),
|
"resolution_elapsed_ms": result.get("resolution_elapsed_ms"),
|
||||||
|
# Champs enrichis pour target_not_found (pause supervisée)
|
||||||
|
"target_description": result.get("target_description"),
|
||||||
|
"target_spec": result.get("target_spec"),
|
||||||
}
|
}
|
||||||
try:
|
try:
|
||||||
resp2 = requests.post(
|
resp2 = requests.post(
|
||||||
@@ -1075,21 +1451,26 @@ Example: x_pct=0.50, y_pct=0.30"""
|
|||||||
"If no popup: answer NO_POPUP"
|
"If no popup: answer NO_POPUP"
|
||||||
)
|
)
|
||||||
|
|
||||||
prefill = "The button to click is: "
|
# Prefill pour les modèles thinking (qwen3) — skip la phase de réflexion
|
||||||
|
_vlm_model_popup = os.environ.get("RPA_VLM_MODEL", "gemma4:e4b")
|
||||||
|
_is_thinking_popup = "qwen3" in _vlm_model_popup.lower()
|
||||||
|
|
||||||
|
messages_popup = [
|
||||||
|
{
|
||||||
|
"role": "system",
|
||||||
|
"content": (
|
||||||
|
"You analyze screenshots to detect popup dialogs. "
|
||||||
|
"Answer briefly with just the button text. No JSON, no coordinates."
|
||||||
|
),
|
||||||
|
},
|
||||||
|
{"role": "user", "content": prompt, "images": [screenshot_b64]},
|
||||||
|
]
|
||||||
|
if _is_thinking_popup:
|
||||||
|
messages_popup.append({"role": "assistant", "content": "The button to click is: "})
|
||||||
|
|
||||||
payload = {
|
payload = {
|
||||||
"model": os.environ.get("RPA_VLM_MODEL", "qwen3-vl:8b"),
|
"model": _vlm_model_popup,
|
||||||
"messages": [
|
"messages": messages_popup,
|
||||||
{
|
|
||||||
"role": "system",
|
|
||||||
"content": (
|
|
||||||
"You analyze screenshots to detect popup dialogs. "
|
|
||||||
"Answer briefly with just the button text. No JSON, no coordinates."
|
|
||||||
),
|
|
||||||
},
|
|
||||||
{"role": "user", "content": prompt, "images": [screenshot_b64]},
|
|
||||||
{"role": "assistant", "content": prefill},
|
|
||||||
],
|
|
||||||
"stream": False,
|
"stream": False,
|
||||||
"think": False,
|
"think": False,
|
||||||
"options": {"temperature": 0.1, "num_predict": 30, "num_ctx": 8192},
|
"options": {"temperature": 0.1, "num_predict": 30, "num_ctx": 8192},
|
||||||
@@ -1373,58 +1754,43 @@ Example: x_pct=0.50, y_pct=0.30"""
|
|||||||
# =========================================================================
|
# =========================================================================
|
||||||
|
|
||||||
def _type_text(self, text: str):
|
def _type_text(self, text: str):
|
||||||
"""Saisir du texte via copier-coller (methode principale) ou keyboard.type (fallback).
|
"""Saisir du texte caractère par caractère (anti-détection robot).
|
||||||
|
|
||||||
Le copier-coller via le presse-papiers est la methode principale car
|
Chaque caractère est tapé individuellement avec un délai aléatoire
|
||||||
keyboard.type() de pynput envoie les scancodes QWERTY, ce qui produit
|
pour simuler une frappe humaine. Les caractères spéciaux AZERTY
|
||||||
des caracteres incorrects sur les claviers AZERTY (ex: "ce" -> "ci").
|
(@ # € etc.) utilisent les bons VK codes via KeyCode.from_char().
|
||||||
Le copier-coller est agnostique du layout clavier.
|
|
||||||
|
Pas de copier-coller (détectable par les systèmes anti-robot Citrix).
|
||||||
"""
|
"""
|
||||||
|
import random
|
||||||
|
|
||||||
if not text:
|
if not text:
|
||||||
return
|
return
|
||||||
|
|
||||||
clipboard_ok = False
|
for char in text:
|
||||||
try:
|
|
||||||
import pyperclip
|
|
||||||
# Sauvegarder le contenu actuel du presse-papiers
|
|
||||||
try:
|
try:
|
||||||
old_clipboard = pyperclip.paste()
|
# Taper le caractère via from_char (respecte le layout clavier)
|
||||||
|
self.keyboard.press(KeyCode.from_char(char))
|
||||||
|
self.keyboard.release(KeyCode.from_char(char))
|
||||||
except Exception:
|
except Exception:
|
||||||
old_clipboard = None
|
# Fallback : keyboard.type pour les cas spéciaux
|
||||||
|
|
||||||
pyperclip.copy(text)
|
|
||||||
# Ctrl+V pour coller
|
|
||||||
self.keyboard.press(Key.ctrl)
|
|
||||||
time.sleep(0.02)
|
|
||||||
self.keyboard.press('v')
|
|
||||||
self.keyboard.release('v')
|
|
||||||
self.keyboard.release(Key.ctrl)
|
|
||||||
time.sleep(0.1)
|
|
||||||
|
|
||||||
# Restaurer le presse-papiers original
|
|
||||||
if old_clipboard is not None:
|
|
||||||
try:
|
try:
|
||||||
pyperclip.copy(old_clipboard)
|
self.keyboard.type(char)
|
||||||
except Exception:
|
except Exception as e:
|
||||||
pass
|
logger.debug(f"Impossible de taper '{char}': {e}")
|
||||||
|
# Délai humain entre les frappes (40-120ms)
|
||||||
|
time.sleep(random.uniform(0.04, 0.12))
|
||||||
|
|
||||||
clipboard_ok = True
|
logger.debug(f"Texte saisi char-by-char ({len(text)} chars)")
|
||||||
logger.debug(f"Texte saisi via presse-papiers ({len(text)} chars)")
|
|
||||||
except ImportError:
|
|
||||||
logger.debug("pyperclip non disponible, fallback sur keyboard.type()")
|
|
||||||
except Exception as e:
|
|
||||||
logger.warning(f"Copier-coller echoue ({e}), fallback sur keyboard.type()")
|
|
||||||
|
|
||||||
if not clipboard_ok:
|
|
||||||
self.keyboard.type(text)
|
|
||||||
|
|
||||||
def _click(self, pos, button_name):
|
def _click(self, pos, button_name):
|
||||||
"""Deplacer la souris et cliquer.
|
"""Deplacer la souris via courbe de Bézier puis cliquer.
|
||||||
|
|
||||||
Supporte les boutons : left, right, double (double-clic gauche).
|
Le mouvement en courbe de Bézier simule un déplacement humain
|
||||||
|
(anti-détection robot pour Citrix et systèmes surveillés).
|
||||||
"""
|
"""
|
||||||
self.mouse.position = pos
|
self._bezier_move(pos)
|
||||||
time.sleep(0.1) # Delai pour simuler le temps de reaction humain
|
time.sleep(0.05)
|
||||||
|
|
||||||
if button_name == "double":
|
if button_name == "double":
|
||||||
self.mouse.click(Button.left, 2)
|
self.mouse.click(Button.left, 2)
|
||||||
@@ -1433,6 +1799,35 @@ Example: x_pct=0.50, y_pct=0.30"""
|
|||||||
else:
|
else:
|
||||||
self.mouse.click(Button.left)
|
self.mouse.click(Button.left)
|
||||||
|
|
||||||
|
def _bezier_move(self, target, steps=25):
|
||||||
|
"""Déplacer la souris vers target via une courbe de Bézier cubique.
|
||||||
|
|
||||||
|
Génère un mouvement naturel avec un point de contrôle aléatoire
|
||||||
|
pour éviter les lignes droites détectables par les anti-bots.
|
||||||
|
"""
|
||||||
|
import random
|
||||||
|
|
||||||
|
start = self.mouse.position
|
||||||
|
sx, sy = start
|
||||||
|
tx, ty = target
|
||||||
|
|
||||||
|
# Point de contrôle aléatoire (déviation latérale)
|
||||||
|
dist = ((tx - sx) ** 2 + (ty - sy) ** 2) ** 0.5
|
||||||
|
deviation = max(20, dist * 0.2)
|
||||||
|
cx = (sx + tx) / 2 + random.uniform(-deviation, deviation)
|
||||||
|
cy = (sy + ty) / 2 + random.uniform(-deviation, deviation)
|
||||||
|
|
||||||
|
for i in range(1, steps + 1):
|
||||||
|
t = i / steps
|
||||||
|
# Bézier quadratique : B(t) = (1-t)²·S + 2(1-t)t·C + t²·T
|
||||||
|
inv_t = 1 - t
|
||||||
|
x = inv_t * inv_t * sx + 2 * inv_t * t * cx + t * t * tx
|
||||||
|
y = inv_t * inv_t * sy + 2 * inv_t * t * cy + t * t * ty
|
||||||
|
self.mouse.position = (int(x), int(y))
|
||||||
|
# Vitesse variable (plus lent au début et à la fin)
|
||||||
|
speed = 0.005 + 0.01 * (1 - abs(2 * t - 1))
|
||||||
|
time.sleep(speed)
|
||||||
|
|
||||||
def _execute_key_combo(self, keys: list):
|
def _execute_key_combo(self, keys: list):
|
||||||
"""
|
"""
|
||||||
Executer une combinaison de touches.
|
Executer une combinaison de touches.
|
||||||
|
|||||||
214
agent_v0/agent_v1/core/grounding.py
Normal file
214
agent_v0/agent_v1/core/grounding.py
Normal file
@@ -0,0 +1,214 @@
|
|||||||
|
# agent_v1/core/grounding.py
|
||||||
|
"""
|
||||||
|
Module Grounding — localisation pure d'éléments UI sur l'écran.
|
||||||
|
|
||||||
|
Responsabilité unique : "Trouve l'élément X sur l'écran et retourne ses coordonnées."
|
||||||
|
Ne prend AUCUNE décision. Si l'élément n'est pas trouvé → retourne NOT_FOUND.
|
||||||
|
|
||||||
|
Stratégies disponibles (cascade configurable) :
|
||||||
|
1. Serveur SomEngine + VLM (GPU distant)
|
||||||
|
2. Template matching local (CPU, ~10ms)
|
||||||
|
3. VLM local direct (CPU/GPU local)
|
||||||
|
|
||||||
|
Séparé de Policy (qui décide quoi faire quand grounding échoue).
|
||||||
|
Ref: docs/PLAN_ACTEUR_V1.md — Architecture MICRO (grounding + exécution)
|
||||||
|
"""
|
||||||
|
|
||||||
|
import base64
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from typing import Any, Dict, List, Optional
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class GroundingResult:
|
||||||
|
"""Résultat d'une tentative de localisation visuelle."""
|
||||||
|
found: bool # L'élément a été trouvé
|
||||||
|
x_pct: float = 0.0 # Position X en % (0.0-1.0)
|
||||||
|
y_pct: float = 0.0 # Position Y en % (0.0-1.0)
|
||||||
|
method: str = "" # Méthode utilisée (server_som, anchor_template, vlm_direct...)
|
||||||
|
score: float = 0.0 # Confiance (0.0-1.0)
|
||||||
|
elapsed_ms: float = 0.0 # Temps de résolution
|
||||||
|
detail: str = "" # Info supplémentaire (label trouvé, raison échec)
|
||||||
|
raw: Optional[Dict] = None # Données brutes du resolver (pour debug)
|
||||||
|
|
||||||
|
def to_dict(self) -> Dict[str, Any]:
|
||||||
|
return {
|
||||||
|
"found": self.found,
|
||||||
|
"x_pct": self.x_pct,
|
||||||
|
"y_pct": self.y_pct,
|
||||||
|
"method": self.method,
|
||||||
|
"score": round(self.score, 3),
|
||||||
|
"elapsed_ms": round(self.elapsed_ms, 1),
|
||||||
|
"detail": self.detail,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# Résultat singleton pour "pas trouvé"
|
||||||
|
NOT_FOUND = GroundingResult(found=False, detail="Aucune méthode n'a trouvé l'élément")
|
||||||
|
|
||||||
|
|
||||||
|
class GroundingEngine:
|
||||||
|
"""Moteur de localisation visuelle d'éléments UI.
|
||||||
|
|
||||||
|
Encapsule la cascade de résolution (serveur → template → VLM local)
|
||||||
|
avec une interface unifiée. Ne prend aucune décision — c'est le rôle
|
||||||
|
de PolicyEngine.
|
||||||
|
|
||||||
|
Usage :
|
||||||
|
engine = GroundingEngine(executor)
|
||||||
|
result = engine.locate(screenshot_b64, target_spec, screen_w, screen_h)
|
||||||
|
if result.found:
|
||||||
|
click(result.x_pct, result.y_pct)
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, executor):
|
||||||
|
"""
|
||||||
|
Args:
|
||||||
|
executor: ActionExecutorV1 — fournit les méthodes de résolution existantes.
|
||||||
|
"""
|
||||||
|
self._executor = executor
|
||||||
|
|
||||||
|
def locate(
|
||||||
|
self,
|
||||||
|
server_url: str,
|
||||||
|
target_spec: Dict[str, Any],
|
||||||
|
fallback_x: float,
|
||||||
|
fallback_y: float,
|
||||||
|
screen_width: int,
|
||||||
|
screen_height: int,
|
||||||
|
strategies: Optional[List[str]] = None,
|
||||||
|
) -> GroundingResult:
|
||||||
|
"""Localiser un élément UI sur l'écran.
|
||||||
|
|
||||||
|
Exécute la cascade de stratégies dans l'ordre et retourne
|
||||||
|
dès qu'une stratégie trouve l'élément.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
server_url: URL du serveur (SomEngine + VLM GPU)
|
||||||
|
target_spec: Spécification de la cible (by_text, anchor, vlm_description...)
|
||||||
|
fallback_x, fallback_y: Coordonnées de fallback (enregistrement)
|
||||||
|
screen_width, screen_height: Résolution écran
|
||||||
|
strategies: Liste ordonnée de stratégies à essayer.
|
||||||
|
Par défaut : ["server", "template", "vlm_local"]
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
GroundingResult avec found=True et coordonnées, ou NOT_FOUND
|
||||||
|
"""
|
||||||
|
if strategies is None:
|
||||||
|
strategies = ["server", "template", "vlm_local"]
|
||||||
|
|
||||||
|
# ── Apprentissage : réordonner les stratégies selon l'historique ──
|
||||||
|
# Si le Learning sait quelle méthode marche pour cette cible,
|
||||||
|
# la mettre en premier. C'est la boucle d'apprentissage.
|
||||||
|
learned = target_spec.get("_learned_strategy", "")
|
||||||
|
if learned:
|
||||||
|
strategy_map = {
|
||||||
|
"som_text_match": "server",
|
||||||
|
"grounding_vlm": "server",
|
||||||
|
"server_som": "server",
|
||||||
|
"anchor_template": "template",
|
||||||
|
"template_matching": "template",
|
||||||
|
"hybrid_text_direct": "vlm_local",
|
||||||
|
"hybrid_vlm_text": "vlm_local",
|
||||||
|
"vlm_direct": "vlm_local",
|
||||||
|
}
|
||||||
|
preferred = strategy_map.get(learned, "")
|
||||||
|
if preferred and preferred in strategies:
|
||||||
|
strategies = [preferred] + [s for s in strategies if s != preferred]
|
||||||
|
logger.info(
|
||||||
|
f"Grounding: stratégie réordonnée par l'apprentissage → "
|
||||||
|
f"{strategies} (learned={learned})"
|
||||||
|
)
|
||||||
|
|
||||||
|
t_start = time.time()
|
||||||
|
screenshot_b64 = self._executor._capture_screenshot_b64(max_width=0, quality=75)
|
||||||
|
if not screenshot_b64:
|
||||||
|
return GroundingResult(
|
||||||
|
found=False, detail="Capture screenshot échouée",
|
||||||
|
elapsed_ms=(time.time() - t_start) * 1000,
|
||||||
|
)
|
||||||
|
|
||||||
|
for strategy in strategies:
|
||||||
|
result = self._try_strategy(
|
||||||
|
strategy, server_url, screenshot_b64, target_spec,
|
||||||
|
fallback_x, fallback_y, screen_width, screen_height,
|
||||||
|
)
|
||||||
|
if result.found:
|
||||||
|
result.elapsed_ms = (time.time() - t_start) * 1000
|
||||||
|
return result
|
||||||
|
|
||||||
|
return GroundingResult(
|
||||||
|
found=False,
|
||||||
|
detail=f"Toutes les stratégies ont échoué ({', '.join(strategies)})",
|
||||||
|
elapsed_ms=(time.time() - t_start) * 1000,
|
||||||
|
)
|
||||||
|
|
||||||
|
def _try_strategy(
|
||||||
|
self,
|
||||||
|
strategy: str,
|
||||||
|
server_url: str,
|
||||||
|
screenshot_b64: str,
|
||||||
|
target_spec: Dict[str, Any],
|
||||||
|
fallback_x: float,
|
||||||
|
fallback_y: float,
|
||||||
|
screen_width: int,
|
||||||
|
screen_height: int,
|
||||||
|
) -> GroundingResult:
|
||||||
|
"""Essayer une stratégie de grounding unique."""
|
||||||
|
|
||||||
|
if strategy == "server" and server_url:
|
||||||
|
raw = self._executor._server_resolve_target(
|
||||||
|
server_url, screenshot_b64, target_spec,
|
||||||
|
fallback_x, fallback_y, screen_width, screen_height,
|
||||||
|
)
|
||||||
|
if raw and raw.get("resolved"):
|
||||||
|
return GroundingResult(
|
||||||
|
found=True,
|
||||||
|
x_pct=raw["x_pct"],
|
||||||
|
y_pct=raw["y_pct"],
|
||||||
|
method=raw.get("method", "server"),
|
||||||
|
score=raw.get("score", 0.0),
|
||||||
|
detail=raw.get("matched_element", {}).get("label", ""),
|
||||||
|
raw=raw,
|
||||||
|
)
|
||||||
|
|
||||||
|
elif strategy == "template":
|
||||||
|
anchor_b64 = target_spec.get("anchor_image_base64", "")
|
||||||
|
if anchor_b64:
|
||||||
|
raw = self._executor._template_match_anchor(
|
||||||
|
screenshot_b64, anchor_b64, screen_width, screen_height,
|
||||||
|
)
|
||||||
|
if raw and raw.get("resolved"):
|
||||||
|
return GroundingResult(
|
||||||
|
found=True,
|
||||||
|
x_pct=raw["x_pct"],
|
||||||
|
y_pct=raw["y_pct"],
|
||||||
|
method="anchor_template",
|
||||||
|
score=raw.get("score", 0.0),
|
||||||
|
raw=raw,
|
||||||
|
)
|
||||||
|
|
||||||
|
elif strategy == "vlm_local":
|
||||||
|
by_text = target_spec.get("by_text", "")
|
||||||
|
vlm_desc = target_spec.get("vlm_description", "")
|
||||||
|
if vlm_desc or by_text:
|
||||||
|
raw = self._executor._hybrid_vlm_resolve(
|
||||||
|
screenshot_b64, target_spec, screen_width, screen_height,
|
||||||
|
)
|
||||||
|
if raw and raw.get("resolved"):
|
||||||
|
return GroundingResult(
|
||||||
|
found=True,
|
||||||
|
x_pct=raw["x_pct"],
|
||||||
|
y_pct=raw["y_pct"],
|
||||||
|
method=raw.get("method", "vlm_local"),
|
||||||
|
score=raw.get("score", 0.0),
|
||||||
|
detail=raw.get("matched_element", {}).get("label", ""),
|
||||||
|
raw=raw,
|
||||||
|
)
|
||||||
|
|
||||||
|
return GroundingResult(found=False, method=strategy, detail=f"{strategy}: pas trouvé")
|
||||||
152
agent_v0/agent_v1/core/policy.py
Normal file
152
agent_v0/agent_v1/core/policy.py
Normal file
@@ -0,0 +1,152 @@
|
|||||||
|
# agent_v1/core/policy.py
|
||||||
|
"""
|
||||||
|
Module Policy — décisions intelligentes quand le grounding échoue.
|
||||||
|
|
||||||
|
Responsabilité unique : "Le Grounding dit NOT_FOUND. Que fait-on ?"
|
||||||
|
Ne localise AUCUN élément — c'est le rôle du Grounding.
|
||||||
|
|
||||||
|
Décisions possibles :
|
||||||
|
- RETRY : re-tenter le grounding (après popup fermée, par exemple)
|
||||||
|
- SKIP : l'action n'est plus nécessaire (état déjà atteint)
|
||||||
|
- ABORT : arrêter le workflow (état incohérent)
|
||||||
|
- SUPERVISE : rendre la main à l'utilisateur
|
||||||
|
|
||||||
|
Séparé de Grounding (qui localise les éléments).
|
||||||
|
Ref: docs/PLAN_ACTEUR_V1.md — Architecture MÉSO (acteur intelligent)
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from enum import Enum
|
||||||
|
from typing import Any, Dict, Optional
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class Decision(Enum):
|
||||||
|
"""Décisions possibles quand le grounding échoue."""
|
||||||
|
RETRY = "retry" # Re-tenter (après correction : popup fermée, navigation...)
|
||||||
|
SKIP = "skip" # Action inutile (état déjà atteint)
|
||||||
|
ABORT = "abort" # Arrêter le workflow (état incohérent)
|
||||||
|
SUPERVISE = "supervise" # Rendre la main à l'utilisateur (Léa dit "je bloque")
|
||||||
|
CONTINUE = "continue" # Continuer malgré l'échec (action non critique)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class PolicyDecision:
|
||||||
|
"""Résultat d'une décision Policy."""
|
||||||
|
decision: Decision
|
||||||
|
reason: str # Explication de la décision
|
||||||
|
action_taken: str = "" # Action corrective effectuée (ex: "popup fermée")
|
||||||
|
elapsed_ms: float = 0.0
|
||||||
|
|
||||||
|
def to_dict(self) -> Dict[str, Any]:
|
||||||
|
return {
|
||||||
|
"decision": self.decision.value,
|
||||||
|
"reason": self.reason,
|
||||||
|
"action_taken": self.action_taken,
|
||||||
|
"elapsed_ms": round(self.elapsed_ms, 1),
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class PolicyEngine:
|
||||||
|
"""Moteur de décision quand le grounding échoue.
|
||||||
|
|
||||||
|
Cascade de décision :
|
||||||
|
1. Popup détectée ? → fermer et RETRY
|
||||||
|
2. Acteur gemma4 → SKIP / ABORT / SUPERVISE
|
||||||
|
3. Fallback → SUPERVISE (rendre la main)
|
||||||
|
|
||||||
|
Usage :
|
||||||
|
policy = PolicyEngine(executor)
|
||||||
|
decision = policy.decide(action, target_spec, grounding_result)
|
||||||
|
if decision.decision == Decision.RETRY:
|
||||||
|
# re-tenter le grounding
|
||||||
|
elif decision.decision == Decision.SKIP:
|
||||||
|
# marquer comme réussi, passer à la suite
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, executor):
|
||||||
|
self._executor = executor
|
||||||
|
|
||||||
|
def decide(
|
||||||
|
self,
|
||||||
|
action: Dict[str, Any],
|
||||||
|
target_spec: Dict[str, Any],
|
||||||
|
retry_count: int = 0,
|
||||||
|
max_retries: int = 1,
|
||||||
|
) -> PolicyDecision:
|
||||||
|
"""Décider quoi faire quand le grounding a échoué.
|
||||||
|
|
||||||
|
Cascade :
|
||||||
|
1. Si c'est le premier essai → tenter de fermer une popup → RETRY
|
||||||
|
2. Si retry déjà fait → demander à l'acteur gemma4
|
||||||
|
3. Selon gemma4 : SKIP, ABORT, ou SUPERVISE
|
||||||
|
|
||||||
|
Args:
|
||||||
|
action: L'action qui a échoué
|
||||||
|
target_spec: La cible non trouvée
|
||||||
|
retry_count: Nombre de retries déjà faits
|
||||||
|
max_retries: Maximum de retries autorisés
|
||||||
|
"""
|
||||||
|
t_start = time.time()
|
||||||
|
|
||||||
|
# ── Étape 1 : Tentative de fermeture popup (premier essai) ──
|
||||||
|
if retry_count == 0:
|
||||||
|
popup_handled = self._try_close_popup()
|
||||||
|
if popup_handled:
|
||||||
|
return PolicyDecision(
|
||||||
|
decision=Decision.RETRY,
|
||||||
|
reason="Popup détectée et fermée, re-tentative",
|
||||||
|
action_taken="popup_closed",
|
||||||
|
elapsed_ms=(time.time() - t_start) * 1000,
|
||||||
|
)
|
||||||
|
|
||||||
|
# ── Étape 2 : Max retries atteint → acteur gemma4 ──
|
||||||
|
if retry_count >= max_retries:
|
||||||
|
actor_decision = self._ask_actor(action, target_spec)
|
||||||
|
|
||||||
|
if actor_decision == "PASSER":
|
||||||
|
return PolicyDecision(
|
||||||
|
decision=Decision.SKIP,
|
||||||
|
reason="Acteur gemma4 : l'état est déjà atteint",
|
||||||
|
elapsed_ms=(time.time() - t_start) * 1000,
|
||||||
|
)
|
||||||
|
elif actor_decision == "STOPPER":
|
||||||
|
return PolicyDecision(
|
||||||
|
decision=Decision.ABORT,
|
||||||
|
reason="Acteur gemma4 : état incohérent, arrêt",
|
||||||
|
elapsed_ms=(time.time() - t_start) * 1000,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
# EXECUTER ou inconnu → pause supervisée
|
||||||
|
return PolicyDecision(
|
||||||
|
decision=Decision.SUPERVISE,
|
||||||
|
reason=f"Acteur gemma4 : {actor_decision}, pause supervisée",
|
||||||
|
elapsed_ms=(time.time() - t_start) * 1000,
|
||||||
|
)
|
||||||
|
|
||||||
|
# ── Étape 3 : Encore des retries disponibles → RETRY ──
|
||||||
|
return PolicyDecision(
|
||||||
|
decision=Decision.RETRY,
|
||||||
|
reason=f"Retry {retry_count + 1}/{max_retries}",
|
||||||
|
elapsed_ms=(time.time() - t_start) * 1000,
|
||||||
|
)
|
||||||
|
|
||||||
|
def _try_close_popup(self) -> bool:
|
||||||
|
"""Tenter de fermer une popup via le handler VLM existant."""
|
||||||
|
try:
|
||||||
|
return self._executor._handle_popup_vlm()
|
||||||
|
except Exception as e:
|
||||||
|
logger.debug(f"Policy: popup handler échoué : {e}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
def _ask_actor(self, action: Dict, target_spec: Dict) -> str:
|
||||||
|
"""Demander à gemma4 de décider (PASSER/EXECUTER/STOPPER)."""
|
||||||
|
try:
|
||||||
|
return self._executor._actor_decide(action, target_spec)
|
||||||
|
except Exception as e:
|
||||||
|
logger.debug(f"Policy: acteur gemma4 échoué : {e}")
|
||||||
|
return "EXECUTER" # Fallback → supervisé
|
||||||
215
agent_v0/agent_v1/core/recovery.py
Normal file
215
agent_v0/agent_v1/core/recovery.py
Normal file
@@ -0,0 +1,215 @@
|
|||||||
|
# agent_v1/core/recovery.py
|
||||||
|
"""
|
||||||
|
Module Recovery — mécanisme de rollback quand une action échoue.
|
||||||
|
|
||||||
|
Responsabilité : "L'action a échoué ou produit un résultat inattendu.
|
||||||
|
Comment revenir en arrière ?"
|
||||||
|
|
||||||
|
Stratégies de recovery :
|
||||||
|
1. Ctrl+Z (undo natif) — pour les frappes et modifications
|
||||||
|
2. Escape (fermer dialogue) — pour les popups/menus
|
||||||
|
3. Alt+F4 (fermer fenêtre) — si mauvaise application ouverte
|
||||||
|
4. Clic hors zone — fermer un menu déroulant
|
||||||
|
5. Navigation retour — retourner à l'écran précédent
|
||||||
|
|
||||||
|
Le Recovery est appelé par le Policy quand le Critic détecte un
|
||||||
|
résultat inattendu (pixel OK + sémantique NON = changement inattendu).
|
||||||
|
|
||||||
|
Ref: docs/VISION_RPA_INTELLIGENT.md — "Il se trompe" → correction
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import time
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from enum import Enum
|
||||||
|
from typing import Any, Dict, List, Optional
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class RecoveryAction(Enum):
|
||||||
|
"""Actions de recovery possibles."""
|
||||||
|
UNDO = "undo" # Ctrl+Z
|
||||||
|
ESCAPE = "escape" # Echap (fermer dialogue/menu)
|
||||||
|
CLOSE_WINDOW = "close" # Alt+F4
|
||||||
|
CLICK_AWAY = "click_away" # Clic hors zone (fermer menu)
|
||||||
|
NONE = "none" # Pas de recovery possible
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class RecoveryResult:
|
||||||
|
"""Résultat d'une tentative de recovery."""
|
||||||
|
action_taken: RecoveryAction
|
||||||
|
success: bool
|
||||||
|
detail: str = ""
|
||||||
|
|
||||||
|
def to_dict(self) -> Dict[str, Any]:
|
||||||
|
return {
|
||||||
|
"action_taken": self.action_taken.value,
|
||||||
|
"success": self.success,
|
||||||
|
"detail": self.detail,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class RecoveryEngine:
|
||||||
|
"""Moteur de recovery — tente de revenir en arrière après un échec.
|
||||||
|
|
||||||
|
Choisit la stratégie de recovery en fonction du type d'action qui a échoué
|
||||||
|
et de l'état actuel de l'écran.
|
||||||
|
|
||||||
|
Usage :
|
||||||
|
recovery = RecoveryEngine(executor)
|
||||||
|
result = recovery.attempt(failed_action, critic_result)
|
||||||
|
if result.success:
|
||||||
|
# re-tenter l'action
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, executor):
|
||||||
|
self._executor = executor
|
||||||
|
|
||||||
|
def attempt(
|
||||||
|
self,
|
||||||
|
failed_action: Dict[str, Any],
|
||||||
|
critic_detail: str = "",
|
||||||
|
) -> RecoveryResult:
|
||||||
|
"""Tenter une recovery après un échec.
|
||||||
|
|
||||||
|
Sélectionne la stratégie appropriée selon le type d'action :
|
||||||
|
- click qui ouvre la mauvaise chose → Escape ou Ctrl+Z
|
||||||
|
- type qui tape au mauvais endroit → Ctrl+Z
|
||||||
|
- key_combo inattendu → Ctrl+Z
|
||||||
|
- popup apparue → Escape
|
||||||
|
|
||||||
|
Args:
|
||||||
|
failed_action: L'action qui a échoué
|
||||||
|
critic_detail: Détail du Critic (raison de l'échec sémantique)
|
||||||
|
"""
|
||||||
|
action_type = failed_action.get("type", "")
|
||||||
|
detail_lower = critic_detail.lower()
|
||||||
|
|
||||||
|
# Choisir la stratégie de recovery
|
||||||
|
strategy = self._select_strategy(action_type, detail_lower)
|
||||||
|
|
||||||
|
if strategy == RecoveryAction.NONE:
|
||||||
|
return RecoveryResult(
|
||||||
|
action_taken=RecoveryAction.NONE,
|
||||||
|
success=False,
|
||||||
|
detail="Pas de stratégie de recovery applicable",
|
||||||
|
)
|
||||||
|
|
||||||
|
return self._execute_recovery(strategy)
|
||||||
|
|
||||||
|
def _select_strategy(self, action_type: str, critic_detail: str) -> RecoveryAction:
|
||||||
|
"""Sélectionner la meilleure stratégie de recovery.
|
||||||
|
|
||||||
|
Priorité : type d'action d'abord (frappe → undo), puis contexte.
|
||||||
|
"""
|
||||||
|
# Frappe ou modification incorrecte → toujours Ctrl+Z
|
||||||
|
if action_type in ("type", "key_combo"):
|
||||||
|
return RecoveryAction.UNDO
|
||||||
|
|
||||||
|
# Popup/dialogue détecté
|
||||||
|
if any(w in critic_detail for w in ["popup", "dialog", "erreur", "error", "modal"]):
|
||||||
|
return RecoveryAction.ESCAPE
|
||||||
|
|
||||||
|
# Menu ouvert par erreur
|
||||||
|
if any(w in critic_detail for w in ["menu", "dropdown", "déroulant"]):
|
||||||
|
return RecoveryAction.ESCAPE
|
||||||
|
|
||||||
|
# Mauvaise fenêtre ouverte
|
||||||
|
if any(w in critic_detail for w in ["mauvaise fenêtre", "wrong window"]):
|
||||||
|
return RecoveryAction.CLOSE_WINDOW
|
||||||
|
|
||||||
|
# Clic qui a produit un résultat inattendu
|
||||||
|
if action_type == "click":
|
||||||
|
return RecoveryAction.ESCAPE
|
||||||
|
|
||||||
|
return RecoveryAction.NONE
|
||||||
|
|
||||||
|
def _execute_recovery(self, strategy: RecoveryAction) -> RecoveryResult:
|
||||||
|
"""Exécuter la stratégie de recovery choisie."""
|
||||||
|
from pynput.keyboard import Controller as KeyboardController, Key
|
||||||
|
|
||||||
|
keyboard = self._executor.keyboard
|
||||||
|
|
||||||
|
try:
|
||||||
|
if strategy == RecoveryAction.UNDO:
|
||||||
|
# Ctrl+Z
|
||||||
|
logger.info("Recovery : Ctrl+Z (undo)")
|
||||||
|
print(" [RECOVERY] Ctrl+Z — annulation de la dernière action")
|
||||||
|
keyboard.press(Key.ctrl)
|
||||||
|
keyboard.press('z')
|
||||||
|
keyboard.release('z')
|
||||||
|
keyboard.release(Key.ctrl)
|
||||||
|
time.sleep(0.5)
|
||||||
|
return RecoveryResult(
|
||||||
|
action_taken=RecoveryAction.UNDO,
|
||||||
|
success=True,
|
||||||
|
detail="Ctrl+Z exécuté",
|
||||||
|
)
|
||||||
|
|
||||||
|
elif strategy == RecoveryAction.ESCAPE:
|
||||||
|
# Echap
|
||||||
|
logger.info("Recovery : Escape (fermer dialogue)")
|
||||||
|
print(" [RECOVERY] Escape — fermeture dialogue/menu")
|
||||||
|
keyboard.press(Key.esc)
|
||||||
|
keyboard.release(Key.esc)
|
||||||
|
time.sleep(0.5)
|
||||||
|
return RecoveryResult(
|
||||||
|
action_taken=RecoveryAction.ESCAPE,
|
||||||
|
success=True,
|
||||||
|
detail="Escape exécuté",
|
||||||
|
)
|
||||||
|
|
||||||
|
elif strategy == RecoveryAction.CLOSE_WINDOW:
|
||||||
|
# Alt+F4 — AVEC vérification fenêtre active
|
||||||
|
# Sur un poste hospitalier, Alt+F4 sans vérif peut fermer le DPI patient
|
||||||
|
try:
|
||||||
|
from ..window_info_crossplatform import get_active_window_info
|
||||||
|
active = get_active_window_info()
|
||||||
|
active_title = active.get("title", "")
|
||||||
|
logger.info(f"Recovery : Alt+F4 sur '{active_title}'")
|
||||||
|
print(f" [RECOVERY] Alt+F4 — fermeture de '{active_title}'")
|
||||||
|
except Exception:
|
||||||
|
logger.info("Recovery : Alt+F4 (fenêtre active inconnue)")
|
||||||
|
print(" [RECOVERY] Alt+F4 — fermeture fenêtre indésirable")
|
||||||
|
|
||||||
|
keyboard.press(Key.alt)
|
||||||
|
keyboard.press(Key.f4)
|
||||||
|
keyboard.release(Key.f4)
|
||||||
|
keyboard.release(Key.alt)
|
||||||
|
time.sleep(1.0)
|
||||||
|
return RecoveryResult(
|
||||||
|
action_taken=RecoveryAction.CLOSE_WINDOW,
|
||||||
|
success=True,
|
||||||
|
detail=f"Alt+F4 exécuté sur '{active_title if 'active_title' in dir() else '?'}'",
|
||||||
|
)
|
||||||
|
|
||||||
|
elif strategy == RecoveryAction.CLICK_AWAY:
|
||||||
|
# Clic au centre de l'écran (hors popup)
|
||||||
|
logger.info("Recovery : clic hors zone")
|
||||||
|
print(" [RECOVERY] Clic hors zone — fermeture menu")
|
||||||
|
monitor = self._executor.sct.monitors[1]
|
||||||
|
w, h = monitor["width"], monitor["height"]
|
||||||
|
# Cliquer dans un coin neutre (10% depuis le haut-gauche)
|
||||||
|
self._executor._click((int(w * 0.1), int(h * 0.1)), "left")
|
||||||
|
time.sleep(0.5)
|
||||||
|
return RecoveryResult(
|
||||||
|
action_taken=RecoveryAction.CLICK_AWAY,
|
||||||
|
success=True,
|
||||||
|
detail="Clic hors zone exécuté",
|
||||||
|
)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Recovery échoué ({strategy.value}) : {e}")
|
||||||
|
return RecoveryResult(
|
||||||
|
action_taken=strategy,
|
||||||
|
success=False,
|
||||||
|
detail=f"Erreur : {e}",
|
||||||
|
)
|
||||||
|
|
||||||
|
return RecoveryResult(
|
||||||
|
action_taken=RecoveryAction.NONE,
|
||||||
|
success=False,
|
||||||
|
detail="Stratégie non implémentée",
|
||||||
|
)
|
||||||
File diff suppressed because it is too large
Load Diff
393
agent_v0/server_v1/audit_trail.py
Normal file
393
agent_v0/server_v1/audit_trail.py
Normal file
@@ -0,0 +1,393 @@
|
|||||||
|
# agent_v0/server_v1/audit_trail.py
|
||||||
|
"""
|
||||||
|
Module Audit Trail — traçabilité complète des actions RPA.
|
||||||
|
|
||||||
|
Responsabilité : "Chaque action exécutée par Léa est tracée, datée, attribuée."
|
||||||
|
|
||||||
|
En milieu hospitalier (codage CIM-10 via DPI), la traçabilité est une obligation
|
||||||
|
légale. Ce module enregistre chaque action avec :
|
||||||
|
- L'identité du TIM (Technicien d'Information Médicale) superviseur
|
||||||
|
- Le mode d'exécution (autonome, assisté, shadow)
|
||||||
|
- Le résultat détaillé (succès, échec, correction)
|
||||||
|
- L'horodatage ISO 8601
|
||||||
|
|
||||||
|
Format de stockage : fichiers JSONL datés dans data/audit/ (un par jour).
|
||||||
|
Aucune dépendance externe (stdlib + dataclasses uniquement).
|
||||||
|
|
||||||
|
Usage :
|
||||||
|
audit = AuditTrail()
|
||||||
|
audit.record(AuditEntry(
|
||||||
|
session_id="sess_abc",
|
||||||
|
action_id="act_001",
|
||||||
|
user_id="tim_dupont",
|
||||||
|
user_name="Marie Dupont",
|
||||||
|
...
|
||||||
|
))
|
||||||
|
entries = audit.query(user_id="tim_dupont", date_from="2026-04-01")
|
||||||
|
csv_data = audit.export_csv(date_from="2026-04-01", date_to="2026-04-06")
|
||||||
|
summary = audit.get_summary("2026-04-05")
|
||||||
|
"""
|
||||||
|
|
||||||
|
import csv
|
||||||
|
import io
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import threading
|
||||||
|
from dataclasses import dataclass, asdict, fields
|
||||||
|
from datetime import datetime, date, timedelta
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Dict, List, Optional
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# Répertoire par défaut pour le stockage des fichiers d'audit
|
||||||
|
_DEFAULT_AUDIT_DIR = os.environ.get("RPA_AUDIT_DIR", "data/audit")
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class AuditEntry:
    """One audited event — a single traced action in the system."""

    # ISO 8601 timestamp (e.g. 2026-04-05T14:23:01.456789)
    timestamp: str = ""

    # Session and action identifiers
    session_id: str = ""
    action_id: str = ""

    # Identity of the supervising user
    user_id: str = ""       # TIM identifier (Windows login or configured)
    user_name: str = ""     # Display name (e.g. "Marie Dupont")
    machine_id: str = ""    # Client workstation ID (hostname or configured)

    # Action description
    action_type: str = ""    # click, type, key_combo, wait, etc.
    action_detail: str = ""  # Human description ("Clic sur 'Enregistrer' dans DxCare")
    target_app: str = ""     # Target application (DxCare, Orbis, etc.)

    # Execution mode
    execution_mode: str = ""  # "autonomous", "assisted", "shadow"

    # Outcome
    result: str = ""             # "success", "failed", "skipped", "recovered"
    resolution_method: str = ""  # How the target was located (som_text_match, vlm_direct, ...)
    critic_result: str = ""      # Outcome of the semantic verification
    recovery_action: str = ""    # Corrective action on failure (undo, escape, retry, none)

    # Business context
    domain: str = ""         # Business domain (tim_codage, generic, etc.)
    workflow_id: str = ""    # ID of the executed workflow
    workflow_name: str = ""  # Human-readable workflow name

    # Performance
    duration_ms: float = 0.0  # Action duration in milliseconds

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-ready dictionary."""
        return asdict(self)

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "AuditEntry":
        """Build an entry from a dictionary.

        Unknown keys are dropped so that older readers stay compatible
        with newer file formats.
        """
        valid = {f.name for f in fields(cls)}
        return cls(**{key: value for key, value in data.items() if key in valid})
|
||||||
|
|
||||||
|
|
||||||
|
class AuditTrail:
    """Traceability manager — records and queries every executed action.

    Each event is appended to a dated JSONL file (one file per day).
    Writes are serialized with a lock, so record() is safe to call from
    multiple threads; reads take no lock (append-only files).

    Files produced:
        data/audit/audit_2026-04-05.jsonl
        data/audit/audit_2026-04-06.jsonl
        ...
    """

    def __init__(self, audit_dir: str = ""):
        # Defaults to RPA_AUDIT_DIR (or "data/audit") when no directory is given.
        self.audit_dir = Path(audit_dir or _DEFAULT_AUDIT_DIR)
        self.audit_dir.mkdir(parents=True, exist_ok=True)
        # Serializes appends to the JSONL files across threads.
        self._lock = threading.Lock()
        logger.info(f"Audit Trail initialisé : {self.audit_dir}")

    def _file_for_date(self, d: date) -> Path:
        """Return the JSONL file path for the given day."""
        return self.audit_dir / f"audit_{d.isoformat()}.jsonl"

    def record(self, entry: AuditEntry) -> None:
        """Record a single audit entry.

        Adds an ISO 8601 timestamp if absent, then appends the entry to
        the daily JSONL file derived from that timestamp. Write failures
        are logged and swallowed so auditing never crashes the caller.
        """
        # Automatic timestamp when missing (naive local time, ISO 8601)
        if not entry.timestamp:
            entry.timestamp = datetime.now().isoformat()

        # Pick the daily file from the entry's own timestamp;
        # an unparsable timestamp falls back to today's file.
        try:
            entry_date = datetime.fromisoformat(entry.timestamp).date()
        except (ValueError, TypeError):
            entry_date = date.today()

        audit_file = self._file_for_date(entry_date)

        with self._lock:
            try:
                with open(audit_file, "a", encoding="utf-8") as f:
                    f.write(json.dumps(entry.to_dict(), ensure_ascii=False) + "\n")
            except Exception as e:
                logger.error(f"Audit Trail: échec écriture {audit_file}: {e}")
                return

        logger.debug(
            f"Audit: {entry.result} {entry.action_type} "
            f"'{entry.action_detail[:50]}' "
            f"[user={entry.user_id}] [session={entry.session_id}]"
        )

    def _load_file(self, filepath: Path) -> List[AuditEntry]:
        """Load every entry from one JSONL file.

        Malformed lines are logged and skipped so one corrupt record does
        not discard the whole day's audit data. Returns [] for a missing
        or unreadable file.
        """
        if not filepath.is_file():
            return []

        entries = []
        try:
            with open(filepath, "r", encoding="utf-8") as f:
                for line_num, line in enumerate(f, 1):
                    line = line.strip()
                    if not line:
                        continue
                    try:
                        data = json.loads(line)
                        entries.append(AuditEntry.from_dict(data))
                    except json.JSONDecodeError as e:
                        logger.warning(
                            f"Audit Trail: ligne {line_num} invalide dans "
                            f"{filepath.name}: {e}"
                        )
        except Exception as e:
            logger.error(f"Audit Trail: échec lecture {filepath}: {e}")

        return entries

    def _date_range(self, date_from: str = "", date_to: str = "") -> List[date]:
        """Compute the list of days between date_from and date_to (inclusive).

        If date_from is empty (or invalid), today is used.
        If date_to is empty (or invalid), date_from is used.
        Expected format: YYYY-MM-DD. Reversed bounds are swapped.
        """
        if date_from:
            try:
                d_from = date.fromisoformat(date_from)
            except ValueError:
                d_from = date.today()
        else:
            d_from = date.today()

        if date_to:
            try:
                d_to = date.fromisoformat(date_to)
            except ValueError:
                d_to = d_from
        else:
            d_to = d_from

        # Ensure chronological order
        if d_to < d_from:
            d_from, d_to = d_to, d_from

        dates = []
        current = d_from
        while current <= d_to:
            dates.append(current)
            current += timedelta(days=1)

        return dates

    def query(
        self,
        date_from: str = "",
        date_to: str = "",
        user_id: str = "",
        session_id: str = "",
        result: str = "",
        action_type: str = "",
        workflow_id: str = "",
        domain: str = "",
        limit: int = 500,
        offset: int = 0,
    ) -> List[Dict[str, Any]]:
        """Search audit entries with optional filters.

        All filters are optional and combined with AND semantics (an empty
        filter value means "no constraint"). Entries are returned sorted
        by timestamp, most recent first, paginated by offset/limit.
        """
        dates = self._date_range(date_from, date_to)
        all_entries: List[AuditEntry] = []

        for d in dates:
            filepath = self._file_for_date(d)
            all_entries.extend(self._load_file(filepath))

        # Apply the filters
        filtered = []
        for entry in all_entries:
            if user_id and entry.user_id != user_id:
                continue
            if session_id and entry.session_id != session_id:
                continue
            if result and entry.result != result:
                continue
            if action_type and entry.action_type != action_type:
                continue
            if workflow_id and entry.workflow_id != workflow_id:
                continue
            if domain and entry.domain != domain:
                continue
            filtered.append(entry)

        # Sort by timestamp, newest first (ISO 8601 sorts lexicographically)
        filtered.sort(key=lambda e: e.timestamp, reverse=True)

        # Pagination
        paginated = filtered[offset:offset + limit]

        return [e.to_dict() for e in paginated]

    def get_summary(self, target_date: str = "") -> Dict[str, Any]:
        """Daily summary for a given date (default: today).

        Returns aggregated statistics:
        - total number of actions
        - overall success rate
        - breakdown by user (with per-user success rate)
        - breakdown by result
        - breakdown by action type
        - breakdown by workflow
        - breakdown by execution mode
        """
        if not target_date:
            target_date = date.today().isoformat()

        try:
            d = date.fromisoformat(target_date)
        except ValueError:
            d = date.today()

        entries = self._load_file(self._file_for_date(d))

        if not entries:
            return {
                "date": d.isoformat(),
                "total_actions": 0,
                "success_rate": 0.0,
                "by_user": {},
                "by_result": {},
                "by_action_type": {},
                "by_workflow": {},
                "by_execution_mode": {},
            }

        total = len(entries)
        successes = sum(1 for e in entries if e.result == "success")

        # Aggregations
        by_user: Dict[str, Dict[str, Any]] = {}
        by_result: Dict[str, int] = {}
        by_action_type: Dict[str, int] = {}
        by_workflow: Dict[str, int] = {}
        by_execution_mode: Dict[str, int] = {}

        for entry in entries:
            # Per user (keeps the first seen display name for each id)
            uid = entry.user_id or "inconnu"
            if uid not in by_user:
                by_user[uid] = {
                    "user_name": entry.user_name,
                    "total": 0,
                    "success": 0,
                }
            by_user[uid]["total"] += 1
            if entry.result == "success":
                by_user[uid]["success"] += 1

            # Per result
            r = entry.result or "inconnu"
            by_result[r] = by_result.get(r, 0) + 1

            # Per action type
            at = entry.action_type or "inconnu"
            by_action_type[at] = by_action_type.get(at, 0) + 1

            # Per workflow
            wf = entry.workflow_id or "inconnu"
            by_workflow[wf] = by_workflow.get(wf, 0) + 1

            # Per execution mode
            em = entry.execution_mode or "inconnu"
            by_execution_mode[em] = by_execution_mode.get(em, 0) + 1

        # Per-user success rate
        for uid, stats in by_user.items():
            stats["success_rate"] = round(
                stats["success"] / stats["total"], 3
            ) if stats["total"] > 0 else 0.0

        return {
            "date": d.isoformat(),
            "total_actions": total,
            "success_rate": round(successes / total, 3) if total > 0 else 0.0,
            "by_user": by_user,
            "by_result": by_result,
            "by_action_type": by_action_type,
            "by_workflow": by_workflow,
            "by_execution_mode": by_execution_mode,
        }

    def export_csv(
        self,
        date_from: str = "",
        date_to: str = "",
        user_id: str = "",
        session_id: str = "",
    ) -> str:
        """Export audit entries as CSV.

        Returns a complete CSV string (header included), or "" when no
        entry matches. Optional filters: date range, user, session.
        """
        # Reuse the same filtering as query()
        entries = self.query(
            date_from=date_from,
            date_to=date_to,
            user_id=user_id,
            session_id=session_id,
            limit=100000,  # No pagination for exports
        )

        if not entries:
            return ""

        # CSV headers — same order as the dataclass fields
        fieldnames = [f.name for f in fields(AuditEntry)]

        output = io.StringIO()
        writer = csv.DictWriter(
            output,
            fieldnames=fieldnames,
            extrasaction="ignore",
            quoting=csv.QUOTE_MINIMAL,
        )
        writer.writeheader()
        for entry_dict in entries:
            writer.writerow(entry_dict)

        return output.getvalue()
||||||
201
agent_v0/server_v1/domain_context.py
Normal file
201
agent_v0/server_v1/domain_context.py
Normal file
@@ -0,0 +1,201 @@
|
|||||||
|
# agent_v0/server_v1/domain_context.py
|
||||||
|
"""
|
||||||
|
Contexte métier pour les appels VLM — rend Léa experte du domaine.
|
||||||
|
|
||||||
|
Chaque workflow est associé à un domaine métier (médical, comptable, etc.)
|
||||||
|
qui enrichit TOUS les prompts VLM (Observer, Critic, acteur, enrichissement).
|
||||||
|
|
||||||
|
Un gemma4 qui sait qu'il regarde un DPI et que l'utilisateur fait du codage
|
||||||
|
CIM-10 prend des décisions bien meilleures qu'un VLM générique.
|
||||||
|
|
||||||
|
Premier domaine : TIM (Technicien d'Information Médicale)
|
||||||
|
- Logiciels DPI/DMS (dossier patient informatisé)
|
||||||
|
- Codage CIM-10 / CCAM / GHM
|
||||||
|
- Lecture de comptes rendus médicaux
|
||||||
|
- Validation des séjours / RSS / RSA
|
||||||
|
|
||||||
|
Usage :
|
||||||
|
ctx = get_domain_context("tim_codage")
|
||||||
|
prompt = f"{ctx.system_prompt}\n\n{user_prompt}"
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from typing import Any, Dict, List, Optional
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class DomainContext:
    """Business-domain context injected into VLM prompts."""

    domain_id: str    # Unique identifier (tim_codage, comptabilite, etc.)
    name: str         # Human-readable name ("Codage médical TIM")
    description: str  # Short description of the trade

    # System prompt injected into EVERY VLM call
    system_prompt: str = ""

    # Domain vocabulary (terms the VLM should know)
    vocabulary: List[str] = field(default_factory=list)

    # Known applications (software names the VLM may encounter)
    known_apps: List[str] = field(default_factory=list)

    # Typical screens (descriptions of the trade's common screens)
    screen_patterns: Dict[str, str] = field(default_factory=dict)

    def enrich_prompt(self, prompt: str, role: str = "") -> str:
        """Prefix a prompt with the domain context.

        Args:
            prompt: The original prompt.
            role: The VLM role (observer, critic, actor, enrichment);
                an empty role adds no role-specific hint.
        """
        sections: List[str] = []
        if self.system_prompt:
            sections.append(self.system_prompt)
        hint = _ROLE_HINTS.get(role, "") if role else ""
        if hint:
            sections.append(hint.format(domain=self.name))
        sections.append(prompt)
        return "\n\n".join(sections)

    def to_dict(self) -> Dict[str, Any]:
        """Summarize the context (used for API listings)."""
        return {
            "domain_id": self.domain_id,
            "name": self.name,
            "description": self.description,
            "known_apps": self.known_apps,
            "vocabulary_count": len(self.vocabulary),
        }
|
||||||
|
|
||||||
|
|
||||||
|
# Per-role VLM hints — tailored to the business-domain context.
# Each value is a str.format() template with a single {domain} placeholder,
# consumed by DomainContext.enrich_prompt(). Values are French on purpose:
# they are runtime prompt text sent to the VLM.
_ROLE_HINTS = {
    "observer": (
        "Tu observes un écran utilisé dans le domaine '{domain}'. "
        "Cherche les popups, erreurs, ou états incohérents avec ce métier."
    ),
    "critic": (
        "Tu vérifies qu'une action dans le domaine '{domain}' a produit "
        "le bon résultat. Sois précis sur ce que tu vois à l'écran."
    ),
    "actor": (
        "Tu décides si une action est nécessaire dans le contexte '{domain}'. "
        "Utilise ta connaissance du métier pour juger si l'état est cohérent."
    ),
    "enrichment": (
        "Tu analyses un enregistrement de workflow dans le domaine '{domain}'. "
        "Décris les intentions métier, pas juste les clics."
    ),
}
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
# Pre-configured domains
# =========================================================================
# NOTE: descriptions and prompts below are runtime strings sent to the VLM
# and are deliberately kept in French (the end users are French hospitals).

_TIM_CODAGE = DomainContext(
    domain_id="tim_codage",
    name="Codage médical TIM",
    description=(
        "Technicien d'Information Médicale : lecture de comptes rendus médicaux, "
        "codage des diagnostics en CIM-10, codage des actes en CCAM, "
        "validation des groupes homogènes de malades (GHM), "
        "gestion des résumés de sortie standardisés (RSS/RSA)."
    ),
    system_prompt=(
        "Tu es un assistant expert en codage médical hospitalier. "
        "L'utilisateur est un TIM (Technicien d'Information Médicale) qui utilise "
        "un logiciel DPI (Dossier Patient Informatisé) ou DIM (Département d'Information Médicale). "
        "Son travail : lire les comptes rendus médicaux des patients et coder les diagnostics "
        "en CIM-10, les actes en CCAM, et valider les séjours pour le PMSI.\n\n"
        "Vocabulaire du métier :\n"
        "- DPI/DMS : logiciel de dossier patient (ex: Orbis, DxCare, Crossway, Easily, Hopital Manager)\n"
        "- CIM-10 : Classification Internationale des Maladies, 10ème révision (codes diagnostics)\n"
        "- CCAM : Classification Commune des Actes Médicaux (codes actes chirurgicaux/médicaux)\n"
        "- GHM : Groupe Homogène de Malades (regroupement tarifaire)\n"
        "- RSS : Résumé de Sortie Standardisé (données du séjour)\n"
        "- RSA : Résumé de Sortie Anonyme (RSS anonymisé pour la T2A)\n"
        "- DP : Diagnostic Principal (le code CIM-10 principal du séjour)\n"
        "- DAS : Diagnostics Associés Significatifs\n"
        "- CMA : Complication ou Morbidité Associée (augmente la sévérité)\n"
        "- T2A : Tarification À l'Activité (financement des hôpitaux)\n"
        "- PMSI : Programme de Médicalisation des Systèmes d'Information\n"
        "- UM : Unité Médicale (service hospitalier)\n"
        "- CR : Compte Rendu (document médical)\n\n"
        "Écrans courants :\n"
        "- Liste de patients / dossiers à coder\n"
        "- Fiche patient (identité, séjour, UM)\n"
        "- Écran de codage CIM-10 (recherche de codes, saisie DP/DAS)\n"
        "- Visualiseur de comptes rendus médicaux\n"
        "- Écran de validation / groupage GHM\n"
        "- Recherche de codes (arborescence CIM-10 ou recherche textuelle)"
    ),
    vocabulary=[
        "CIM-10", "CCAM", "GHM", "RSS", "RSA", "PMSI", "T2A",
        "diagnostic principal", "DAS", "CMA", "compte rendu",
        "dossier patient", "séjour", "unité médicale", "codage",
        "groupage", "valorisation", "exhaustivité",
    ],
    known_apps=[
        "Orbis", "DxCare", "Crossway", "Easily", "Hopital Manager",
        "CORA", "AGFA", "Dedalus", "Maincare", "Softway Medical",
        "WebPIMS", "CEPAGE", "Medimust",
    ],
    screen_patterns={
        "liste_patients": "Liste de dossiers patients avec colonnes (nom, prénom, date entrée, UM, statut codage)",
        "fiche_patient": "Fiche d'identité patient avec numéro IPP, séjour, dates, UM",
        "codage_cim10": "Écran de saisie des codes CIM-10 avec diagnostic principal et DAS",
        "compte_rendu": "Visualiseur de compte rendu médical (texte libre, souvent PDF intégré)",
        "recherche_code": "Recherche de code CIM-10 ou CCAM (champ de recherche + arborescence)",
        "validation_ghm": "Écran de validation du groupage avec GHM calculé et valorisation",
    },
)

# Fallback domain used when no specific business context applies.
_GENERIC = DomainContext(
    domain_id="generic",
    name="Bureautique générale",
    description="Automatisation bureautique générale (Office, navigateur, etc.)",
    system_prompt=(
        "Tu es un assistant RPA qui observe des applications bureautiques. "
        "Décris précisément ce que tu vois à l'écran."
    ),
)

# Registry of available domains (extensible at runtime via register_domain())
_DOMAINS: Dict[str, DomainContext] = {
    "tim_codage": _TIM_CODAGE,
    "generic": _GENERIC,
}
|
||||||
|
|
||||||
|
|
||||||
|
def get_domain_context(domain_id: str = "generic") -> DomainContext:
    """Look up a business-domain context by its identifier.

    Args:
        domain_id: Domain identifier (tim_codage, generic, etc.)

    Returns:
        The matching DomainContext; falls back to the generic one (with a
        warning) when the identifier is unknown.
    """
    context = _DOMAINS.get(domain_id, _GENERIC)
    fell_back = context is _GENERIC and domain_id != "generic"
    if fell_back:
        logger.warning(f"Domaine '{domain_id}' non trouvé, utilisation de 'generic'")
    return context
|
||||||
|
|
||||||
|
|
||||||
|
def register_domain(context: DomainContext) -> None:
    """Add (or replace) a business-domain context in the registry."""
    _DOMAINS[context.domain_id] = context
    logger.info(f"Domaine '{context.domain_id}' enregistré ({context.name})")
|
||||||
|
|
||||||
|
|
||||||
|
def list_domains() -> List[Dict[str, Any]]:
    """Return a summary dict for every registered domain."""
    return [domain.to_dict() for domain in _DOMAINS.values()]
|
||||||
346
agent_v0/server_v1/replay_learner.py
Normal file
346
agent_v0/server_v1/replay_learner.py
Normal file
@@ -0,0 +1,346 @@
|
|||||||
|
# agent_v0/server_v1/replay_learner.py
|
||||||
|
"""
|
||||||
|
Module Learning — apprentissage à partir des résultats de replay.
|
||||||
|
|
||||||
|
Responsabilité : "Chaque replay qui échoue enrichit notre base de connaissances."
|
||||||
|
|
||||||
|
Stocke les résultats structurés de chaque action (succès/échec, méthode,
|
||||||
|
screenshots, correction appliquée) pour :
|
||||||
|
1. Améliorer les décisions futures (Policy)
|
||||||
|
2. Affiner les stratégies de grounding (quel méthode marche pour quel écran)
|
||||||
|
3. Détecter les patterns récurrents d'échec
|
||||||
|
4. Alimenter le fine-tuning futur du VLM
|
||||||
|
|
||||||
|
Format inspiré du cahier des charges (docs/VISION_RPA_INTELLIGENT.md) :
|
||||||
|
{
|
||||||
|
"screenshot_before": "base64...",
|
||||||
|
"action": {"type": "click", "target": "Bouton Valider", ...},
|
||||||
|
"screenshot_after": "base64...",
|
||||||
|
"success": true,
|
||||||
|
"resolution_method": "som_text_match",
|
||||||
|
"correction": null,
|
||||||
|
"human_validated": false
|
||||||
|
}
|
||||||
|
|
||||||
|
Ref: docs/VISION_RPA_INTELLIGENT.md — Boucle d'apprentissage (section 4)
|
||||||
|
Ref: docs/PLAN_ACTEUR_V1.md — Phase 3 : apprentissage continu
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
from dataclasses import dataclass, field, asdict
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Dict, List, Optional
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# Répertoire par défaut pour le stockage des résultats d'apprentissage
|
||||||
|
_DEFAULT_LEARNING_DIR = os.environ.get(
|
||||||
|
"RPA_LEARNING_DIR", "data/learning/replay_results"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class ActionOutcome:
    """Structured outcome of a single replayed action."""

    # Identifiers
    session_id: str
    action_id: str
    action_type: str        # click, type, key_combo
    timestamp: float = 0.0  # Epoch seconds

    # Context
    target_description: str = ""  # "Clic sur 'Enregistrer' dans Bloc-notes"
    intention: str = ""           # "Sauvegarder le fichier"
    window_title: str = ""

    # Resolution
    resolution_method: str = ""   # server_som, anchor_template, vlm_direct...
    resolution_score: float = 0.0
    resolution_elapsed_ms: float = 0.0

    # Outcome
    success: bool = False
    error: str = ""
    warning: str = ""

    # Verification (Critic); None means "not checked"
    pixel_verified: Optional[bool] = None
    semantic_verified: Optional[bool] = None
    critic_detail: str = ""

    # Recovery
    recovery_action: str = ""  # undo, escape, close, none
    recovery_success: bool = False

    # Screenshots (relative paths, not base64 — too heavy)
    screenshot_before_path: str = ""
    screenshot_after_path: str = ""

    # Human correction (feedback loop)
    human_validated: bool = False
    human_correction: str = ""  # Description of the correction

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-ready dictionary."""
        return asdict(self)
|
||||||
|
|
||||||
|
|
||||||
|
class ReplayLearner:
|
||||||
|
"""Apprentissage à partir des résultats de replay.
|
||||||
|
|
||||||
|
Stocke chaque action dans un fichier JSONL par session.
|
||||||
|
Fournit des requêtes pour améliorer les décisions futures.
|
||||||
|
|
||||||
|
Usage côté serveur (api_stream.py) :
|
||||||
|
learner = ReplayLearner()
|
||||||
|
learner.record(outcome)
|
||||||
|
|
||||||
|
Usage côté Policy :
|
||||||
|
history = learner.query_similar(target_description, window_title)
|
||||||
|
# → "La dernière fois, template matching a échoué mais SoM a trouvé"
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, learning_dir: str = ""):
|
||||||
|
self.learning_dir = Path(learning_dir or _DEFAULT_LEARNING_DIR)
|
||||||
|
self.learning_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
# Cache mémoire des derniers résultats (pour requêtes rapides)
|
||||||
|
self._recent: List[ActionOutcome] = []
|
||||||
|
self._max_recent = 500
|
||||||
|
|
||||||
|
def record(self, outcome: ActionOutcome) -> None:
|
||||||
|
"""Enregistrer le résultat d'une action.
|
||||||
|
|
||||||
|
Écrit en append dans un fichier JSONL par session.
|
||||||
|
Garde aussi en mémoire pour les requêtes rapides.
|
||||||
|
"""
|
||||||
|
if not outcome.timestamp:
|
||||||
|
outcome.timestamp = time.time()
|
||||||
|
|
||||||
|
# Fichier JSONL par session
|
||||||
|
session_file = self.learning_dir / f"{outcome.session_id}.jsonl"
|
||||||
|
try:
|
||||||
|
with open(session_file, "a") as f:
|
||||||
|
f.write(json.dumps(outcome.to_dict(), ensure_ascii=False) + "\n")
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Learning: échec écriture {session_file}: {e}")
|
||||||
|
|
||||||
|
# Cache mémoire
|
||||||
|
self._recent.append(outcome)
|
||||||
|
if len(self._recent) > self._max_recent:
|
||||||
|
self._recent = self._recent[-self._max_recent:]
|
||||||
|
|
||||||
|
# Log résumé
|
||||||
|
status = "OK" if outcome.success else "ÉCHEC"
|
||||||
|
logger.info(
|
||||||
|
f"Learning: {status} {outcome.action_type} "
|
||||||
|
f"'{outcome.target_description[:40]}' "
|
||||||
|
f"[{outcome.resolution_method}] "
|
||||||
|
f"critic={'OK' if outcome.semantic_verified else 'NON' if outcome.semantic_verified is False else '?'}"
|
||||||
|
)
|
||||||
|
|
||||||
|
def record_from_replay_result(
|
||||||
|
self,
|
||||||
|
session_id: str,
|
||||||
|
action: Dict[str, Any],
|
||||||
|
result: Dict[str, Any],
|
||||||
|
verification: Optional[Dict] = None,
|
||||||
|
) -> None:
|
||||||
|
"""Enregistrer depuis les structures existantes du replay.
|
||||||
|
|
||||||
|
Convertit le format action/result du replay en ActionOutcome.
|
||||||
|
Appelé depuis api_stream.py après chaque action de replay.
|
||||||
|
"""
|
||||||
|
target_spec = action.get("target_spec", {})
|
||||||
|
outcome = ActionOutcome(
|
||||||
|
session_id=session_id,
|
||||||
|
action_id=action.get("action_id", ""),
|
||||||
|
action_type=action.get("type", ""),
|
||||||
|
target_description=target_spec.get("by_text", ""),
|
||||||
|
intention=action.get("intention", ""),
|
||||||
|
window_title=target_spec.get("window_title", ""),
|
||||||
|
resolution_method=result.get("resolution_method", ""),
|
||||||
|
resolution_score=result.get("resolution_score", 0.0),
|
||||||
|
resolution_elapsed_ms=result.get("resolution_elapsed_ms", 0.0),
|
||||||
|
success=result.get("success", False),
|
||||||
|
error=result.get("error", ""),
|
||||||
|
warning=result.get("warning", ""),
|
||||||
|
)
|
||||||
|
|
||||||
|
if verification:
|
||||||
|
outcome.pixel_verified = verification.get("verified")
|
||||||
|
outcome.semantic_verified = verification.get("semantic_verified")
|
||||||
|
outcome.critic_detail = verification.get("semantic_detail", "")
|
||||||
|
|
||||||
|
self.record(outcome)
|
||||||
|
|
||||||
|
def query_similar(
|
||||||
|
self,
|
||||||
|
target_description: str = "",
|
||||||
|
window_title: str = "",
|
||||||
|
limit: int = 10,
|
||||||
|
) -> List[Dict[str, Any]]:
|
||||||
|
"""Chercher des résultats similaires dans l'historique.
|
||||||
|
|
||||||
|
Recherche par correspondance textuelle sur la description de cible
|
||||||
|
et le titre de fenêtre. Retourne les plus récents en premier.
|
||||||
|
|
||||||
|
Utile pour le Policy : "qu'est-ce qui a marché avant pour cette cible ?"
|
||||||
|
"""
|
||||||
|
results = []
|
||||||
|
target_lower = target_description.lower()
|
||||||
|
window_lower = window_title.lower()
|
||||||
|
|
||||||
|
for outcome in reversed(self._recent):
|
||||||
|
score = 0
|
||||||
|
if target_lower and target_lower in outcome.target_description.lower():
|
||||||
|
score += 2
|
||||||
|
if window_lower and window_lower in outcome.window_title.lower():
|
||||||
|
score += 1
|
||||||
|
if score > 0:
|
||||||
|
results.append({
|
||||||
|
"outcome": outcome.to_dict(),
|
||||||
|
"relevance": score,
|
||||||
|
})
|
||||||
|
if len(results) >= limit:
|
||||||
|
break
|
||||||
|
|
||||||
|
return sorted(results, key=lambda x: x["relevance"], reverse=True)
|
||||||
|
|
||||||
|
def best_strategy_for(
|
||||||
|
self,
|
||||||
|
target_description: str = "",
|
||||||
|
window_title: str = "",
|
||||||
|
) -> Optional[str]:
|
||||||
|
"""Quelle méthode de grounding a le mieux marché pour cette cible ?
|
||||||
|
|
||||||
|
Consulte l'historique et retourne la méthode qui a le plus haut
|
||||||
|
taux de succès pour des cibles similaires. C'est la boucle
|
||||||
|
d'apprentissage : les replays passés améliorent les suivants.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Nom de la meilleure méthode (ex: "som_text_match") ou None
|
||||||
|
"""
|
||||||
|
similar = self.query_similar(target_description, window_title, limit=20)
|
||||||
|
if not similar:
|
||||||
|
return None
|
||||||
|
|
||||||
|
# Compter les succès par méthode
|
||||||
|
method_stats: Dict[str, List[int]] = {} # method → [successes, total]
|
||||||
|
for entry in similar:
|
||||||
|
outcome = entry["outcome"]
|
||||||
|
method = outcome.get("resolution_method", "")
|
||||||
|
if not method:
|
||||||
|
continue
|
||||||
|
if method not in method_stats:
|
||||||
|
method_stats[method] = [0, 0]
|
||||||
|
method_stats[method][1] += 1
|
||||||
|
if outcome.get("success"):
|
||||||
|
method_stats[method][0] += 1
|
||||||
|
|
||||||
|
if not method_stats:
|
||||||
|
return None
|
||||||
|
|
||||||
|
# Retourner la méthode avec le meilleur taux de succès (minimum 2 occurrences)
|
||||||
|
best = None
|
||||||
|
best_rate = 0.0
|
||||||
|
for method, (successes, total) in method_stats.items():
|
||||||
|
if total >= 2: # Au moins 2 essais pour être significatif
|
||||||
|
rate = successes / total
|
||||||
|
if rate > best_rate:
|
||||||
|
best_rate = rate
|
||||||
|
best = method
|
||||||
|
|
||||||
|
if best:
|
||||||
|
logger.info(
|
||||||
|
f"Learning: meilleure stratégie pour '{target_description[:30]}' → "
|
||||||
|
f"{best} ({best_rate:.0%} sur {method_stats[best][1]} essais)"
|
||||||
|
)
|
||||||
|
|
||||||
|
return best
|
||||||
|
|
||||||
|
def consolidate_workflow(
|
||||||
|
self,
|
||||||
|
actions: list,
|
||||||
|
session_id: str = "",
|
||||||
|
) -> int:
|
||||||
|
"""Consolider un workflow avec les apprentissages passés.
|
||||||
|
|
||||||
|
Pour chaque action du workflow, vérifie si l'historique suggère
|
||||||
|
une meilleure stratégie de résolution. Si oui, l'ajoute en
|
||||||
|
hint dans le target_spec de l'action.
|
||||||
|
|
||||||
|
Modifie les actions in-place. Retourne le nombre d'actions enrichies.
|
||||||
|
|
||||||
|
C'est la cross-pollination : un replay qui a réussi "Enregistrer"
|
||||||
|
via som_text améliore tous les futurs workflows qui cliquent sur "Enregistrer".
|
||||||
|
"""
|
||||||
|
enriched = 0
|
||||||
|
for action in actions:
|
||||||
|
if action.get("type") != "click":
|
||||||
|
continue
|
||||||
|
target_spec = action.get("target_spec", {})
|
||||||
|
by_text = target_spec.get("by_text", "")
|
||||||
|
window = target_spec.get("window_title", "")
|
||||||
|
if not by_text:
|
||||||
|
continue
|
||||||
|
|
||||||
|
best = self.best_strategy_for(by_text, window)
|
||||||
|
if best:
|
||||||
|
target_spec["_learned_strategy"] = best
|
||||||
|
enriched += 1
|
||||||
|
|
||||||
|
if enriched:
|
||||||
|
logger.info(
|
||||||
|
f"Consolidation : {enriched} actions enrichies par l'apprentissage "
|
||||||
|
f"(session {session_id})"
|
||||||
|
)
|
||||||
|
return enriched
|
||||||
|
|
||||||
|
def get_stats(self) -> Dict[str, Any]:
|
||||||
|
"""Statistiques globales des résultats de replay."""
|
||||||
|
if not self._recent:
|
||||||
|
return {"total": 0}
|
||||||
|
|
||||||
|
total = len(self._recent)
|
||||||
|
successes = sum(1 for o in self._recent if o.success)
|
||||||
|
methods = {}
|
||||||
|
for o in self._recent:
|
||||||
|
m = o.resolution_method or "unknown"
|
||||||
|
if m not in methods:
|
||||||
|
methods[m] = {"total": 0, "success": 0}
|
||||||
|
methods[m]["total"] += 1
|
||||||
|
if o.success:
|
||||||
|
methods[m]["success"] += 1
|
||||||
|
|
||||||
|
return {
|
||||||
|
"total": total,
|
||||||
|
"success_rate": round(successes / total, 3) if total > 0 else 0,
|
||||||
|
"methods": {
|
||||||
|
m: {
|
||||||
|
"total": v["total"],
|
||||||
|
"success_rate": round(v["success"] / v["total"], 3) if v["total"] > 0 else 0,
|
||||||
|
}
|
||||||
|
for m, v in methods.items()
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
def load_session(self, session_id: str) -> List[ActionOutcome]:
|
||||||
|
"""Charger tous les résultats d'une session depuis le fichier JSONL."""
|
||||||
|
session_file = self.learning_dir / f"{session_id}.jsonl"
|
||||||
|
if not session_file.is_file():
|
||||||
|
return []
|
||||||
|
|
||||||
|
outcomes = []
|
||||||
|
try:
|
||||||
|
with open(session_file) as f:
|
||||||
|
for line in f:
|
||||||
|
line = line.strip()
|
||||||
|
if line:
|
||||||
|
data = json.loads(line)
|
||||||
|
outcomes.append(ActionOutcome(**data))
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Learning: échec lecture {session_file}: {e}")
|
||||||
|
|
||||||
|
return outcomes
|
||||||
@@ -1,20 +1,24 @@
|
|||||||
# agent_v0/server_v1/replay_verifier.py
|
# agent_v0/server_v1/replay_verifier.py
|
||||||
"""
|
"""
|
||||||
ReplayVerifier — Vérification post-action pour le replay de workflows.
|
ReplayVerifier — Vérification post-action (Critic) pour le replay de workflows.
|
||||||
|
|
||||||
Compare les screenshots avant/après une action pour détecter si elle a eu
|
Deux niveaux de vérification :
|
||||||
un effet visible. Utilisé par l'API de replay pour décider si une action
|
1. PIXEL : Différence d'image avant/après (rapide, ~10ms)
|
||||||
a réussi ou si un retry est nécessaire.
|
- L'écran a-t-il changé ? Où ? De combien ?
|
||||||
|
2. SÉMANTIQUE : VLM évalue si le résultat correspond à l'attendu (~2-5s)
|
||||||
|
- L'action a-t-elle eu l'EFFET voulu ? (pas juste "des pixels ont bougé")
|
||||||
|
|
||||||
Stratégies de vérification :
|
Le niveau pixel existait déjà. Le niveau sémantique (Critic) est le chaînon
|
||||||
1. Différence d'image globale (avant == après → probablement rien ne s'est passé)
|
manquant identifié par comparaison avec Claude Computer Use et OpenAdapt.
|
||||||
2. Zone locale autour du clic (si l'action est un clic)
|
|
||||||
3. Détection de texte apparu (si l'action est une frappe)
|
Ref: docs/VISION_RPA_INTELLIGENT.md — étape VERIFY du pipeline.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
|
import os
|
||||||
|
import time
|
||||||
from dataclasses import dataclass, field
|
from dataclasses import dataclass, field
|
||||||
from typing import Any, Dict, Optional, Tuple
|
from typing import Any, Dict, List, Optional, Tuple
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -35,9 +39,13 @@ class VerificationResult:
|
|||||||
suggestion: str # "retry", "skip", "abort", "continue"
|
suggestion: str # "retry", "skip", "abort", "continue"
|
||||||
detail: str = "" # Description humaine du résultat
|
detail: str = "" # Description humaine du résultat
|
||||||
local_change_pct: float = 0.0 # % de changement dans la zone locale (si applicable)
|
local_change_pct: float = 0.0 # % de changement dans la zone locale (si applicable)
|
||||||
|
# Critic sémantique (VLM)
|
||||||
|
semantic_verified: Optional[bool] = None # None = pas de vérif sémantique
|
||||||
|
semantic_detail: str = "" # Explication du VLM
|
||||||
|
semantic_elapsed_ms: float = 0.0 # Temps de la vérif sémantique
|
||||||
|
|
||||||
def to_dict(self) -> Dict[str, Any]:
|
def to_dict(self) -> Dict[str, Any]:
|
||||||
return {
|
d = {
|
||||||
"verified": self.verified,
|
"verified": self.verified,
|
||||||
"confidence": round(self.confidence, 3),
|
"confidence": round(self.confidence, 3),
|
||||||
"changes_detected": self.changes_detected,
|
"changes_detected": self.changes_detected,
|
||||||
@@ -46,6 +54,11 @@ class VerificationResult:
|
|||||||
"detail": self.detail,
|
"detail": self.detail,
|
||||||
"local_change_pct": round(self.local_change_pct, 3),
|
"local_change_pct": round(self.local_change_pct, 3),
|
||||||
}
|
}
|
||||||
|
if self.semantic_verified is not None:
|
||||||
|
d["semantic_verified"] = self.semantic_verified
|
||||||
|
d["semantic_detail"] = self.semantic_detail
|
||||||
|
d["semantic_elapsed_ms"] = round(self.semantic_elapsed_ms, 1)
|
||||||
|
return d
|
||||||
|
|
||||||
|
|
||||||
class ReplayVerifier:
|
class ReplayVerifier:
|
||||||
@@ -345,3 +358,275 @@ class ReplayVerifier:
|
|||||||
f"(global={global_change_pct:.3f}%, local={local_change_pct:.3f}%)"
|
f"(global={global_change_pct:.3f}%, local={local_change_pct:.3f}%)"
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Critic sémantique — VLM évalue si le résultat correspond à l'attendu
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
def verify_with_critic(
|
||||||
|
self,
|
||||||
|
action: Dict[str, Any],
|
||||||
|
result: Dict[str, Any],
|
||||||
|
screenshot_before: Optional[str] = None,
|
||||||
|
screenshot_after: Optional[str] = None,
|
||||||
|
expected_result: str = "",
|
||||||
|
action_intention: str = "",
|
||||||
|
workflow_context: str = "",
|
||||||
|
) -> VerificationResult:
|
||||||
|
"""Vérification complète : pixel + sémantique (Critic).
|
||||||
|
|
||||||
|
Étape 1 : Vérification pixel (rapide, ~10ms) — l'écran a-t-il changé ?
|
||||||
|
Étape 2 : Vérification sémantique (VLM, ~2-5s) — le changement est-il le bon ?
|
||||||
|
|
||||||
|
La vérification sémantique n'est lancée que si :
|
||||||
|
- expected_result est fourni (description de l'état attendu après l'action)
|
||||||
|
- La vérification pixel a détecté un changement (sinon, pas besoin du VLM)
|
||||||
|
|
||||||
|
Args:
|
||||||
|
action: L'action exécutée
|
||||||
|
result: Le résultat rapporté par l'agent
|
||||||
|
screenshot_before: Screenshot avant l'action (base64)
|
||||||
|
screenshot_after: Screenshot après l'action (base64)
|
||||||
|
expected_result: Description de l'état attendu après l'action
|
||||||
|
action_intention: Ce que l'action était censée faire
|
||||||
|
workflow_context: Contexte global (progression, objectif)
|
||||||
|
"""
|
||||||
|
# Étape 1 : vérification pixel (existante)
|
||||||
|
pixel_result = self.verify_action(
|
||||||
|
action=action,
|
||||||
|
result=result,
|
||||||
|
screenshot_before=screenshot_before,
|
||||||
|
screenshot_after=screenshot_after,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Pas de description attendue → retourner le résultat pixel seul
|
||||||
|
if not expected_result:
|
||||||
|
return pixel_result
|
||||||
|
|
||||||
|
# Si aucun changement pixel ET suggestion retry → pas besoin du VLM
|
||||||
|
if not pixel_result.changes_detected and pixel_result.suggestion == "retry":
|
||||||
|
return pixel_result
|
||||||
|
|
||||||
|
# Étape 2 : vérification sémantique via VLM
|
||||||
|
semantic = self._verify_semantic(
|
||||||
|
screenshot_before=screenshot_before,
|
||||||
|
screenshot_after=screenshot_after,
|
||||||
|
expected_result=expected_result,
|
||||||
|
action_intention=action_intention,
|
||||||
|
workflow_context=workflow_context,
|
||||||
|
)
|
||||||
|
|
||||||
|
if semantic is None:
|
||||||
|
# VLM indisponible → garder le résultat pixel seul
|
||||||
|
return pixel_result
|
||||||
|
|
||||||
|
# Fusionner les résultats pixel + sémantique
|
||||||
|
return self._merge_results(pixel_result, semantic)
|
||||||
|
|
||||||
|
def _verify_semantic(
|
||||||
|
self,
|
||||||
|
screenshot_before: Optional[str],
|
||||||
|
screenshot_after: Optional[str],
|
||||||
|
expected_result: str,
|
||||||
|
action_intention: str = "",
|
||||||
|
workflow_context: str = "",
|
||||||
|
) -> Optional[Dict[str, Any]]:
|
||||||
|
"""Appeler le VLM pour évaluer sémantiquement le résultat de l'action.
|
||||||
|
|
||||||
|
Utilise gemma4 en mode texte+images (Docker port 11435) pour analyser
|
||||||
|
les screenshots avant/après et dire si le résultat attendu est atteint.
|
||||||
|
|
||||||
|
Sur Citrix (image plate), c'est la SEULE façon de vérifier intelligemment
|
||||||
|
si une action a eu l'effet voulu.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict avec {"verified": bool, "detail": str, "elapsed_ms": float}
|
||||||
|
ou None si le VLM est indisponible.
|
||||||
|
"""
|
||||||
|
import requests as _requests
|
||||||
|
|
||||||
|
if not screenshot_after:
|
||||||
|
return None
|
||||||
|
|
||||||
|
gemma4_port = os.environ.get("GEMMA4_PORT", "11435")
|
||||||
|
gemma4_url = f"http://localhost:{gemma4_port}/api/chat"
|
||||||
|
|
||||||
|
# Construire le prompt Critic
|
||||||
|
context_parts = []
|
||||||
|
if action_intention:
|
||||||
|
context_parts.append(f"Action effectuée : {action_intention}")
|
||||||
|
if workflow_context:
|
||||||
|
context_parts.append(f"Contexte : {workflow_context}")
|
||||||
|
context_str = "\n".join(context_parts)
|
||||||
|
|
||||||
|
# Deux images : avant et après
|
||||||
|
images = []
|
||||||
|
prompt_images = ""
|
||||||
|
if screenshot_before and screenshot_after:
|
||||||
|
images = [screenshot_before, screenshot_after]
|
||||||
|
prompt_images = (
|
||||||
|
"Image 1 = écran AVANT l'action.\n"
|
||||||
|
"Image 2 = écran APRÈS l'action.\n"
|
||||||
|
)
|
||||||
|
elif screenshot_after:
|
||||||
|
images = [screenshot_after]
|
||||||
|
prompt_images = "Image = écran APRÈS l'action.\n"
|
||||||
|
|
||||||
|
prompt = (
|
||||||
|
f"Tu es le VÉRIFICATEUR d'un robot RPA. Tu dois dire si l'action a réussi.\n\n"
|
||||||
|
f"{prompt_images}"
|
||||||
|
f"{context_str}\n\n"
|
||||||
|
f"Résultat attendu : {expected_result}\n\n"
|
||||||
|
f"Est-ce que le résultat attendu est visible à l'écran ?\n"
|
||||||
|
f"Réponds EXACTEMENT dans ce format :\n"
|
||||||
|
f"VERDICT: OUI ou NON\n"
|
||||||
|
f"RAISON: explication courte (1 ligne)"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Injecter le contexte métier si disponible
|
||||||
|
from .domain_context import get_domain_context
|
||||||
|
domain = get_domain_context(os.environ.get("RPA_DOMAIN", "generic"))
|
||||||
|
messages = []
|
||||||
|
if domain.system_prompt:
|
||||||
|
messages.append({"role": "system", "content": domain.system_prompt})
|
||||||
|
messages.append({"role": "user", "content": prompt, "images": images})
|
||||||
|
|
||||||
|
try:
|
||||||
|
t_start = time.time()
|
||||||
|
resp = _requests.post(
|
||||||
|
gemma4_url,
|
||||||
|
json={
|
||||||
|
"model": "gemma4:e4b",
|
||||||
|
"messages": messages,
|
||||||
|
"stream": False,
|
||||||
|
"think": True,
|
||||||
|
"options": {"temperature": 0.1, "num_predict": 800},
|
||||||
|
},
|
||||||
|
timeout=30,
|
||||||
|
)
|
||||||
|
elapsed_ms = (time.time() - t_start) * 1000
|
||||||
|
|
||||||
|
if not resp.ok:
|
||||||
|
logger.warning(f"Critic VLM HTTP {resp.status_code}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
content = resp.json().get("message", {}).get("content", "").strip()
|
||||||
|
|
||||||
|
# Parser le verdict
|
||||||
|
verified = None
|
||||||
|
detail = content
|
||||||
|
for line in content.split("\n"):
|
||||||
|
line_upper = line.strip().upper()
|
||||||
|
if line_upper.startswith("VERDICT:"):
|
||||||
|
verdict_text = line_upper.replace("VERDICT:", "").strip()
|
||||||
|
if "OUI" in verdict_text or "YES" in verdict_text:
|
||||||
|
verified = True
|
||||||
|
elif "NON" in verdict_text or "NO" in verdict_text:
|
||||||
|
verified = False
|
||||||
|
elif line_upper.startswith("RAISON:"):
|
||||||
|
detail = line.strip().replace("RAISON:", "").strip()
|
||||||
|
|
||||||
|
if verified is None:
|
||||||
|
# Fallback : chercher OUI/NON dans le texte brut
|
||||||
|
upper = content.upper()
|
||||||
|
if "OUI" in upper and "NON" not in upper:
|
||||||
|
verified = True
|
||||||
|
elif "NON" in upper:
|
||||||
|
verified = False
|
||||||
|
else:
|
||||||
|
logger.warning(f"Critic VLM réponse non parsable : {content[:100]}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
logger.info(
|
||||||
|
f"Critic VLM : {'OUI' if verified else 'NON'} en {elapsed_ms:.0f}ms — {detail[:80]}"
|
||||||
|
)
|
||||||
|
return {
|
||||||
|
"verified": verified,
|
||||||
|
"detail": detail,
|
||||||
|
"elapsed_ms": elapsed_ms,
|
||||||
|
}
|
||||||
|
|
||||||
|
except _requests.Timeout:
|
||||||
|
logger.warning("Critic VLM timeout (30s)")
|
||||||
|
return None
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Critic VLM erreur : {e}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
def _merge_results(
|
||||||
|
self,
|
||||||
|
pixel: VerificationResult,
|
||||||
|
semantic: Dict[str, Any],
|
||||||
|
) -> VerificationResult:
|
||||||
|
"""Fusionner les résultats pixel et sémantique.
|
||||||
|
|
||||||
|
Matrice de décision :
|
||||||
|
- Pixel OK + Semantic OK → vérifié (confiance haute)
|
||||||
|
- Pixel OK + Semantic NON → INATTENDU (l'écran a changé mais pas comme prévu)
|
||||||
|
- Pixel NON + Semantic OK → vérifié quand même (le VLM voit le résultat)
|
||||||
|
- Pixel NON + Semantic NON → échec (retry)
|
||||||
|
"""
|
||||||
|
sem_ok = semantic["verified"]
|
||||||
|
pix_ok = pixel.changes_detected
|
||||||
|
|
||||||
|
if pix_ok and sem_ok:
|
||||||
|
# Tout concorde — confiance maximale
|
||||||
|
return VerificationResult(
|
||||||
|
verified=True,
|
||||||
|
confidence=min(0.95, pixel.confidence + 0.2),
|
||||||
|
changes_detected=True,
|
||||||
|
change_area_pct=pixel.change_area_pct,
|
||||||
|
local_change_pct=pixel.local_change_pct,
|
||||||
|
suggestion="continue",
|
||||||
|
detail=f"Pixel OK + Critic OK : {semantic['detail']}",
|
||||||
|
semantic_verified=True,
|
||||||
|
semantic_detail=semantic["detail"],
|
||||||
|
semantic_elapsed_ms=semantic["elapsed_ms"],
|
||||||
|
)
|
||||||
|
|
||||||
|
elif pix_ok and not sem_ok:
|
||||||
|
# L'écran a changé mais pas dans le bon sens → INATTENDU
|
||||||
|
# C'est le cas le plus important : popup, erreur, mauvaise fenêtre
|
||||||
|
return VerificationResult(
|
||||||
|
verified=False,
|
||||||
|
confidence=0.7,
|
||||||
|
changes_detected=True,
|
||||||
|
change_area_pct=pixel.change_area_pct,
|
||||||
|
local_change_pct=pixel.local_change_pct,
|
||||||
|
suggestion="retry",
|
||||||
|
detail=f"Pixel OK mais Critic NON : {semantic['detail']}",
|
||||||
|
semantic_verified=False,
|
||||||
|
semantic_detail=semantic["detail"],
|
||||||
|
semantic_elapsed_ms=semantic["elapsed_ms"],
|
||||||
|
)
|
||||||
|
|
||||||
|
elif not pix_ok and sem_ok:
|
||||||
|
# Peu de pixels ont changé mais le VLM dit que le résultat est bon
|
||||||
|
# Ex: focus sur un onglet déjà visible (changement subtil)
|
||||||
|
return VerificationResult(
|
||||||
|
verified=True,
|
||||||
|
confidence=0.6,
|
||||||
|
changes_detected=False,
|
||||||
|
change_area_pct=pixel.change_area_pct,
|
||||||
|
local_change_pct=pixel.local_change_pct,
|
||||||
|
suggestion="continue",
|
||||||
|
detail=f"Pixel inchangé mais Critic OK : {semantic['detail']}",
|
||||||
|
semantic_verified=True,
|
||||||
|
semantic_detail=semantic["detail"],
|
||||||
|
semantic_elapsed_ms=semantic["elapsed_ms"],
|
||||||
|
)
|
||||||
|
|
||||||
|
else:
|
||||||
|
# Rien n'a changé et le VLM confirme → échec
|
||||||
|
return VerificationResult(
|
||||||
|
verified=False,
|
||||||
|
confidence=0.8,
|
||||||
|
changes_detected=False,
|
||||||
|
change_area_pct=pixel.change_area_pct,
|
||||||
|
local_change_pct=pixel.local_change_pct,
|
||||||
|
suggestion="retry",
|
||||||
|
detail=f"Pixel inchangé + Critic NON : {semantic['detail']}",
|
||||||
|
semantic_verified=False,
|
||||||
|
semantic_detail=semantic["detail"],
|
||||||
|
semantic_elapsed_ms=semantic["elapsed_ms"],
|
||||||
|
)
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
596
agent_v0/server_v1/task_planner.py
Normal file
596
agent_v0/server_v1/task_planner.py
Normal file
@@ -0,0 +1,596 @@
|
|||||||
|
# agent_v0/server_v1/task_planner.py
|
||||||
|
"""
|
||||||
|
TaskPlanner — Planificateur MACRO pour RPA Vision V3.
|
||||||
|
|
||||||
|
Responsabilité : comprendre un ordre en langage naturel et l'exécuter.
|
||||||
|
|
||||||
|
"Traite les dossiers de janvier" →
|
||||||
|
1. Comprendre l'instruction (gemma4)
|
||||||
|
2. Trouver le workflow appris correspondant
|
||||||
|
3. Identifier les paramètres/variables
|
||||||
|
4. Exécuter (replay avec substitution) ou planifier (actions libres)
|
||||||
|
|
||||||
|
C'est le niveau MACRO de l'architecture 3 niveaux :
|
||||||
|
MACRO (TaskPlanner) → décompose et orchestre
|
||||||
|
MÉSO (Policy/Observer/Critic) → décide et vérifie
|
||||||
|
MICRO (Grounding/Executor) → localise et clique
|
||||||
|
|
||||||
|
Ref: docs/PLAN_ACTEUR_V1.md — Phase 3 : Planificateur
|
||||||
|
Ref: docs/VISION_RPA_INTELLIGENT.md — "Il observe" → "Il devient autonome"
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from typing import Any, Dict, List, Optional
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class TaskPlan:
    """Execution plan produced by the MACRO planner."""
    instruction: str  # Original user instruction
    understood: bool = False  # The instruction was understood
    workflow_match: str = ""  # ID of the matching workflow (if found)
    workflow_name: str = ""  # Name of the matching workflow
    match_confidence: float = 0.0  # Match confidence (0-1)
    parameters: Dict[str, Any] = field(default_factory=dict)  # Extracted variables
    is_loop: bool = False  # Loop over a list of items
    loop_source: str = ""  # Where the items come from (screen, file, query)
    steps: List[Dict[str, Any]] = field(default_factory=list)  # Planned actions
    mode: str = ""  # "replay" (known workflow) or "free" (generated actions)
    error: str = ""

    def to_dict(self) -> Dict[str, Any]:
        """Serialize for API responses (steps collapsed to a count)."""
        return dict(
            instruction=self.instruction,
            understood=self.understood,
            workflow_match=self.workflow_match,
            workflow_name=self.workflow_name,
            match_confidence=round(self.match_confidence, 3),
            parameters=self.parameters,
            is_loop=self.is_loop,
            loop_source=self.loop_source,
            steps_count=len(self.steps),
            mode=self.mode,
            error=self.error,
        )
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class TaskResult:
    """Outcome of executing a task."""
    instruction: str
    success: bool
    total_items: int = 1  # Number of items processed (1 when there is no loop)
    completed_items: int = 0
    failed_items: int = 0
    results: List[Dict[str, Any]] = field(default_factory=list)
    elapsed_s: float = 0.0
    summary: str = ""

    def to_dict(self) -> Dict[str, Any]:
        """Serialize for API responses (per-item results omitted)."""
        return dict(
            instruction=self.instruction,
            success=self.success,
            total_items=self.total_items,
            completed_items=self.completed_items,
            failed_items=self.failed_items,
            elapsed_s=round(self.elapsed_s, 1),
            summary=self.summary,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class TaskPlanner:
|
||||||
|
"""Planificateur MACRO — comprend les instructions et orchestre l'exécution.
|
||||||
|
|
||||||
|
Usage :
|
||||||
|
planner = TaskPlanner()
|
||||||
|
plan = planner.understand("traite les dossiers de janvier")
|
||||||
|
result = planner.execute(plan, replay_callback=launch_replay)
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, gemma4_port: str = "", domain_id: str = ""):
|
||||||
|
self._gemma4_port = gemma4_port or os.environ.get("GEMMA4_PORT", "11435")
|
||||||
|
self._gemma4_url = f"http://localhost:{self._gemma4_port}/api/chat"
|
||||||
|
self._domain_id = domain_id or os.environ.get("RPA_DOMAIN", "generic")
|
||||||
|
|
||||||
|
# Charger le contexte métier
|
||||||
|
try:
|
||||||
|
from .domain_context import get_domain_context
|
||||||
|
self._domain = get_domain_context(self._domain_id)
|
||||||
|
except Exception:
|
||||||
|
self._domain = None
|
||||||
|
|
||||||
|
def understand(
|
||||||
|
self,
|
||||||
|
instruction: str,
|
||||||
|
available_workflows: Optional[List[Dict[str, Any]]] = None,
|
||||||
|
screen_context: str = "",
|
||||||
|
) -> TaskPlan:
|
||||||
|
"""Comprendre une instruction en langage naturel.
|
||||||
|
|
||||||
|
Étape 1 : gemma4 analyse l'instruction et identifie :
|
||||||
|
- Le type de tâche (ouvrir, traiter, rechercher, etc.)
|
||||||
|
- Le workflow correspondant (s'il en existe un)
|
||||||
|
- Les paramètres/variables (nom, date, fichier, etc.)
|
||||||
|
- Si c'est une boucle (traiter TOUS les dossiers)
|
||||||
|
|
||||||
|
Args:
|
||||||
|
instruction: L'ordre de l'utilisateur ("traite les dossiers de janvier")
|
||||||
|
available_workflows: Liste des workflows connus [{name, description, session_id}]
|
||||||
|
screen_context: Description de l'écran actuel (pour le contexte)
|
||||||
|
"""
|
||||||
|
import requests as _requests
|
||||||
|
|
||||||
|
plan = TaskPlan(instruction=instruction)
|
||||||
|
|
||||||
|
# Construire la liste des workflows disponibles pour le prompt (top 10)
|
||||||
|
workflows_desc = "Aucun workflow enregistré."
|
||||||
|
if available_workflows:
|
||||||
|
top_workflows = available_workflows[:10]
|
||||||
|
lines = []
|
||||||
|
for i, wf in enumerate(top_workflows):
|
||||||
|
name = wf.get("name", wf.get("session_id", f"workflow_{i}"))
|
||||||
|
desc = wf.get("description", "")
|
||||||
|
sid = wf.get("session_id", "")
|
||||||
|
# Montrer la description métier pour aider le matching sémantique
|
||||||
|
label = f"{name}"
|
||||||
|
if desc:
|
||||||
|
label += f" — {desc}"
|
||||||
|
lines.append(f" {i+1}. {label} (id={sid})")
|
||||||
|
workflows_desc = "\n".join(lines)
|
||||||
|
|
||||||
|
# Contexte métier
|
||||||
|
domain_prompt = ""
|
||||||
|
if self._domain and self._domain.system_prompt:
|
||||||
|
domain_prompt = f"\nCONTEXTE MÉTIER :\n{self._domain.system_prompt}\n"
|
||||||
|
|
||||||
|
prompt = (
|
||||||
|
f"Tu es le PLANIFICATEUR d'un robot RPA (Léa). "
|
||||||
|
f"Analyse l'ordre utilisateur et identifie le workflow correspondant.\n"
|
||||||
|
f"{domain_prompt}\n"
|
||||||
|
f"WORKFLOWS DISPONIBLES :\n{workflows_desc}\n\n"
|
||||||
|
f"ORDRE : \"{instruction}\"\n\n"
|
||||||
|
f"RÈGLE DE MATCHING :\n"
|
||||||
|
f"- Compare l'INTENTION de l'ordre avec la DESCRIPTION de chaque workflow\n"
|
||||||
|
f"- \"Ouvre le bloc-notes\" correspond à un workflow décrit \"Ouvrir Bloc-notes via recherche\"\n"
|
||||||
|
f"- Un workflow qui utilise la même application EST un match même si les mots diffèrent\n"
|
||||||
|
f"- Si aucun workflow ne correspond, réponds WORKFLOW: AUCUN\n\n"
|
||||||
|
f"Réponds EXACTEMENT dans ce format (une ligne par champ) :\n"
|
||||||
|
f"COMPRIS: OUI\n"
|
||||||
|
f"WORKFLOW: <numéro> (ou AUCUN)\n"
|
||||||
|
f"CONFIANCE: <0.0 à 1.0>\n"
|
||||||
|
f"PARAMETRES: clé1=valeur1, clé2=valeur2 (ou AUCUN)\n"
|
||||||
|
f"BOUCLE: OUI ou NON\n"
|
||||||
|
f"SOURCE_BOUCLE: écran, fichier, ou aucun\n"
|
||||||
|
f"PLAN:\n"
|
||||||
|
f"1. première étape\n"
|
||||||
|
f"2. deuxième étape\n"
|
||||||
|
)
|
||||||
|
|
||||||
|
try:
|
||||||
|
resp = _requests.post(
|
||||||
|
self._gemma4_url,
|
||||||
|
json={
|
||||||
|
"model": "gemma4:e4b",
|
||||||
|
"messages": [{"role": "user", "content": prompt}],
|
||||||
|
"stream": False,
|
||||||
|
"think": True,
|
||||||
|
"options": {"temperature": 0.2, "num_predict": 800},
|
||||||
|
},
|
||||||
|
timeout=120,
|
||||||
|
)
|
||||||
|
|
||||||
|
if not resp.ok:
|
||||||
|
plan.error = f"gemma4 HTTP {resp.status_code}"
|
||||||
|
return plan
|
||||||
|
|
||||||
|
content = resp.json().get("message", {}).get("content", "").strip()
|
||||||
|
logger.info(f"TaskPlanner: réponse gemma4 ({len(content)} chars)")
|
||||||
|
|
||||||
|
# Parser la réponse
|
||||||
|
plan = self._parse_understanding(plan, content, available_workflows)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
plan.error = f"gemma4 erreur: {e}"
|
||||||
|
logger.warning(f"TaskPlanner: {plan.error}")
|
||||||
|
|
||||||
|
return plan
|
||||||
|
|
||||||
|
def _parse_understanding(
|
||||||
|
self,
|
||||||
|
plan: TaskPlan,
|
||||||
|
content: str,
|
||||||
|
available_workflows: Optional[List[Dict]] = None,
|
||||||
|
) -> TaskPlan:
|
||||||
|
"""Parser la réponse de gemma4 pour construire le plan.
|
||||||
|
|
||||||
|
Tolérant aux variations de format :
|
||||||
|
- "COMPRIS : OUI" ou "COMPRIS: oui" ou "**COMPRIS:** OUI"
|
||||||
|
- Numéros de workflow : "1", "1.", "#1", "Workflow 1"
|
||||||
|
- Paramètres : "clé=valeur" ou "clé: valeur" sur la même ligne ou les suivantes
|
||||||
|
"""
|
||||||
|
import re
|
||||||
|
|
||||||
|
# Nettoyer le markdown (gras, italique)
|
||||||
|
content_clean = re.sub(r'\*{1,2}([^*]+)\*{1,2}', r'\1', content)
|
||||||
|
|
||||||
|
in_params_section = False
|
||||||
|
in_plan_section = False
|
||||||
|
|
||||||
|
for line in content_clean.split("\n"):
|
||||||
|
line_clean = line.strip()
|
||||||
|
if not line_clean:
|
||||||
|
continue
|
||||||
|
upper = line_clean.upper()
|
||||||
|
|
||||||
|
# --- COMPRIS ---
|
||||||
|
if re.match(r'^COMPRIS\s*[:=]', upper):
|
||||||
|
val = re.split(r'[:=]', upper, 1)[1].strip()
|
||||||
|
plan.understood = "OUI" in val or "YES" in val or "TRUE" in val
|
||||||
|
in_params_section = False
|
||||||
|
in_plan_section = False
|
||||||
|
|
||||||
|
# --- WORKFLOW ---
|
||||||
|
elif re.match(r'^WORKFLOW\s*[:=]', upper):
|
||||||
|
val = line_clean.split(":", 1)[1].strip() if ":" in line_clean else line_clean.split("=", 1)[1].strip()
|
||||||
|
val_upper = val.upper().strip()
|
||||||
|
in_params_section = False
|
||||||
|
in_plan_section = False
|
||||||
|
if val_upper in ("AUCUN", "NONE", "NON", "N/A", "-", ""):
|
||||||
|
continue
|
||||||
|
# Extraire le numéro : "1", "1.", "#1", "Workflow 1", "1 (Bloc-notes)"
|
||||||
|
num_match = re.search(r'(\d+)', val)
|
||||||
|
if num_match and available_workflows:
|
||||||
|
idx = int(num_match.group(1)) - 1
|
||||||
|
if 0 <= idx < len(available_workflows):
|
||||||
|
wf = available_workflows[idx]
|
||||||
|
plan.workflow_match = wf.get("session_id", "")
|
||||||
|
plan.workflow_name = wf.get("name", "")
|
||||||
|
plan.match_confidence = 0.8
|
||||||
|
plan.mode = "replay"
|
||||||
|
|
||||||
|
# --- CONFIANCE ---
|
||||||
|
elif re.match(r'^CONFIANCE\s*[:=]', upper):
|
||||||
|
val = re.split(r'[:=]', line_clean, 1)[1].strip()
|
||||||
|
in_params_section = False
|
||||||
|
in_plan_section = False
|
||||||
|
# Extraire un float : "0.9", "0,9", "90%"
|
||||||
|
float_match = re.search(r'(\d+[.,]\d+)', val)
|
||||||
|
if float_match:
|
||||||
|
try:
|
||||||
|
plan.match_confidence = float(float_match.group(1).replace(",", "."))
|
||||||
|
except ValueError:
|
||||||
|
pass
|
||||||
|
elif "%" in val:
|
||||||
|
pct_match = re.search(r'(\d+)', val)
|
||||||
|
if pct_match:
|
||||||
|
plan.match_confidence = int(pct_match.group(1)) / 100.0
|
||||||
|
|
||||||
|
# --- PARAMETRES ---
|
||||||
|
elif re.match(r'^PARAM[EÈ]TRES?\s*[:=]', upper):
|
||||||
|
val = re.split(r'[:=]', line_clean, 1)[1].strip()
|
||||||
|
in_plan_section = False
|
||||||
|
val_upper = val.upper().strip()
|
||||||
|
if val_upper in ("AUCUN", "NONE", "NON", "N/A", "-"):
|
||||||
|
in_params_section = False
|
||||||
|
continue
|
||||||
|
# Vide = paramètres sur les lignes suivantes
|
||||||
|
in_params_section = True
|
||||||
|
if val and val_upper not in ("", ):
|
||||||
|
# Paramètres sur la même ligne : "clé1=val1, clé2=val2"
|
||||||
|
self._extract_params_from_line(val, plan)
|
||||||
|
|
||||||
|
# --- BOUCLE ---
|
||||||
|
elif re.match(r'^BOUCLE\s*[:=]', upper):
|
||||||
|
val = re.split(r'[:=]', upper, 1)[1].strip()
|
||||||
|
plan.is_loop = "OUI" in val or "YES" in val or "TRUE" in val
|
||||||
|
in_params_section = False
|
||||||
|
in_plan_section = False
|
||||||
|
|
||||||
|
# --- SOURCE_BOUCLE ---
|
||||||
|
elif re.match(r'^SOURCE[_ ]BOUCLE\s*[:=]', upper):
|
||||||
|
plan.loop_source = re.split(r'[:=]', line_clean, 1)[1].strip()
|
||||||
|
in_params_section = False
|
||||||
|
in_plan_section = False
|
||||||
|
|
||||||
|
# --- PLAN ---
|
||||||
|
elif re.match(r'^PLAN\s*[:=]?\s*$', upper) or upper == "PLAN:":
|
||||||
|
in_plan_section = True
|
||||||
|
in_params_section = False
|
||||||
|
|
||||||
|
# --- Lignes de contenu (paramètres d'abord, puis étapes) ---
|
||||||
|
elif in_params_section and ("=" in line_clean or ": " in line_clean):
|
||||||
|
self._extract_params_from_line(line_clean, plan)
|
||||||
|
|
||||||
|
elif in_plan_section and re.match(r'^(\d+[.)]\s+|- )', line_clean):
|
||||||
|
plan.steps.append({"description": line_clean})
|
||||||
|
|
||||||
|
elif re.match(r'^(\d+[.)]\s+|- )', line_clean) and not in_params_section:
|
||||||
|
# Étape numérotée en dehors d'une section explicite
|
||||||
|
plan.steps.append({"description": line_clean})
|
||||||
|
|
||||||
|
# Si pas de workflow trouvé mais compris → mode libre
|
||||||
|
if plan.understood and not plan.workflow_match:
|
||||||
|
plan.mode = "free"
|
||||||
|
|
||||||
|
return plan
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _extract_params_from_line(text: str, plan: TaskPlan) -> None:
|
||||||
|
"""Extraire des paramètres clé=valeur ou clé: valeur d'une ligne."""
|
||||||
|
import re
|
||||||
|
text = text.strip().strip("- ")
|
||||||
|
# Ignorer les labels de section
|
||||||
|
if re.match(r'^(COMPRIS|WORKFLOW|BOUCLE|SOURCE|PLAN|CONFIANCE)', text.upper()):
|
||||||
|
return
|
||||||
|
# Essayer clé=valeur d'abord
|
||||||
|
if "=" in text:
|
||||||
|
for part in text.split(","):
|
||||||
|
part = part.strip()
|
||||||
|
if "=" in part:
|
||||||
|
k, v = part.split("=", 1)
|
||||||
|
k, v = k.strip().strip("- "), v.strip()
|
||||||
|
if k and v and v.upper() not in ("AUCUN", "NONE"):
|
||||||
|
plan.parameters[k] = v
|
||||||
|
# Sinon clé: valeur (mais pas les labels de section)
|
||||||
|
elif ": " in text:
|
||||||
|
k, v = text.split(": ", 1)
|
||||||
|
k, v = k.strip().strip("- "), v.strip()
|
||||||
|
if k and v and len(k) < 30 and v.upper() not in ("AUCUN", "NONE"):
|
||||||
|
plan.parameters[k] = v
|
||||||
|
|
||||||
|
def execute(
|
||||||
|
self,
|
||||||
|
plan: TaskPlan,
|
||||||
|
replay_callback=None,
|
||||||
|
machine_id: str = "default",
|
||||||
|
) -> TaskResult:
|
||||||
|
"""Exécuter un plan.
|
||||||
|
|
||||||
|
Deux modes :
|
||||||
|
1. "replay" : relancer un workflow enregistré avec substitution de variables
|
||||||
|
2. "free" : exécuter les actions planifiées par gemma4
|
||||||
|
|
||||||
|
Args:
|
||||||
|
plan: Le plan généré par understand()
|
||||||
|
replay_callback: Fonction qui lance un replay
|
||||||
|
signature: (session_id, machine_id, params) → replay_id
|
||||||
|
machine_id: Machine cible pour l'exécution
|
||||||
|
"""
|
||||||
|
t_start = time.time()
|
||||||
|
result = TaskResult(instruction=plan.instruction, success=False)
|
||||||
|
|
||||||
|
if not plan.understood:
|
||||||
|
result.summary = f"Instruction non comprise : {plan.error or 'réponse gemma4 invalide'}"
|
||||||
|
return result
|
||||||
|
|
||||||
|
if plan.mode == "replay" and plan.workflow_match:
|
||||||
|
# Mode replay : relancer un workflow connu
|
||||||
|
result = self._execute_replay(plan, replay_callback, machine_id)
|
||||||
|
|
||||||
|
elif plan.mode == "free" and plan.steps:
|
||||||
|
# Mode libre : actions planifiées par gemma4
|
||||||
|
result = self._execute_free(plan, replay_callback, machine_id)
|
||||||
|
|
||||||
|
else:
|
||||||
|
result.summary = "Pas de workflow correspondant et pas d'actions planifiées"
|
||||||
|
|
||||||
|
result.elapsed_s = time.time() - t_start
|
||||||
|
return result
|
||||||
|
|
||||||
|
def _execute_replay(
|
||||||
|
self,
|
||||||
|
plan: TaskPlan,
|
||||||
|
replay_callback,
|
||||||
|
machine_id: str,
|
||||||
|
) -> TaskResult:
|
||||||
|
"""Exécuter en mode replay (workflow connu)."""
|
||||||
|
result = TaskResult(instruction=plan.instruction, success=False)
|
||||||
|
|
||||||
|
if not replay_callback:
|
||||||
|
result.summary = "Pas de callback replay configuré"
|
||||||
|
return result
|
||||||
|
|
||||||
|
if plan.is_loop:
|
||||||
|
# Boucle : TODO — lister les éléments puis itérer
|
||||||
|
# Pour l'instant, exécution simple
|
||||||
|
logger.info(
|
||||||
|
f"TaskPlanner: boucle détectée mais pas encore implémentée, "
|
||||||
|
f"exécution simple du workflow {plan.workflow_name}"
|
||||||
|
)
|
||||||
|
|
||||||
|
try:
|
||||||
|
replay_id = replay_callback(
|
||||||
|
session_id=plan.workflow_match,
|
||||||
|
machine_id=machine_id,
|
||||||
|
params=plan.parameters,
|
||||||
|
)
|
||||||
|
result.success = True
|
||||||
|
result.completed_items = 1
|
||||||
|
result.total_items = 1
|
||||||
|
result.summary = (
|
||||||
|
f"Workflow '{plan.workflow_name}' lancé (replay={replay_id})"
|
||||||
|
f" avec paramètres {plan.parameters}" if plan.parameters else ""
|
||||||
|
)
|
||||||
|
result.results.append({
|
||||||
|
"replay_id": replay_id,
|
||||||
|
"workflow": plan.workflow_name,
|
||||||
|
"params": plan.parameters,
|
||||||
|
})
|
||||||
|
except Exception as e:
|
||||||
|
result.summary = f"Erreur lancement replay : {e}"
|
||||||
|
logger.error(f"TaskPlanner: {result.summary}")
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
def _execute_free(
|
||||||
|
self,
|
||||||
|
plan: TaskPlan,
|
||||||
|
replay_callback,
|
||||||
|
machine_id: str,
|
||||||
|
) -> TaskResult:
|
||||||
|
"""Exécuter en mode libre (actions planifiées par gemma4)."""
|
||||||
|
result = TaskResult(instruction=plan.instruction, success=False)
|
||||||
|
|
||||||
|
# Convertir les étapes en actions replay
|
||||||
|
actions = self._steps_to_actions(plan.steps, plan.parameters)
|
||||||
|
|
||||||
|
if not actions:
|
||||||
|
result.summary = "Impossible de convertir le plan en actions exécutables"
|
||||||
|
return result
|
||||||
|
|
||||||
|
if replay_callback:
|
||||||
|
try:
|
||||||
|
replay_id = replay_callback(
|
||||||
|
actions=actions,
|
||||||
|
machine_id=machine_id,
|
||||||
|
task_description=plan.instruction,
|
||||||
|
)
|
||||||
|
result.success = True
|
||||||
|
result.completed_items = 1
|
||||||
|
result.summary = f"Plan libre exécuté ({len(actions)} actions, replay={replay_id})"
|
||||||
|
except Exception as e:
|
||||||
|
result.summary = f"Erreur exécution plan libre : {e}"
|
||||||
|
else:
|
||||||
|
result.summary = f"Plan prêt ({len(actions)} actions) mais pas de callback"
|
||||||
|
result.results = actions
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
def _steps_to_actions(
|
||||||
|
self,
|
||||||
|
steps: List[Dict[str, Any]],
|
||||||
|
parameters: Dict[str, Any],
|
||||||
|
) -> List[Dict[str, Any]]:
|
||||||
|
"""Convertir les étapes textuelles en actions replay.
|
||||||
|
|
||||||
|
Utilise gemma4 pour traduire chaque étape en action structurée.
|
||||||
|
Les types d'actions supportés : click, type, key_combo, wait.
|
||||||
|
"""
|
||||||
|
import re
|
||||||
|
import requests as _requests
|
||||||
|
|
||||||
|
steps_text = "\n".join(
|
||||||
|
s.get("description", str(s)) for s in steps
|
||||||
|
)
|
||||||
|
|
||||||
|
prompt = (
|
||||||
|
"Convertis ces étapes RPA en actions JSON.\n\n"
|
||||||
|
f"ÉTAPES :\n{steps_text}\n\n"
|
||||||
|
f"PARAMÈTRES : {json.dumps(parameters, ensure_ascii=False)}\n\n"
|
||||||
|
"TYPES D'ACTIONS DISPONIBLES :\n"
|
||||||
|
'- Cliquer : {"type": "click", "target_spec": {"by_text": "texte du bouton"}}\n'
|
||||||
|
'- Taper du texte : {"type": "type", "text": "texte à taper"}\n'
|
||||||
|
'- Raccourci clavier : {"type": "key_combo", "keys": ["ctrl", "s"]}\n'
|
||||||
|
'- Attendre : {"type": "wait", "duration_ms": 2000}\n\n'
|
||||||
|
"RÈGLES :\n"
|
||||||
|
"- UNE action JSON par ligne\n"
|
||||||
|
"- Pas de commentaires, pas de texte autour, JUSTE le JSON\n"
|
||||||
|
"- Utilise les paramètres fournis dans les valeurs\n\n"
|
||||||
|
"ACTIONS :\n"
|
||||||
|
)
|
||||||
|
|
||||||
|
try:
|
||||||
|
resp = _requests.post(
|
||||||
|
self._gemma4_url,
|
||||||
|
json={
|
||||||
|
"model": "gemma4:e4b",
|
||||||
|
"messages": [{"role": "user", "content": prompt}],
|
||||||
|
"stream": False,
|
||||||
|
"think": True,
|
||||||
|
"options": {"temperature": 0.1, "num_predict": 1500},
|
||||||
|
},
|
||||||
|
timeout=120,
|
||||||
|
)
|
||||||
|
|
||||||
|
if not resp.ok:
|
||||||
|
return []
|
||||||
|
|
||||||
|
content = resp.json().get("message", {}).get("content", "")
|
||||||
|
return self._parse_actions_json(content)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"TaskPlanner: conversion étapes échouée : {e}")
|
||||||
|
return []
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _parse_actions_json(content: str) -> List[Dict[str, Any]]:
|
||||||
|
"""Parser des actions JSON depuis une réponse VLM.
|
||||||
|
|
||||||
|
Tolère :
|
||||||
|
- Un JSON par ligne
|
||||||
|
- Un tableau JSON [...]
|
||||||
|
- Du texte autour des JSON (markdown, commentaires)
|
||||||
|
- Des objets imbriqués (target_spec)
|
||||||
|
"""
|
||||||
|
import re
|
||||||
|
|
||||||
|
actions = []
|
||||||
|
valid_types = {"click", "type", "key_combo", "wait"}
|
||||||
|
|
||||||
|
# Stratégie 1 : essayer de parser comme un tableau JSON
|
||||||
|
array_match = re.search(r'\[[\s\S]*\]', content)
|
||||||
|
if array_match:
|
||||||
|
try:
|
||||||
|
parsed = json.loads(array_match.group())
|
||||||
|
if isinstance(parsed, list):
|
||||||
|
for item in parsed:
|
||||||
|
if isinstance(item, dict) and item.get("type") in valid_types:
|
||||||
|
if item["type"] == "click":
|
||||||
|
item["visual_mode"] = True
|
||||||
|
actions.append(item)
|
||||||
|
if actions:
|
||||||
|
return actions
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# Stratégie 2 : extraire les objets JSON individuels (supporte imbrication)
|
||||||
|
# Trouver chaque { ... } en gérant les accolades imbriquées
|
||||||
|
i = 0
|
||||||
|
while i < len(content):
|
||||||
|
if content[i] == '{':
|
||||||
|
depth = 0
|
||||||
|
start = i
|
||||||
|
while i < len(content):
|
||||||
|
if content[i] == '{':
|
||||||
|
depth += 1
|
||||||
|
elif content[i] == '}':
|
||||||
|
depth -= 1
|
||||||
|
if depth == 0:
|
||||||
|
candidate = content[start:i+1]
|
||||||
|
try:
|
||||||
|
action = json.loads(candidate)
|
||||||
|
if isinstance(action, dict) and action.get("type") in valid_types:
|
||||||
|
if action["type"] == "click":
|
||||||
|
action["visual_mode"] = True
|
||||||
|
actions.append(action)
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
pass
|
||||||
|
break
|
||||||
|
i += 1
|
||||||
|
i += 1
|
||||||
|
|
||||||
|
return actions
|
||||||
|
|
||||||
|
def list_capabilities(
|
||||||
|
self,
|
||||||
|
available_workflows: List[Dict[str, Any]],
|
||||||
|
) -> str:
|
||||||
|
"""Lister ce que Léa sait faire (pour l'interface utilisateur)."""
|
||||||
|
if not available_workflows:
|
||||||
|
return "Léa n'a pas encore appris de workflows. Enregistrez-en un d'abord."
|
||||||
|
|
||||||
|
lines = ["Léa sait faire :"]
|
||||||
|
for wf in available_workflows:
|
||||||
|
name = wf.get("name", "?")
|
||||||
|
desc = wf.get("description", "")
|
||||||
|
lines.append(f" - {name}" + (f" ({desc})" if desc else ""))
|
||||||
|
|
||||||
|
lines.append("")
|
||||||
|
lines.append("Dites-lui ce que vous voulez faire en langage naturel.")
|
||||||
|
return "\n".join(lines)
|
||||||
185
agent_v0/server_v1/workflow_replay.py
Normal file
185
agent_v0/server_v1/workflow_replay.py
Normal file
@@ -0,0 +1,185 @@
|
|||||||
|
"""
|
||||||
|
workflow_replay.py — Pont entre le WorkflowRunner et le replay Agent V1.
|
||||||
|
|
||||||
|
Convertit un Workflow enrichi (avec embeddings CLIP + FAISS) en actions
|
||||||
|
de replay pour l'Agent V1, avec vérification FAISS à chaque étape.
|
||||||
|
|
||||||
|
Architecture :
|
||||||
|
Workflow (nodes + edges + embeddings)
|
||||||
|
→ pour chaque edge : action + embedding du node source
|
||||||
|
→ FAISS vérifie que l'écran actuel correspond au node attendu
|
||||||
|
→ si OK : exécuter l'action normalement
|
||||||
|
→ si MISMATCH : stopper ou adapter
|
||||||
|
|
||||||
|
Auteur : Dom + Claude
|
||||||
|
Date : 5 avril 2026
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Dict, List, Optional
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def build_workflow_replay(
|
||||||
|
workflow_path: str,
|
||||||
|
session_dir: str,
|
||||||
|
faiss_manager=None,
|
||||||
|
clip_embedder=None,
|
||||||
|
) -> List[Dict[str, Any]]:
|
||||||
|
"""Convertir un Workflow enrichi en actions de replay avec vérification FAISS.
|
||||||
|
|
||||||
|
Chaque action de clic est enrichie avec :
|
||||||
|
- L'embedding CLIP du node source (pour vérification au replay)
|
||||||
|
- Le titre de fenêtre attendu
|
||||||
|
- Les textes OCR du node (pour le grounding)
|
||||||
|
|
||||||
|
Args:
|
||||||
|
workflow_path: Chemin vers le workflow JSON
|
||||||
|
session_dir: Répertoire de la session (pour les screenshots/crops)
|
||||||
|
faiss_manager: FAISSManager pré-chargé (optionnel, créé si None)
|
||||||
|
clip_embedder: CLIPEmbedder pré-chargé (optionnel, créé si None)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Liste d'actions prêtes pour la queue de replay Agent V1.
|
||||||
|
"""
|
||||||
|
import json
|
||||||
|
import uuid
|
||||||
|
|
||||||
|
# Charger le workflow
|
||||||
|
with open(workflow_path) as f:
|
||||||
|
wf_data = json.load(f)
|
||||||
|
|
||||||
|
nodes = {n["node_id"]: n for n in wf_data.get("nodes", [])}
|
||||||
|
edges = wf_data.get("edges", [])
|
||||||
|
entry_nodes = wf_data.get("entry_nodes", [])
|
||||||
|
|
||||||
|
if not nodes or not edges:
|
||||||
|
logger.warning("Workflow vide : %d nodes, %d edges", len(nodes), len(edges))
|
||||||
|
return []
|
||||||
|
|
||||||
|
logger.info(
|
||||||
|
"Workflow '%s' chargé : %d nodes, %d edges",
|
||||||
|
wf_data.get("name", "?"), len(nodes), len(edges),
|
||||||
|
)
|
||||||
|
|
||||||
|
# Construire la séquence d'actions depuis le graphe (BFS linéaire)
|
||||||
|
actions = []
|
||||||
|
visited = set()
|
||||||
|
current_node_id = entry_nodes[0] if entry_nodes else list(nodes.keys())[0]
|
||||||
|
|
||||||
|
while current_node_id and current_node_id not in visited:
|
||||||
|
visited.add(current_node_id)
|
||||||
|
node = nodes.get(current_node_id)
|
||||||
|
if not node:
|
||||||
|
break
|
||||||
|
|
||||||
|
# Trouver l'edge sortant
|
||||||
|
outgoing = [e for e in edges if e.get("from_node") == current_node_id]
|
||||||
|
if not outgoing:
|
||||||
|
break
|
||||||
|
|
||||||
|
edge = outgoing[0] # Premier edge (linéaire)
|
||||||
|
action_data = edge.get("action", {})
|
||||||
|
next_node_id = edge.get("to_node")
|
||||||
|
next_node = nodes.get(next_node_id, {})
|
||||||
|
|
||||||
|
# Extraire les infos du node source pour la vérification
|
||||||
|
node_metadata = node.get("metadata", {})
|
||||||
|
node_title = node_metadata.get("window_title", "")
|
||||||
|
|
||||||
|
# Extraire les infos de l'action
|
||||||
|
action_type = action_data.get("type", "unknown")
|
||||||
|
target = action_data.get("target", {})
|
||||||
|
params = action_data.get("parameters", {})
|
||||||
|
|
||||||
|
if action_type == "compound":
|
||||||
|
# Actions compound : décomposer en étapes
|
||||||
|
steps = params.get("steps", [])
|
||||||
|
for step in steps:
|
||||||
|
step_type = step.get("type", "unknown")
|
||||||
|
step_action = {
|
||||||
|
"action_id": f"wf_{uuid.uuid4().hex[:8]}",
|
||||||
|
"type": _map_action_type(step_type),
|
||||||
|
"workflow_node": current_node_id,
|
||||||
|
"expected_window_title": node_title,
|
||||||
|
}
|
||||||
|
|
||||||
|
if step_type == "mouse_click":
|
||||||
|
step_action["x_pct"] = step.get("x_pct", 0)
|
||||||
|
step_action["y_pct"] = step.get("y_pct", 0)
|
||||||
|
step_action["button"] = step.get("button", "left")
|
||||||
|
step_action["visual_mode"] = True
|
||||||
|
# Target spec pour le grounding
|
||||||
|
step_action["target_spec"] = {
|
||||||
|
"by_text": target.get("by_text", ""),
|
||||||
|
"by_role": target.get("by_role", ""),
|
||||||
|
"by_text_source": "ocr" if target.get("by_text") else "",
|
||||||
|
"window_title": node_title,
|
||||||
|
"original_position": {
|
||||||
|
"y_relative": "",
|
||||||
|
"x_relative": "",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
# Ajouter le crop anchor si disponible
|
||||||
|
_attach_anchor(step_action, step, session_dir)
|
||||||
|
|
||||||
|
elif step_type in ("text_input", "key_press"):
|
||||||
|
if step_type == "text_input":
|
||||||
|
step_action["type"] = "type"
|
||||||
|
step_action["text"] = step.get("text", "")
|
||||||
|
else:
|
||||||
|
step_action["type"] = "key_combo"
|
||||||
|
step_action["keys"] = step.get("keys", [])
|
||||||
|
|
||||||
|
elif step_type == "wait":
|
||||||
|
step_action["type"] = "wait"
|
||||||
|
step_action["duration_ms"] = step.get("duration_ms", 500)
|
||||||
|
|
||||||
|
actions.append(step_action)
|
||||||
|
|
||||||
|
# Passer au node suivant
|
||||||
|
current_node_id = next_node_id
|
||||||
|
|
||||||
|
# Ajouter expected_window_title pour la post-vérification
|
||||||
|
click_indices = [i for i, a in enumerate(actions) if a.get("type") == "click"]
|
||||||
|
for j, ci in enumerate(click_indices):
|
||||||
|
if j + 1 < len(click_indices):
|
||||||
|
next_ci = click_indices[j + 1]
|
||||||
|
next_title = actions[next_ci].get("expected_window_title", "")
|
||||||
|
if next_title:
|
||||||
|
actions[ci]["expected_window_title"] = next_title
|
||||||
|
|
||||||
|
logger.info("Workflow → %d actions de replay", len(actions))
|
||||||
|
return actions
|
||||||
|
|
||||||
|
|
||||||
|
def _map_action_type(step_type: str) -> str:
|
||||||
|
"""Mapper les types d'action du workflow vers les types de replay."""
|
||||||
|
mapping = {
|
||||||
|
"mouse_click": "click",
|
||||||
|
"text_input": "type",
|
||||||
|
"key_press": "key_combo",
|
||||||
|
"wait": "wait",
|
||||||
|
"scroll": "scroll",
|
||||||
|
}
|
||||||
|
return mapping.get(step_type, step_type)
|
||||||
|
|
||||||
|
|
||||||
|
def _attach_anchor(action: dict, step: dict, session_dir: str) -> None:
|
||||||
|
"""Attacher le crop anchor au target_spec si disponible."""
|
||||||
|
import base64
|
||||||
|
|
||||||
|
# Chercher le crop dans le session_dir
|
||||||
|
screenshot_id = step.get("screenshot_id", "")
|
||||||
|
if screenshot_id and session_dir:
|
||||||
|
crop_path = Path(session_dir) / "shots" / f"{screenshot_id}_crop.png"
|
||||||
|
if crop_path.is_file():
|
||||||
|
action["target_spec"]["anchor_image_base64"] = base64.b64encode(
|
||||||
|
crop_path.read_bytes()
|
||||||
|
).decode()
|
||||||
@@ -300,7 +300,7 @@ _shared_engine: Optional[SomEngine] = None
|
|||||||
_shared_lock = __import__("threading").Lock()
|
_shared_lock = __import__("threading").Lock()
|
||||||
|
|
||||||
|
|
||||||
def get_shared_engine(device: str = "cuda") -> Optional[SomEngine]:
|
def get_shared_engine(device: str = "cpu") -> Optional[SomEngine]:
|
||||||
"""Singleton SomEngine partagé entre tous les modules."""
|
"""Singleton SomEngine partagé entre tous les modules."""
|
||||||
global _shared_engine
|
global _shared_engine
|
||||||
if _shared_engine is None:
|
if _shared_engine is None:
|
||||||
|
|||||||
@@ -68,12 +68,19 @@ class TokenManager:
|
|||||||
logger.info(f"Loading token config. RPA_TOKEN_ADMIN present: {bool(admin_token)}")
|
logger.info(f"Loading token config. RPA_TOKEN_ADMIN present: {bool(admin_token)}")
|
||||||
logger.info(f"Loading token config. RPA_TOKEN_READONLY present: {bool(readonly_token)}")
|
logger.info(f"Loading token config. RPA_TOKEN_READONLY present: {bool(readonly_token)}")
|
||||||
if admin_token:
|
if admin_token:
|
||||||
logger.info(f"RPA_TOKEN_ADMIN value: {admin_token[:8]}...")
|
logger.info("RPA_TOKEN_ADMIN configuré")
|
||||||
if readonly_token:
|
if readonly_token:
|
||||||
logger.info(f"RPA_TOKEN_READONLY value: {readonly_token[:8]}...")
|
logger.info("RPA_TOKEN_READONLY configuré")
|
||||||
|
|
||||||
# Clé secrète pour signer les tokens
|
# Clé secrète pour signer les tokens — OBLIGATOIRE en production
|
||||||
self.secret_key = os.getenv("TOKEN_SECRET_KEY", "dev-token-secret-change-in-production")
|
self.secret_key = os.getenv("TOKEN_SECRET_KEY", "")
|
||||||
|
if not self.secret_key:
|
||||||
|
logger.warning(
|
||||||
|
"TOKEN_SECRET_KEY non défini — utilisation d'une clé aléatoire. "
|
||||||
|
"Définir TOKEN_SECRET_KEY dans .env.local pour la production."
|
||||||
|
)
|
||||||
|
import secrets
|
||||||
|
self.secret_key = secrets.token_hex(32)
|
||||||
|
|
||||||
# Tokens statiques pour rétrocompatibilité
|
# Tokens statiques pour rétrocompatibilité
|
||||||
self.admin_tokens = set()
|
self.admin_tokens = set()
|
||||||
@@ -89,24 +96,26 @@ class TokenManager:
|
|||||||
self.admin_tokens.add(admin_token)
|
self.admin_tokens.add(admin_token)
|
||||||
logger.info(f"Added RPA_TOKEN_ADMIN to admin_tokens")
|
logger.info(f"Added RPA_TOKEN_ADMIN to admin_tokens")
|
||||||
|
|
||||||
# Temporary fix: Add production tokens directly
|
# Tokens de production : lus EXCLUSIVEMENT depuis les variables d'environnement.
|
||||||
prod_admin_token = "73cf0db73f9a5064e79afebba96c85338be65cc2060b9c1d42c3ea5dd7d4e490"
|
# Ne JAMAIS hardcoder de tokens dans le code source.
|
||||||
prod_readonly_token = "7eea1de415cc69c02381ce09ff63aeebf3e1d9b476d54aa6730ba9de849e3dc6"
|
prod_admin_token = os.getenv("RPA_PROD_ADMIN_TOKEN", "")
|
||||||
self.admin_tokens.add(prod_admin_token)
|
prod_readonly_token = os.getenv("RPA_PROD_READONLY_TOKEN", "")
|
||||||
logger.info(f"Added hardcoded production admin token")
|
if prod_admin_token:
|
||||||
|
self.admin_tokens.add(prod_admin_token)
|
||||||
|
logger.info("Added RPA_PROD_ADMIN_TOKEN to admin_tokens")
|
||||||
|
|
||||||
self.read_only_tokens = set()
|
self.read_only_tokens = set()
|
||||||
if os.getenv("READ_ONLY_TOKENS"):
|
if os.getenv("READ_ONLY_TOKENS"):
|
||||||
self.read_only_tokens = set(os.getenv("READ_ONLY_TOKENS").split(","))
|
self.read_only_tokens = set(os.getenv("READ_ONLY_TOKENS").split(","))
|
||||||
|
|
||||||
# Support tokens RPA Vision V3 (Fiche #23)
|
# Support tokens RPA Vision V3 (Fiche #23)
|
||||||
if readonly_token:
|
if readonly_token:
|
||||||
self.read_only_tokens.add(readonly_token)
|
self.read_only_tokens.add(readonly_token)
|
||||||
logger.info(f"Added RPA_TOKEN_READONLY to read_only_tokens")
|
logger.info("Added RPA_TOKEN_READONLY to read_only_tokens")
|
||||||
|
|
||||||
# Temporary fix: Add production tokens directly
|
if prod_readonly_token:
|
||||||
self.read_only_tokens.add(prod_readonly_token)
|
self.read_only_tokens.add(prod_readonly_token)
|
||||||
logger.info(f"Added hardcoded production readonly token")
|
logger.info("Added RPA_PROD_READONLY_TOKEN to read_only_tokens")
|
||||||
|
|
||||||
# Configuration expiration
|
# Configuration expiration
|
||||||
self.default_expiry_hours = int(os.getenv("TOKEN_EXPIRY_HOURS", "24"))
|
self.default_expiry_hours = int(os.getenv("TOKEN_EXPIRY_HOURS", "24"))
|
||||||
|
|||||||
@@ -85,7 +85,10 @@ echo ""
|
|||||||
# 4. Copier le package agent_v1 (code Python)
|
# 4. Copier le package agent_v1 (code Python)
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
echo "[4/7] Copie du code agent_v1..."
|
echo "[4/7] Copie du code agent_v1..."
|
||||||
# Copier tout le dossier en excluant les fichiers inutiles
|
# Copier tout le dossier en excluant uniquement les artefacts de build/test.
|
||||||
|
# IMPORTANT : ne PAS exclure les modules Python ui/ (shared_state, chat_window,
|
||||||
|
# capture_server) — ils sont requis par main.py et causent un crash au demarrage
|
||||||
|
# s'ils sont absents.
|
||||||
rsync -a \
|
rsync -a \
|
||||||
--exclude='__pycache__' \
|
--exclude='__pycache__' \
|
||||||
--exclude='*.pyc' \
|
--exclude='*.pyc' \
|
||||||
@@ -94,9 +97,6 @@ rsync -a \
|
|||||||
--exclude='logs/*.log' \
|
--exclude='logs/*.log' \
|
||||||
--exclude='.hypothesis' \
|
--exclude='.hypothesis' \
|
||||||
--exclude='*.md' \
|
--exclude='*.md' \
|
||||||
--exclude='ui/chat_window.py' \
|
|
||||||
--exclude='ui/shared_state.py' \
|
|
||||||
--exclude='ui/capture_server.py' \
|
|
||||||
"$PROJECT_ROOT/agent_v0/agent_v1/" \
|
"$PROJECT_ROOT/agent_v0/agent_v1/" \
|
||||||
"$PACKAGE_DIR/agent_v1/"
|
"$PACKAGE_DIR/agent_v1/"
|
||||||
|
|
||||||
@@ -132,6 +132,56 @@ echo "[6/7] Configuration des packages Python..."
|
|||||||
echo " Structure d'imports verifiee"
|
echo " Structure d'imports verifiee"
|
||||||
echo ""
|
echo ""
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------
|
||||||
|
# 6b. Verification des modules requis
|
||||||
|
# ---------------------------------------------------------------
|
||||||
|
echo "[6b/7] Verification des modules Python requis..."
|
||||||
|
MISSING=0
|
||||||
|
REQUIRED_FILES=(
|
||||||
|
"agent_v1/__init__.py"
|
||||||
|
"agent_v1/main.py"
|
||||||
|
"agent_v1/config.py"
|
||||||
|
"agent_v1/window_info.py"
|
||||||
|
"agent_v1/window_info_crossplatform.py"
|
||||||
|
"agent_v1/core/__init__.py"
|
||||||
|
"agent_v1/core/captor.py"
|
||||||
|
"agent_v1/core/executor.py"
|
||||||
|
"agent_v1/network/__init__.py"
|
||||||
|
"agent_v1/network/streamer.py"
|
||||||
|
"agent_v1/session/__init__.py"
|
||||||
|
"agent_v1/session/storage.py"
|
||||||
|
"agent_v1/ui/__init__.py"
|
||||||
|
"agent_v1/ui/shared_state.py"
|
||||||
|
"agent_v1/ui/smart_tray.py"
|
||||||
|
"agent_v1/ui/chat_window.py"
|
||||||
|
"agent_v1/ui/capture_server.py"
|
||||||
|
"agent_v1/ui/notifications.py"
|
||||||
|
"agent_v1/vision/__init__.py"
|
||||||
|
"agent_v1/vision/capturer.py"
|
||||||
|
"agent_v1/vision/blur_sensitive.py"
|
||||||
|
"agent_v1/vision/system_info.py"
|
||||||
|
"agent_v1/monitoring/__init__.py"
|
||||||
|
"lea_ui/__init__.py"
|
||||||
|
"lea_ui/server_client.py"
|
||||||
|
"run_agent_v1.py"
|
||||||
|
)
|
||||||
|
|
||||||
|
for req_file in "${REQUIRED_FILES[@]}"; do
|
||||||
|
if [[ ! -f "$PACKAGE_DIR/$req_file" ]]; then
|
||||||
|
echo -e " ${RED}MANQUANT : $req_file${NC}"
|
||||||
|
MISSING=$((MISSING + 1))
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
if [[ $MISSING -gt 0 ]]; then
|
||||||
|
echo ""
|
||||||
|
echo -e "${RED} ERREUR : $MISSING fichier(s) requis manquant(s) !${NC}"
|
||||||
|
echo -e "${RED} Le package est INCOMPLET — corrigez build_package.sh avant de deployer.${NC}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo -e " ${GREEN}Tous les ${#REQUIRED_FILES[@]} fichiers requis sont presents.${NC}"
|
||||||
|
echo ""
|
||||||
|
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
# 7. Creer le zip
|
# 7. Creer le zip
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|||||||
77
docs/CONSOLIDATION_20260405.md
Normal file
77
docs/CONSOLIDATION_20260405.md
Normal file
@@ -0,0 +1,77 @@
|
|||||||
|
# Consolidation — 5 avril 2026
|
||||||
|
|
||||||
|
## Ce qui fonctionne
|
||||||
|
|
||||||
|
### Pipeline d'entraînement (mesuré)
|
||||||
|
| Étape | Temps/screenshot | Extrapolation 1h |
|
||||||
|
|-------|-----------------|------------------|
|
||||||
|
| ScreenAnalyzer (OCR docTR) | 1.05s | 9 min |
|
||||||
|
| CLIP Embeddings (ViT-B-32) | 0.093s | 1 min |
|
||||||
|
| FAISS Index | <0.01s | <1s |
|
||||||
|
| GraphBuilder | 0.7s total | <1 min |
|
||||||
|
| **Total** | **1.2s/shot** | **~10 min** |
|
||||||
|
|
||||||
|
### Résolution visuelle
|
||||||
|
- **Grounding qwen2.5vl** : fonctionne sur les fenêtres croppées (by_text OCR/VLM)
|
||||||
|
- **Template matching** : fonctionne pour les icônes/taskbar (crop 80x80)
|
||||||
|
- **gemma4 enrichissement** : lit le texte des éléments sans OCR (onglets, icônes)
|
||||||
|
|
||||||
|
### Vérifications
|
||||||
|
- **CLIP** : vérifie la bonne application (sim > 0.75 sur fenêtre)
|
||||||
|
- **Titre fenêtre** : vérifie l'état par nom d'app (polling 10s)
|
||||||
|
- **Pré-vérification** : stoppe si mauvaise fenêtre AVANT de cliquer
|
||||||
|
|
||||||
|
### Acteur intelligent
|
||||||
|
- **gemma4 think=True** : décide PASSER/EXECUTER/STOPPER (5s, 75% correct)
|
||||||
|
- Branché dans l'executor quand target_not_found
|
||||||
|
- Mode texte CPU (pas d'image, pas de VRAM)
|
||||||
|
|
||||||
|
### Infrastructure
|
||||||
|
- Ollama 0.16.3 host (port 11434) : qwen2.5vl:7b GPU 9.4GB
|
||||||
|
- Docker Ollama 0.20 (port 11435) : gemma4:e4b GPU 3.6GB
|
||||||
|
- Jamais simultanés (auto-unload gemma4 après build)
|
||||||
|
- VM Win11 (192.168.122.14) : SSH + agent Léa
|
||||||
|
- Anti-bot : Bézier mouse + frappe char-by-char
|
||||||
|
|
||||||
|
## Problèmes identifiés (non résolus)
|
||||||
|
|
||||||
|
### P1 : Ambiguïté "Rechercher" (taskbar vs explorateur)
|
||||||
|
Le crop 80x80 de la barre de recherche Windows ressemble à la barre
|
||||||
|
de recherche de l'explorateur. Le template matching clique au mauvais
|
||||||
|
endroit. L'acteur doit apprendre à distinguer les contextes.
|
||||||
|
|
||||||
|
### P2 : Éléments VLM (by_text_source="vlm")
|
||||||
|
Le grounding qwen2.5vl ne trouve pas toujours les textes lus par
|
||||||
|
gemma4 (ex: "voiture elec" — texte d'onglet). L'acteur prend le
|
||||||
|
relais et décide PASSER quand l'état est déjà atteint.
|
||||||
|
|
||||||
|
### P3 : Premier chargement VLM lent
|
||||||
|
Le premier appel à qwen2.5vl ou gemma4 après redémarrage prend 30-60s
|
||||||
|
(chargement en VRAM). Les appels suivants sont rapides (0.2-5s).
|
||||||
|
|
||||||
|
## Architecture validée
|
||||||
|
|
||||||
|
```
|
||||||
|
ENREGISTREMENT (une fois)
|
||||||
|
Agent capture → screenshots + events
|
||||||
|
↓
|
||||||
|
BUILD (une fois, ~15s)
|
||||||
|
ScreenAnalyzer (OCR) → CLIP → FAISS → GraphBuilder
|
||||||
|
gemma4 enrichit les éléments sans OCR
|
||||||
|
→ Workflow enrichi + actions avec embeddings CLIP
|
||||||
|
|
||||||
|
REPLAY (à chaque exécution)
|
||||||
|
Fast path :
|
||||||
|
Titre fenêtre OK → grounding qwen2.5vl → clic → polling titre → OK
|
||||||
|
Slow path (quand target_not_found) :
|
||||||
|
Acteur gemma4 → PASSER / STOPPER / EXECUTER
|
||||||
|
```
|
||||||
|
|
||||||
|
## Métriques de replay sur VM
|
||||||
|
|
||||||
|
| Session | Résultat | Détail |
|
||||||
|
|---------|----------|--------|
|
||||||
|
| Notepad (dernier test) | 4/32 actions | Recherche + ouverture Bloc-notes OK, bloque sur onglet |
|
||||||
|
| Grounding texte OCR | 100% | Rechercher, Ouvrir, Fichier, Enregistrer |
|
||||||
|
| CLIP vérification | 100% | sim 0.87-0.99 |
|
||||||
|
| Acteur gemma4 | Validé unitairement | PASSER correct pour onglet actif |
|
||||||
146
docs/PLAN_ACTEUR_V1.md
Normal file
146
docs/PLAN_ACTEUR_V1.md
Normal file
@@ -0,0 +1,146 @@
|
|||||||
|
# Plan Acteur Intelligent — RPA Vision V3
|
||||||
|
|
||||||
|
**Date** : 5 avril 2026
|
||||||
|
**Validé par** : Dom + Claude
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Vision finale
|
||||||
|
|
||||||
|
L'utilisateur dit : "Traite-moi tous les dossiers du mois de janvier"
|
||||||
|
Le robot exécute. Autonome, adaptatif, intelligent.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Architecture 3 niveaux
|
||||||
|
|
||||||
|
```
|
||||||
|
MACRO → Planificateur LLM
|
||||||
|
"traite les dossiers de janvier"
|
||||||
|
→ décompose en étapes
|
||||||
|
→ boucle sur les éléments
|
||||||
|
→ rend compte des résultats
|
||||||
|
|
||||||
|
MÉSO → Acteur intelligent
|
||||||
|
Pour chaque étape :
|
||||||
|
→ regarde l'écran (gemma4)
|
||||||
|
→ comprend l'état
|
||||||
|
→ décide : agir / adapter / passer
|
||||||
|
→ exécute l'action
|
||||||
|
|
||||||
|
MICRO → Grounding + exécution
|
||||||
|
→ qwen2.5vl localise l'élément (bbox_2d)
|
||||||
|
→ Bézier mouse + char-by-char typing
|
||||||
|
→ Polling titre pour vérification
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## État actuel (5 avril 2026)
|
||||||
|
|
||||||
|
### MICRO — Opérationnel
|
||||||
|
- Grounding qwen2.5vl:7b sur GPU (Ollama 0.16.3, port 11434)
|
||||||
|
- Grounding sur fenêtre active (crop depuis screenshot live)
|
||||||
|
- Template matching 80x80 pour icônes (seuil 0.90)
|
||||||
|
- Position hint pour désambiguïser (en bas, en haut, à gauche)
|
||||||
|
- Pré-vérification titre fenêtre (par nom d'application)
|
||||||
|
- Post-vérification polling titre (max 10s)
|
||||||
|
- Bézier mouse + frappe char-by-char (anti-bot)
|
||||||
|
|
||||||
|
### MÉSO — À construire
|
||||||
|
- gemma4:e4b disponible sur GPU (Docker Ollama 0.20, port 11435)
|
||||||
|
- Enrichissement build_replay : gemma4 lit les éléments sans OCR ✓
|
||||||
|
- Auto-déchargement gemma4 après build ✓
|
||||||
|
- Manque : boucle perception → compréhension → décision au replay
|
||||||
|
|
||||||
|
### MACRO — À concevoir
|
||||||
|
- Pas encore démarré
|
||||||
|
- Nécessite : workflows comme templates avec variables
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Plan d'implémentation
|
||||||
|
|
||||||
|
### Phase 1 : Workflow comme template (build_replay)
|
||||||
|
|
||||||
|
**Objectif** : l'enregistrement produit un template paramétrable
|
||||||
|
|
||||||
|
Pour chaque action, stocker :
|
||||||
|
- `intention` : ce que l'utilisateur veut faire (gemma4)
|
||||||
|
- `variables` : les données qui changent (nom de fichier, texte, date)
|
||||||
|
- `expected_state` : description de l'écran avant l'action (gemma4)
|
||||||
|
- `expected_result` : description de l'écran après l'action
|
||||||
|
|
||||||
|
Exemple :
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"intention": "Ouvrir un fichier existant dans le Bloc-notes",
|
||||||
|
"action": "click",
|
||||||
|
"by_text": "voiture elec",
|
||||||
|
"variables": {"filename": "voiture elec"},
|
||||||
|
"expected_state": "Bloc-notes ouvert avec plusieurs onglets",
|
||||||
|
"expected_result": "L'onglet du fichier est actif"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Moteur** : gemma4 (Docker, port 11435) — une seule fois pendant le build
|
||||||
|
**Impact** : le build_replay devient plus riche, pas de changement au replay
|
||||||
|
|
||||||
|
### Phase 2 : Acteur décisionnel (replay)
|
||||||
|
|
||||||
|
**Objectif** : l'acteur compare l'état attendu et décide
|
||||||
|
|
||||||
|
Avant chaque action :
|
||||||
|
1. Capturer la fenêtre active
|
||||||
|
2. Comparer titre de fenêtre (rapide, gratuit)
|
||||||
|
3. Si mismatch → décrire l'état via gemma4 (texte pur, pas d'image)
|
||||||
|
4. Comparer état décrit vs expected_state
|
||||||
|
5. Décider : exécuter / adapter / passer
|
||||||
|
|
||||||
|
La décision est prise par gemma4 en **mode texte** (pas d'image = pas de VRAM) :
|
||||||
|
```
|
||||||
|
État attendu : "Bloc-notes ouvert avec un document vide"
|
||||||
|
État actuel : "Bloc-notes ouvert avec du contenu existant"
|
||||||
|
Action prévue : "Taper du texte"
|
||||||
|
→ Décision : "Ouvrir un nouvel onglet avant de taper"
|
||||||
|
```
|
||||||
|
|
||||||
|
**Moteur** : gemma4 texte (CPU, rapide) pour les décisions
|
||||||
|
**Impact** : changement dans l'executor côté agent
|
||||||
|
|
||||||
|
### Phase 3 : Planificateur (macro)
|
||||||
|
|
||||||
|
**Objectif** : décomposer une instruction en étapes
|
||||||
|
|
||||||
|
L'utilisateur dit : "Traite les dossiers de janvier"
|
||||||
|
Le planificateur :
|
||||||
|
1. Identifie le workflow appris ("traiter un dossier")
|
||||||
|
2. Liste les éléments (fichiers de janvier)
|
||||||
|
3. Pour chaque élément, instancie le template avec les variables
|
||||||
|
4. Lance l'acteur sur chaque instance
|
||||||
|
5. Collecte les résultats
|
||||||
|
|
||||||
|
**Moteur** : LLM (gemma4 ou plus gros modèle)
|
||||||
|
**Impact** : nouveau module de planification
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Contraintes techniques
|
||||||
|
|
||||||
|
| Ressource | Utilisation |
|
||||||
|
|-----------|-------------|
|
||||||
|
| GPU (12 GB) | Un seul modèle VLM à la fois |
|
||||||
|
| Port 11434 (host) | qwen2.5vl:7b — grounding (replay) |
|
||||||
|
| Port 11435 (Docker) | gemma4:e4b — compréhension (build + décision) |
|
||||||
|
| Séquencement | Build (gemma4) → auto-unload → Replay (qwen2.5vl) |
|
||||||
|
| Décisions replay | gemma4 en mode texte (CPU, pas de VRAM) |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Ordre de réalisation
|
||||||
|
|
||||||
|
1. **Phase 1** d'abord — enrichir l'enregistrement (intentions + variables + état)
|
||||||
|
2. **Phase 2** ensuite — acteur qui décide pendant le replay
|
||||||
|
3. **Phase 3** après — planificateur macro
|
||||||
|
|
||||||
|
Chaque phase est testable indépendamment.
|
||||||
@@ -26,6 +26,7 @@ markers =
|
|||||||
fiche8: Tests Fiche #8 (anti-bugs terrain)
|
fiche8: Tests Fiche #8 (anti-bugs terrain)
|
||||||
fiche9: Tests Fiche #9 (postconditions retry backoff)
|
fiche9: Tests Fiche #9 (postconditions retry backoff)
|
||||||
fiche10: Tests Fiche #10 (precision metrics engine)
|
fiche10: Tests Fiche #10 (precision metrics engine)
|
||||||
|
visual: Tests visuels sur captures réelles (nécessite serveur GPU)
|
||||||
|
|
||||||
# Note: Chemins Python gérés par tests/conftest.py
|
# Note: Chemins Python gérés par tests/conftest.py
|
||||||
|
|
||||||
|
|||||||
683
tests/unit/test_audit_trail.py
Normal file
683
tests/unit/test_audit_trail.py
Normal file
@@ -0,0 +1,683 @@
|
|||||||
|
# tests/unit/test_audit_trail.py
|
||||||
|
"""
|
||||||
|
Tests unitaires du module Audit Trail.
|
||||||
|
|
||||||
|
Vérifie l'enregistrement, la recherche, l'export CSV et le résumé
|
||||||
|
journalier des entrées d'audit.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import csv
|
||||||
|
import io
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import tempfile
|
||||||
|
from datetime import date, datetime, timedelta
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
# Importer depuis le bon chemin (agent_v0/server_v1/)
|
||||||
|
import sys
|
||||||
|
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
|
||||||
|
|
||||||
|
from agent_v0.server_v1.audit_trail import AuditEntry, AuditTrail
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Fixtures
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
@pytest.fixture
def audit_dir(tmp_path):
    """Temporary directory dedicated to audit files for one test."""
    audit_path = tmp_path.joinpath("audit")
    audit_path.mkdir()
    return str(audit_path)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
def audit(audit_dir):
    """AuditTrail instance writing into the temporary audit directory."""
    trail = AuditTrail(audit_dir=audit_dir)
    return trail
|
||||||
|
|
||||||
|
|
||||||
|
def _make_entry(**kwargs) -> AuditEntry:
    """Build an AuditEntry pre-filled with realistic default values.

    Any keyword argument overrides the matching default field before
    the entry is constructed.
    """
    base = {
        "timestamp": datetime.now().isoformat(),
        "session_id": "sess_test_001",
        "action_id": "act_001",
        "user_id": "tim_dupont",
        "user_name": "Marie Dupont",
        "machine_id": "PC-TIM-01",
        "action_type": "click",
        "action_detail": "Clic sur 'Enregistrer' dans DxCare",
        "target_app": "DxCare",
        "execution_mode": "assisted",
        "result": "success",
        "resolution_method": "som_text_match",
        "critic_result": "semantic_ok",
        "recovery_action": "",
        "domain": "tim_codage",
        "workflow_id": "wf_codage_cim10",
        "workflow_name": "Codage CIM-10 séjour",
        "duration_ms": 234.5,
    }
    # Overrides win over defaults; unknown keys propagate to AuditEntry
    # exactly as in a plain keyword call.
    return AuditEntry(**{**base, **kwargs})
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests AuditEntry
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
class TestAuditEntry:
    """Tests for the AuditEntry structure (construction and dict round-trips)."""

    def test_creation_basique(self):
        """Create an entry with every field populated."""
        entry = _make_entry()
        assert entry.user_id == "tim_dupont"
        assert entry.action_type == "click"
        assert entry.result == "success"
        assert entry.duration_ms == 234.5

    def test_to_dict(self):
        """Serialize an entry to a dictionary."""
        entry = _make_entry()
        d = entry.to_dict()
        assert isinstance(d, dict)
        assert d["user_id"] == "tim_dupont"
        assert d["domain"] == "tim_codage"
        assert d["duration_ms"] == 234.5

    def test_from_dict(self):
        """Deserialize an entry from a dictionary."""
        entry = _make_entry()
        d = entry.to_dict()
        restored = AuditEntry.from_dict(d)
        assert restored.user_id == entry.user_id
        assert restored.action_detail == entry.action_detail
        assert restored.duration_ms == entry.duration_ms

    def test_from_dict_ignore_unknown_keys(self):
        """Unknown keys are ignored (forward compatibility)."""
        d = {"user_id": "test", "unknown_field": "valeur", "future_key": 42}
        entry = AuditEntry.from_dict(d)
        assert entry.user_id == "test"
        # Unknown fields must not raise an error

    def test_to_dict_json_serializable(self):
        """The dictionary is JSON-serializable, accents included."""
        entry = _make_entry(action_detail="Clic sur 'Validé' — accent français")
        d = entry.to_dict()
        json_str = json.dumps(d, ensure_ascii=False)
        assert "accent français" in json_str

    def test_default_values(self):
        """An empty entry exposes coherent default values."""
        entry = AuditEntry()
        assert entry.timestamp == ""
        assert entry.user_id == ""
        assert entry.duration_ms == 0.0
        assert entry.result == ""
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests AuditTrail — enregistrement et lecture
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
class TestAuditTrailRecord:
    """Tests for recording entries into the per-day JSONL files."""

    def test_record_and_reload(self, audit, audit_dir):
        """Record one entry then read it back from the daily file."""
        entry = _make_entry()
        audit.record(entry)

        # The daily file must exist
        today = date.today().isoformat()
        filepath = Path(audit_dir) / f"audit_{today}.jsonl"
        assert filepath.exists()

        # Read the file directly
        with open(filepath, "r", encoding="utf-8") as f:
            lines = f.readlines()
        assert len(lines) == 1

        data = json.loads(lines[0])
        assert data["user_id"] == "tim_dupont"
        assert data["action_detail"] == "Clic sur 'Enregistrer' dans DxCare"

    def test_record_multiple_entries(self, audit, audit_dir):
        """Several entries accumulate in the same daily file."""
        for i in range(5):
            entry = _make_entry(action_id=f"act_{i:03d}")
            audit.record(entry)

        today = date.today().isoformat()
        filepath = Path(audit_dir) / f"audit_{today}.jsonl"
        with open(filepath, "r", encoding="utf-8") as f:
            lines = f.readlines()
        assert len(lines) == 5

    def test_record_auto_timestamp(self, audit):
        """The timestamp is generated automatically when absent."""
        entry = _make_entry(timestamp="")
        audit.record(entry)

        # The timestamp must have been filled in
        entries = audit.query()
        assert len(entries) == 1
        assert entries[0]["timestamp"] != ""
        # Check ISO 8601 format (raises ValueError otherwise)
        datetime.fromisoformat(entries[0]["timestamp"])

    def test_record_utf8_french(self, audit):
        """French characters are recorded correctly."""
        entry = _make_entry(
            action_detail="Saisie du diagnostic 'Hépatite à cytomégalovirus' — CIM-10: B25.1",
            user_name="François Müller",
            workflow_name="Codage séjour réanimation néonatale",
        )
        audit.record(entry)

        entries = audit.query()
        assert len(entries) == 1
        assert "Hépatite" in entries[0]["action_detail"]
        assert "François Müller" in entries[0]["user_name"]
        assert "néonatale" in entries[0]["workflow_name"]

    def test_record_creates_directory(self, tmp_path):
        """The audit directory is created automatically when missing."""
        new_dir = str(tmp_path / "sub" / "deep" / "audit")
        audit = AuditTrail(audit_dir=new_dir)
        entry = _make_entry()
        audit.record(entry)

        assert Path(new_dir).exists()
        entries = audit.query()
        assert len(entries) == 1

    def test_record_different_dates(self, audit, audit_dir):
        """Entries with different dates go into different files."""
        today = date.today()
        yesterday = today - timedelta(days=1)

        entry_today = _make_entry(timestamp=datetime.now().isoformat())
        entry_yesterday = _make_entry(
            timestamp=datetime.combine(yesterday, datetime.min.time()).isoformat(),
            action_id="act_yesterday",
        )

        audit.record(entry_today)
        audit.record(entry_yesterday)

        # One file per day
        file_today = Path(audit_dir) / f"audit_{today.isoformat()}.jsonl"
        file_yesterday = Path(audit_dir) / f"audit_{yesterday.isoformat()}.jsonl"
        assert file_today.exists()
        assert file_yesterday.exists()

    def test_jsonl_format(self, audit, audit_dir):
        """Every line of the file is valid JSON (JSONL format)."""
        for i in range(3):
            audit.record(_make_entry(action_id=f"act_{i}"))

        today = date.today().isoformat()
        filepath = Path(audit_dir) / f"audit_{today}.jsonl"
        with open(filepath, "r", encoding="utf-8") as f:
            for line_num, line in enumerate(f, 1):
                line = line.strip()
                assert line, f"Ligne {line_num} vide"
                data = json.loads(line)  # Must not raise
                assert "action_id" in data
                assert "timestamp" in data
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests AuditTrail — requêtes avec filtres
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
class TestAuditTrailQuery:
    """Tests for query filtering, combination, pagination and ordering."""

    def _seed_entries(self, audit):
        """Insert a varied set of test entries (2 users, 3 results, 2 workflows)."""
        entries = [
            _make_entry(
                action_id="act_001",
                user_id="tim_dupont",
                result="success",
                action_type="click",
                workflow_id="wf_01",
                domain="tim_codage",
            ),
            _make_entry(
                action_id="act_002",
                user_id="tim_dupont",
                result="failed",
                action_type="type",
                workflow_id="wf_01",
                domain="generic",
            ),
            _make_entry(
                action_id="act_003",
                user_id="tim_martin",
                user_name="Jean Martin",
                result="success",
                action_type="click",
                workflow_id="wf_02",
                domain="generic",
            ),
            _make_entry(
                action_id="act_004",
                user_id="tim_martin",
                user_name="Jean Martin",
                result="recovered",
                action_type="key_combo",
                workflow_id="wf_02",
                domain="generic",
            ),
            _make_entry(
                action_id="act_005",
                user_id="tim_dupont",
                result="success",
                action_type="click",
                workflow_id="wf_01",
                domain="generic",
            ),
        ]
        for e in entries:
            audit.record(e)

    def test_query_all(self, audit):
        """A query without filters returns everything."""
        self._seed_entries(audit)
        results = audit.query()
        assert len(results) == 5

    def test_query_by_user(self, audit):
        """Filter by user identifier."""
        self._seed_entries(audit)
        results = audit.query(user_id="tim_dupont")
        assert len(results) == 3
        assert all(r["user_id"] == "tim_dupont" for r in results)

    def test_query_by_result(self, audit):
        """Filter by result."""
        self._seed_entries(audit)
        results = audit.query(result="success")
        assert len(results) == 3
        assert all(r["result"] == "success" for r in results)

    def test_query_by_action_type(self, audit):
        """Filter by action type."""
        self._seed_entries(audit)
        results = audit.query(action_type="click")
        assert len(results) == 3

    def test_query_by_workflow(self, audit):
        """Filter by workflow."""
        self._seed_entries(audit)
        results = audit.query(workflow_id="wf_02")
        assert len(results) == 2

    def test_query_by_domain(self, audit):
        """Filter by business domain."""
        self._seed_entries(audit)
        results = audit.query(domain="tim_codage")
        assert len(results) == 1
        assert results[0]["action_id"] == "act_001"

    def test_query_by_session(self, audit):
        """Filter by session."""
        self._seed_entries(audit)
        results = audit.query(session_id="sess_test_001")
        assert len(results) == 5  # All seeded entries share the same session

    def test_query_combined_filters(self, audit):
        """Several filters combine with AND semantics."""
        self._seed_entries(audit)
        results = audit.query(user_id="tim_dupont", result="success")
        assert len(results) == 2

    def test_query_no_match(self, audit):
        """A filter with no match returns an empty list."""
        self._seed_entries(audit)
        results = audit.query(user_id="tim_inexistant")
        assert len(results) == 0

    def test_query_pagination_limit(self, audit):
        """Limit the number of results."""
        self._seed_entries(audit)
        results = audit.query(limit=2)
        assert len(results) == 2

    def test_query_pagination_offset(self, audit):
        """Skip the first results."""
        self._seed_entries(audit)
        all_results = audit.query()
        offset_results = audit.query(offset=3)
        assert len(offset_results) == 2
        assert offset_results[0] == all_results[3]

    def test_query_sorted_by_timestamp_desc(self, audit):
        """Results come back sorted by descending timestamp."""
        now = datetime.now()
        for i in range(5):
            ts = (now - timedelta(minutes=i)).isoformat()
            audit.record(_make_entry(
                timestamp=ts,
                action_id=f"act_{i}",
            ))

        results = audit.query()
        timestamps = [r["timestamp"] for r in results]
        assert timestamps == sorted(timestamps, reverse=True)

    def test_query_date_range(self, audit):
        """Filter by date range."""
        today = date.today()
        yesterday = today - timedelta(days=1)

        # Yesterday's entry
        audit.record(_make_entry(
            timestamp=datetime.combine(yesterday, datetime.min.time()).isoformat(),
            action_id="act_yesterday",
        ))
        # Today's entry
        audit.record(_make_entry(
            timestamp=datetime.now().isoformat(),
            action_id="act_today",
        ))

        # Filter yesterday only
        results = audit.query(
            date_from=yesterday.isoformat(),
            date_to=yesterday.isoformat(),
        )
        assert len(results) == 1
        assert results[0]["action_id"] == "act_yesterday"

        # Filter both days
        results = audit.query(
            date_from=yesterday.isoformat(),
            date_to=today.isoformat(),
        )
        assert len(results) == 2
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests AuditTrail — résumé journalier
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
class TestAuditTrailSummary:
    """Tests for the daily summary aggregation."""

    def test_summary_empty(self, audit):
        """Summary of a day with no data."""
        summary = audit.get_summary("2025-01-01")
        assert summary["total_actions"] == 0
        assert summary["success_rate"] == 0.0
        assert summary["by_user"] == {}

    def test_summary_basic(self, audit):
        """Summary over a few entries."""
        audit.record(_make_entry(user_id="tim_dupont", result="success"))
        audit.record(_make_entry(user_id="tim_dupont", result="failed"))
        audit.record(_make_entry(user_id="tim_martin", user_name="Jean Martin", result="success"))

        summary = audit.get_summary()
        assert summary["total_actions"] == 3
        assert summary["success_rate"] == round(2 / 3, 3)

    def test_summary_by_user(self, audit):
        """Per-user breakdown."""
        audit.record(_make_entry(user_id="tim_dupont", result="success"))
        audit.record(_make_entry(user_id="tim_dupont", result="success"))
        audit.record(_make_entry(user_id="tim_dupont", result="failed"))
        audit.record(_make_entry(user_id="tim_martin", user_name="Jean Martin", result="success"))

        summary = audit.get_summary()
        assert "tim_dupont" in summary["by_user"]
        assert summary["by_user"]["tim_dupont"]["total"] == 3
        assert summary["by_user"]["tim_dupont"]["success"] == 2
        assert summary["by_user"]["tim_dupont"]["success_rate"] == round(2 / 3, 3)
        assert summary["by_user"]["tim_martin"]["total"] == 1
        assert summary["by_user"]["tim_martin"]["success_rate"] == 1.0

    def test_summary_by_result(self, audit):
        """Per-result breakdown."""
        audit.record(_make_entry(result="success"))
        audit.record(_make_entry(result="success"))
        audit.record(_make_entry(result="failed"))
        audit.record(_make_entry(result="recovered"))

        summary = audit.get_summary()
        assert summary["by_result"]["success"] == 2
        assert summary["by_result"]["failed"] == 1
        assert summary["by_result"]["recovered"] == 1

    def test_summary_by_action_type(self, audit):
        """Per-action-type breakdown."""
        audit.record(_make_entry(action_type="click"))
        audit.record(_make_entry(action_type="click"))
        audit.record(_make_entry(action_type="type"))

        summary = audit.get_summary()
        assert summary["by_action_type"]["click"] == 2
        assert summary["by_action_type"]["type"] == 1

    def test_summary_by_workflow(self, audit):
        """Per-workflow breakdown."""
        audit.record(_make_entry(workflow_id="wf_01"))
        audit.record(_make_entry(workflow_id="wf_01"))
        audit.record(_make_entry(workflow_id="wf_02"))

        summary = audit.get_summary()
        assert summary["by_workflow"]["wf_01"] == 2
        assert summary["by_workflow"]["wf_02"] == 1

    def test_summary_by_execution_mode(self, audit):
        """Per-execution-mode breakdown."""
        audit.record(_make_entry(execution_mode="autonomous"))
        audit.record(_make_entry(execution_mode="assisted"))
        audit.record(_make_entry(execution_mode="assisted"))

        summary = audit.get_summary()
        assert summary["by_execution_mode"]["autonomous"] == 1
        assert summary["by_execution_mode"]["assisted"] == 2

    def test_summary_date_field(self, audit):
        """The summary echoes the requested date."""
        today = date.today().isoformat()
        summary = audit.get_summary(today)
        assert summary["date"] == today
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests AuditTrail — export CSV
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
class TestAuditTrailExportCSV:
    """Tests for the CSV export."""

    def test_export_csv_empty(self, audit):
        """Export without data returns an empty string."""
        csv_data = audit.export_csv(date_from="2025-01-01")
        assert csv_data == ""

    def test_export_csv_basic(self, audit):
        """CSV export with a few entries."""
        audit.record(_make_entry(action_id="act_001"))
        audit.record(_make_entry(action_id="act_002"))

        csv_data = audit.export_csv()
        assert csv_data
        assert "act_001" in csv_data
        assert "act_002" in csv_data

    def test_export_csv_header(self, audit):
        """The CSV header contains every field of the dataclass."""
        audit.record(_make_entry())

        csv_data = audit.export_csv()
        reader = csv.DictReader(io.StringIO(csv_data))
        fieldnames = reader.fieldnames
        assert "timestamp" in fieldnames
        assert "user_id" in fieldnames
        assert "action_detail" in fieldnames
        assert "domain" in fieldnames
        assert "duration_ms" in fieldnames

    def test_export_csv_parseable(self, audit):
        """The produced CSV parses cleanly with the csv module."""
        for i in range(5):
            audit.record(_make_entry(
                action_id=f"act_{i}",
                action_detail=f"Action {i} — avec des 'guillemets' et des, virgules",
            ))

        csv_data = audit.export_csv()
        reader = csv.DictReader(io.StringIO(csv_data))
        rows = list(reader)
        assert len(rows) == 5

        # Values survive quoting despite embedded special characters
        for row in rows:
            assert "virgules" in row["action_detail"]

    def test_export_csv_filter_by_user(self, audit):
        """Export filtered by user."""
        audit.record(_make_entry(user_id="tim_dupont", action_id="act_001"))
        audit.record(_make_entry(user_id="tim_martin", action_id="act_002"))

        csv_data = audit.export_csv(user_id="tim_dupont")
        reader = csv.DictReader(io.StringIO(csv_data))
        rows = list(reader)
        assert len(rows) == 1
        assert rows[0]["user_id"] == "tim_dupont"

    def test_export_csv_utf8(self, audit):
        """CSV export handles French UTF-8 correctly."""
        audit.record(_make_entry(
            action_detail="Saisie 'Hépatite à cytomégalovirus' — réanimation néonatale",
            user_name="François Müller",
        ))

        csv_data = audit.export_csv()
        assert "Hépatite" in csv_data
        assert "François Müller" in csv_data
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
# Robustness tests
# =========================================================================
|
||||||
|
|
||||||
|
class TestAuditTrailRobustness:
    """Robustness and edge-case tests."""

    def test_directory_auto_creation(self, tmp_path):
        """The directory is created automatically if it does not exist."""
        audit_dir = str(tmp_path / "nonexistent" / "deep" / "audit")
        assert not Path(audit_dir).exists()

        # Constructing the AuditTrail must create the directory tree.
        AuditTrail(audit_dir=audit_dir)
        assert Path(audit_dir).exists()

    def test_corrupted_jsonl_line(self, audit, audit_dir):
        """A corrupted line in the JSONL file does not crash reads."""
        # Write normal entries
        audit.record(_make_entry(action_id="act_001"))
        audit.record(_make_entry(action_id="act_002"))

        # Inject a corrupted line
        today = date.today().isoformat()
        filepath = Path(audit_dir) / f"audit_{today}.jsonl"
        with open(filepath, "a", encoding="utf-8") as f:
            f.write("{invalid json line\n")

        # Add one more valid entry
        audit.record(_make_entry(action_id="act_003"))

        # Reading must work and skip the corrupted line
        entries = audit.query()
        assert len(entries) == 3  # 2 valid before + 1 valid after

    def test_empty_file(self, audit, audit_dir):
        """An empty file does not crash."""
        today = date.today().isoformat()
        filepath = Path(audit_dir) / f"audit_{today}.jsonl"
        filepath.touch()  # Empty file

        entries = audit.query()
        assert len(entries) == 0

    def test_concurrent_writes(self, audit):
        """Concurrent writes are safe thanks to the threading lock."""
        import threading

        errors = []

        def write_entries(start):
            try:
                for i in range(20):
                    audit.record(_make_entry(action_id=f"act_{start}_{i}"))
            except Exception as e:
                errors.append(str(e))

        threads = [
            threading.Thread(target=write_entries, args=(t,))
            for t in range(5)
        ]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        assert not errors, f"Erreurs concurrentes: {errors}"
        entries = audit.query(limit=200)
        assert len(entries) == 100  # 5 threads x 20 entries

    def test_query_invalid_date(self, audit):
        """Invalid dates do not crash the query."""
        # Must not raise
        results = audit.query(date_from="not-a-date")
        assert isinstance(results, list)

    def test_summary_invalid_date(self, audit):
        """An invalid date in get_summary does not crash."""
        summary = audit.get_summary("not-a-date")
        assert summary["total_actions"] == 0

    def test_entry_all_fields_present_in_export(self, audit):
        """All dataclass fields are present in the CSV export."""
        from dataclasses import fields as dc_fields
        entry = _make_entry()
        audit.record(entry)

        csv_data = audit.export_csv()
        reader = csv.DictReader(io.StringIO(csv_data))
        row = next(reader)

        expected_fields = {f.name for f in dc_fields(AuditEntry)}
        actual_fields = set(row.keys())
        assert expected_fields == actual_fields

    def test_date_range_reversed(self, audit):
        """A reversed date range (date_to < date_from) still works."""
        today = date.today()
        yesterday = today - timedelta(days=1)

        audit.record(_make_entry(
            timestamp=datetime.combine(yesterday, datetime.min.time()).isoformat(),
        ))

        # date_from > date_to -> must still work
        results = audit.query(
            date_from=today.isoformat(),
            date_to=yesterday.isoformat(),
        )
        # The implementation swaps the dates automatically
        assert isinstance(results, list)
|
||||||
# --- New file in this diff: tests/unit/test_policy_grounding_recovery_learning.py (+530 lines) ---
|
|||||||
|
"""
Functional tests for P2 (Policy/Grounding), P3 (Recovery), P4 (Learning).

Checks that each module does its own job:
- Grounding: locates an element or returns NOT_FOUND (no decisions)
- Policy: decides RETRY/SKIP/ABORT/SUPERVISE (no locating)
- Recovery: performs Ctrl+Z / Escape / Alt+F4 depending on the context
- Learning: records and queries structured outcomes
"""

import json
import shutil
import sys
import tempfile
from pathlib import Path
from unittest.mock import MagicMock, patch, PropertyMock

import pytest

# Make the repository root importable when the tests are run directly.
_ROOT = str(Path(__file__).resolve().parents[2])
if _ROOT not in sys.path:
    sys.path.insert(0, _ROOT)
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
# P2: Grounding — pure localization
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestGroundingEngine:
    """Grounding locates elements (or reports NOT_FOUND); it never decides."""

    def _make_engine(self):
        """Build a GroundingEngine over a mocked executor."""
        from agent_v0.agent_v1.core.grounding import GroundingEngine
        executor = MagicMock()
        executor._capture_screenshot_b64.return_value = "fake_b64_data"
        return GroundingEngine(executor), executor

    def test_server_found_retourne_coordonnees(self):
        """If the server finds the element, its coordinates are returned."""
        engine, executor = self._make_engine()
        executor._server_resolve_target.return_value = {
            "resolved": True, "x_pct": 0.5, "y_pct": 0.3,
            "method": "som_text", "score": 0.95,
            "matched_element": {"label": "Enregistrer"},
        }
        result = engine.locate("http://server", {"by_text": "Enregistrer"}, 0.5, 0.3, 1920, 1080)
        assert result.found is True
        assert result.x_pct == 0.5
        assert result.y_pct == 0.3
        assert result.method == "som_text"

    def test_server_not_found_cascade_template(self):
        """If the server fails, fall through to template matching."""
        engine, executor = self._make_engine()
        executor._server_resolve_target.return_value = None
        executor._template_match_anchor.return_value = {
            "resolved": True, "x_pct": 0.4, "y_pct": 0.6,
            "score": 0.85,
        }
        result = engine.locate(
            "http://server",
            {"by_text": "OK", "anchor_image_base64": "abc123"},
            0.5, 0.3, 1920, 1080,
        )
        assert result.found is True
        assert result.method == "anchor_template"

    def test_toutes_strategies_echouent_retourne_not_found(self):
        """If every strategy fails, NOT_FOUND is returned."""
        engine, executor = self._make_engine()
        executor._server_resolve_target.return_value = None
        executor._template_match_anchor.return_value = None
        executor._hybrid_vlm_resolve.return_value = None
        result = engine.locate(
            "http://server",
            {"by_text": "Inexistant", "anchor_image_base64": "abc", "vlm_description": "bouton"},
            0.5, 0.3, 1920, 1080,
        )
        assert result.found is False
        assert "échoué" in result.detail

    def test_screenshot_echoue_retourne_not_found(self):
        """If the screenshot capture fails, NOT_FOUND is immediate."""
        engine, executor = self._make_engine()
        executor._capture_screenshot_b64.return_value = None
        result = engine.locate("http://server", {"by_text": "OK"}, 0.5, 0.3, 1920, 1080)
        assert result.found is False
        assert "screenshot" in result.detail.lower()

    def test_strategies_custom(self):
        """The strategies to use can be restricted explicitly."""
        engine, executor = self._make_engine()
        executor._template_match_anchor.return_value = {
            "resolved": True, "x_pct": 0.2, "y_pct": 0.8, "score": 0.9,
        }
        # Template only, no server
        result = engine.locate(
            "", {"anchor_image_base64": "abc"}, 0.5, 0.3, 1920, 1080,
            strategies=["template"],
        )
        assert result.found is True
        # The server was NOT called
        executor._server_resolve_target.assert_not_called()

    def test_grounding_result_to_dict(self):
        """GroundingResult serializes correctly."""
        from agent_v0.agent_v1.core.grounding import GroundingResult
        r = GroundingResult(found=True, x_pct=0.5, y_pct=0.3, method="som", score=0.9)
        d = r.to_dict()
        assert d["found"] is True
        assert d["x_pct"] == 0.5
        assert d["method"] == "som"
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
# P2: Policy — decisions when grounding fails
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestPolicyEngine:
    """Policy decides RETRY/SKIP/ABORT/SUPERVISE; it never locates anything."""

    def _make_engine(self):
        """Build a PolicyEngine over a mocked executor."""
        from agent_v0.agent_v1.core.policy import PolicyEngine
        executor = MagicMock()
        return PolicyEngine(executor), executor

    def test_premier_essai_popup_fermee_retry(self):
        """First failure + popup closed -> RETRY."""
        from agent_v0.agent_v1.core.policy import Decision
        engine, executor = self._make_engine()
        executor._handle_popup_vlm.return_value = True  # Popup was closed

        decision = engine.decide(
            action={"type": "click"},
            target_spec={"by_text": "OK"},
            retry_count=0,
        )
        assert decision.decision == Decision.RETRY
        assert "popup" in decision.reason.lower()

    def test_premier_essai_pas_de_popup_retry(self):
        """First failure + no popup -> RETRY anyway (max_retries > 0)."""
        from agent_v0.agent_v1.core.policy import Decision
        engine, executor = self._make_engine()
        executor._handle_popup_vlm.return_value = False

        decision = engine.decide(
            action={"type": "click"},
            target_spec={"by_text": "OK"},
            retry_count=0,
            max_retries=2,
        )
        assert decision.decision == Decision.RETRY

    def test_max_retries_acteur_passer_skip(self):
        """Max retries reached + actor says PASSER -> SKIP."""
        from agent_v0.agent_v1.core.policy import Decision
        engine, executor = self._make_engine()
        executor._actor_decide.return_value = "PASSER"

        decision = engine.decide(
            action={"type": "click"},
            target_spec={"by_text": "Onglet"},
            retry_count=1,
            max_retries=1,
        )
        assert decision.decision == Decision.SKIP

    def test_max_retries_acteur_stopper_abort(self):
        """Max retries reached + actor says STOPPER -> ABORT."""
        from agent_v0.agent_v1.core.policy import Decision
        engine, executor = self._make_engine()
        executor._actor_decide.return_value = "STOPPER"

        decision = engine.decide(
            action={"type": "click"},
            target_spec={"by_text": "X"},
            retry_count=1,
            max_retries=1,
        )
        assert decision.decision == Decision.ABORT

    def test_max_retries_acteur_executer_supervise(self):
        """Max retries + actor says EXECUTER -> SUPERVISE (hand control back)."""
        from agent_v0.agent_v1.core.policy import Decision
        engine, executor = self._make_engine()
        executor._actor_decide.return_value = "EXECUTER"

        decision = engine.decide(
            action={"type": "click"},
            target_spec={"by_text": "X"},
            retry_count=1,
            max_retries=1,
        )
        assert decision.decision == Decision.SUPERVISE

    def test_policy_decision_to_dict(self):
        """PolicyDecision serializes correctly."""
        from agent_v0.agent_v1.core.policy import PolicyDecision, Decision
        d = PolicyDecision(decision=Decision.SKIP, reason="État atteint").to_dict()
        assert d["decision"] == "skip"
        assert d["reason"] == "État atteint"
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
# P3: Recovery — rollback after a failure
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestRecoveryEngine:
    """Recovery performs Ctrl+Z / Escape / Alt+F4 based on the critic's detail."""

    def _make_engine(self):
        """Build a RecoveryEngine over a mocked executor with keyboard/screen."""
        from agent_v0.agent_v1.core.recovery import RecoveryEngine
        executor = MagicMock()
        executor.keyboard = MagicMock()
        executor.sct = MagicMock()
        executor.sct.monitors = [{}, {"width": 1920, "height": 1080}]
        executor._click = MagicMock()
        return RecoveryEngine(executor), executor

    def test_popup_detectee_escape(self):
        """Critic says "popup" -> Recovery presses Escape."""
        from agent_v0.agent_v1.core.recovery import RecoveryAction
        engine, executor = self._make_engine()
        result = engine.attempt(
            failed_action={"type": "click"},
            critic_detail="Une popup d'erreur est apparue",
        )
        assert result.action_taken == RecoveryAction.ESCAPE
        assert result.success is True
        # Verify that Escape was pressed
        executor.keyboard.press.assert_called()

    def test_frappe_incorrecte_undo(self):
        """Incorrect typing -> Recovery presses Ctrl+Z."""
        from agent_v0.agent_v1.core.recovery import RecoveryAction
        engine, executor = self._make_engine()
        result = engine.attempt(
            failed_action={"type": "type"},
            critic_detail="Le texte a été tapé au mauvais endroit",
        )
        assert result.action_taken == RecoveryAction.UNDO
        assert result.success is True

    def test_mauvaise_fenetre_close(self):
        """Wrong window -> Recovery presses Alt+F4."""
        from agent_v0.agent_v1.core.recovery import RecoveryAction
        engine, executor = self._make_engine()
        result = engine.attempt(
            failed_action={"type": "click"},
            critic_detail="Mauvaise fenêtre ouverte au lieu du bloc-notes",
        )
        assert result.action_taken == RecoveryAction.CLOSE_WINDOW
        assert result.success is True

    def test_menu_ouvert_escape(self):
        """Open dropdown menu -> Recovery presses Escape."""
        from agent_v0.agent_v1.core.recovery import RecoveryAction
        engine, executor = self._make_engine()
        result = engine.attempt(
            failed_action={"type": "click"},
            critic_detail="Un menu déroulant s'est ouvert",
        )
        assert result.action_taken == RecoveryAction.ESCAPE
        assert result.success is True

    def test_aucune_strategie_applicable(self):
        """No recognized pattern -> NONE."""
        from agent_v0.agent_v1.core.recovery import RecoveryAction
        engine, executor = self._make_engine()
        result = engine.attempt(
            failed_action={"type": "wait"},
            critic_detail="Quelque chose d'inattendu",
        )
        assert result.action_taken == RecoveryAction.NONE
        assert result.success is False

    def test_recovery_result_to_dict(self):
        """RecoveryResult serializes correctly."""
        from agent_v0.agent_v1.core.recovery import RecoveryResult, RecoveryAction
        d = RecoveryResult(
            action_taken=RecoveryAction.UNDO, success=True, detail="Ctrl+Z"
        ).to_dict()
        assert d["action_taken"] == "undo"
        assert d["success"] is True
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
# P4: Learning — runtime learning
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestReplayLearner:
    """Learning records structured outcomes and makes them queryable."""

    @pytest.fixture
    def learner(self):
        """A ReplayLearner backed by a throwaway temp directory."""
        tmpdir = tempfile.mkdtemp(prefix="test_learning_")
        from agent_v0.server_v1.replay_learner import ReplayLearner
        instance = ReplayLearner(learning_dir=tmpdir)
        yield instance
        shutil.rmtree(tmpdir, ignore_errors=True)

    def test_record_et_load_session(self, learner):
        """Record an outcome and read it back from the file."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        outcome = ActionOutcome(
            session_id="test_session",
            action_id="act_001",
            action_type="click",
            target_description="Bouton Enregistrer",
            resolution_method="som_text",
            resolution_score=0.95,
            success=True,
        )
        learner.record(outcome)

        # Read it back
        loaded = learner.load_session("test_session")
        assert len(loaded) == 1
        assert loaded[0].action_id == "act_001"
        assert loaded[0].success is True
        assert loaded[0].resolution_method == "som_text"

    def test_record_from_replay_result(self, learner):
        """Convert the replay format into an ActionOutcome."""
        learner.record_from_replay_result(
            session_id="s1",
            action={"action_id": "a1", "type": "click", "target_spec": {"by_text": "OK", "window_title": "App"}},
            result={"success": True, "resolution_method": "template", "resolution_score": 0.9},
            verification={"verified": True, "semantic_verified": True, "semantic_detail": "OK"},
        )
        loaded = learner.load_session("s1")
        assert len(loaded) == 1
        assert loaded[0].target_description == "OK"
        assert loaded[0].semantic_verified is True

    def test_query_similar(self, learner):
        """Query similar outcomes by target description."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        # Record several outcomes
        for i, (desc, method, success) in enumerate([
            ("Bouton Enregistrer", "som_text", True),
            ("Bouton Annuler", "template", True),
            ("Bouton Enregistrer", "vlm_direct", False),
            ("Menu Fichier", "som_text", True),
        ]):
            learner.record(ActionOutcome(
                session_id="s1", action_id=f"a{i}",
                action_type="click", target_description=desc,
                resolution_method=method, success=success,
            ))

        # Look up "Enregistrer"
        results = learner.query_similar(target_description="Enregistrer")
        assert len(results) == 2
        # Both results concern "Enregistrer"
        for r in results:
            assert "enregistrer" in r["outcome"]["target_description"].lower()

    def test_get_stats(self, learner):
        """Global statistics are correct."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        for success, method in [(True, "som"), (True, "som"), (False, "template"), (True, "vlm")]:
            learner.record(ActionOutcome(
                session_id="s1", action_id="a",
                action_type="click", success=success,
                resolution_method=method,
            ))

        stats = learner.get_stats()
        assert stats["total"] == 4
        assert stats["success_rate"] == 0.75
        assert stats["methods"]["som"]["success_rate"] == 1.0
        assert stats["methods"]["template"]["success_rate"] == 0.0

    def test_gemma4_indisponible_pas_de_crash(self, learner):
        """Learning still works without a VLM."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        # No crash, just a plain record
        learner.record(ActionOutcome(
            session_id="s1", action_id="a1", action_type="click",
            success=False, error="target_not_found",
        ))
        stats = learner.get_stats()
        assert stats["total"] == 1
        assert stats["success_rate"] == 0.0

    def test_fichier_jsonl_format(self, learner):
        """The JSONL file contains valid JSON, line by line."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        learner.record(ActionOutcome(
            session_id="s1", action_id="a1", action_type="click", success=True,
        ))
        learner.record(ActionOutcome(
            session_id="s1", action_id="a2", action_type="type", success=False,
        ))

        jsonl_file = learner.learning_dir / "s1.jsonl"
        assert jsonl_file.is_file()

        with open(jsonl_file) as f:
            lines = f.readlines()
        assert len(lines) == 2
        for line in lines:
            data = json.loads(line)  # Must be valid JSON
            assert "action_id" in data
            assert "success" in data
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
# Learning loop: cross-workflow consolidation
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestLearningLoop:
    """Learning-loop tests: past replays improve the following ones."""

    @pytest.fixture
    def learner(self):
        """A ReplayLearner backed by a throwaway temp directory."""
        tmpdir = tempfile.mkdtemp(prefix="test_learning_loop_")
        from agent_v0.server_v1.replay_learner import ReplayLearner
        instance = ReplayLearner(learning_dir=tmpdir)
        yield instance
        shutil.rmtree(tmpdir, ignore_errors=True)

    def test_best_strategy_apprend_du_succes(self, learner):
        """The best strategy is the one with the most successes."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        # template fails 3 times on "Enregistrer"
        for i in range(3):
            learner.record(ActionOutcome(
                session_id=f"s{i}", action_id=f"a{i}", action_type="click",
                target_description="Enregistrer", resolution_method="anchor_template",
                success=False,
            ))
        # som_text succeeds twice on "Enregistrer"
        for i in range(2):
            learner.record(ActionOutcome(
                session_id=f"s{10+i}", action_id=f"a{10+i}", action_type="click",
                target_description="Enregistrer", resolution_method="som_text_match",
                success=True,
            ))

        best = learner.best_strategy_for("Enregistrer")
        assert best == "som_text_match"

    def test_best_strategy_minimum_2_essais(self, learner):
        """At least 2 attempts are required before a strategy is recommended."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        # A single success -> not enough to recommend
        learner.record(ActionOutcome(
            session_id="s1", action_id="a1", action_type="click",
            target_description="OK", resolution_method="vlm_direct",
            success=True,
        ))
        best = learner.best_strategy_for("OK")
        assert best is None

    def test_best_strategy_rien_si_historique_vide(self, learner):
        """No history -> no recommendation."""
        best = learner.best_strategy_for("Inexistant")
        assert best is None

    def test_consolidate_workflow_enrichit_les_actions(self, learner):
        """Consolidation injects _learned_strategy into the target_specs."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        # History: som_text_match works for "Fichier"
        for i in range(3):
            learner.record(ActionOutcome(
                session_id=f"s{i}", action_id=f"a{i}", action_type="click",
                target_description="Fichier", resolution_method="som_text_match",
                success=True,
            ))

        # Workflow containing one "Fichier" action
        actions = [
            {"type": "click", "target_spec": {"by_text": "Fichier", "window_title": "Bloc-notes"}},
            {"type": "type", "text": "bonjour"},
            {"type": "click", "target_spec": {"by_text": "Inconnu"}},
        ]

        enriched = learner.consolidate_workflow(actions)
        assert enriched == 1  # Only "Fichier" has a history
        assert actions[0]["target_spec"]["_learned_strategy"] == "som_text_match"
        assert "_learned_strategy" not in actions[2].get("target_spec", {})

    def test_consolidation_cross_workflow(self, learner):
        """A success in workflow A improves workflow B."""
        from agent_v0.server_v1.replay_learner import ActionOutcome
        # Workflow A: "Enregistrer" succeeds with grounding_vlm
        for i in range(3):
            learner.record(ActionOutcome(
                session_id="workflow_A", action_id=f"a{i}", action_type="click",
                target_description="Enregistrer",
                window_title="Bloc-notes",
                resolution_method="grounding_vlm", success=True,
            ))

        # Workflow B also contains "Enregistrer"
        workflow_b = [
            {"type": "click", "target_spec": {"by_text": "Enregistrer", "window_title": "Bloc-notes"}},
        ]
        enriched = learner.consolidate_workflow(workflow_b, "workflow_B")
        assert enriched == 1
        assert workflow_b[0]["target_spec"]["_learned_strategy"] == "grounding_vlm"

    def test_grounding_reordonne_strategies(self):
        """The GroundingEngine reorders its strategies based on _learned_strategy."""
        from agent_v0.agent_v1.core.grounding import GroundingEngine
        executor = MagicMock()
        executor._capture_screenshot_b64.return_value = "fake"
        # Simulate that only template matching works
        executor._server_resolve_target.return_value = None
        executor._template_match_anchor.return_value = {
            "resolved": True, "x_pct": 0.5, "y_pct": 0.5, "score": 0.9,
        }
        executor._hybrid_vlm_resolve.return_value = None

        engine = GroundingEngine(executor)

        # With _learned_strategy = anchor_template -> template runs first
        result = engine.locate(
            "http://server",
            {"by_text": "OK", "anchor_image_base64": "abc", "_learned_strategy": "anchor_template"},
            0.5, 0.3, 1920, 1080,
        )
        assert result.found is True
        assert result.method == "anchor_template"
        # The server was NOT called (template ran first)
        executor._server_resolve_target.assert_not_called()
|
||||||
# --- New file in this diff: tests/unit/test_replay_critic.py (+441 lines) ---
|
|||||||
|
"""
Unit tests for the Critic (ReplayVerifier.verify_with_critic)
and for enriching actions with intentions.

These check FEATURES, not just non-regression:
1. The Critic correctly merges pixel + semantic signals
2. The decision matrix (4 cases) is correct
3. Intention enrichment parses gemma4 responses properly
4. The fallbacks work when the VLM is unavailable
"""

import base64
import io
import json
import sys
from pathlib import Path
from unittest.mock import MagicMock, patch, Mock

import pytest

# Make the repository root importable when the tests are run directly.
_ROOT = str(Path(__file__).resolve().parents[2])
if _ROOT not in sys.path:
    sys.path.insert(0, _ROOT)

from agent_v0.server_v1.replay_verifier import ReplayVerifier, VerificationResult
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
# Fixtures
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
def _make_screenshot_b64(width=100, height=100, color=(128, 128, 128)):
    """Create a fake base64-encoded screenshot (solid-color JPEG).

    Args:
        width: image width in pixels.
        height: image height in pixels.
        color: RGB fill color tuple.

    Returns:
        The JPEG bytes encoded as a base64 ASCII string.
    """
    from PIL import Image
    img = Image.new("RGB", (width, height), color)
    buf = io.BytesIO()
    img.save(buf, format="JPEG", quality=50)
    return base64.b64encode(buf.getvalue()).decode()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
def verifier():
    """A fresh ReplayVerifier instance for each test."""
    return ReplayVerifier()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
def screenshot_gray():
    """100x100 uniform mid-gray screenshot (base64 JPEG)."""
    return _make_screenshot_b64(100, 100, (128, 128, 128))
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
def screenshot_white():
    """100x100 uniform white screenshot (base64 JPEG)."""
    return _make_screenshot_b64(100, 100, (255, 255, 255))
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests VerificationResult — nouveaux champs sémantiques
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestVerificationResult:
    """to_dict() exposes the semantic_* fields only when they were set."""

    def test_to_dict_sans_semantique(self):
        """Without semantic verification, no semantic_ keys appear in the dict."""
        outcome = VerificationResult(
            verified=True,
            confidence=0.8,
            changes_detected=True,
            change_area_pct=5.0,
            suggestion="continue",
            detail="test",
        )
        payload = outcome.to_dict()
        assert "semantic_verified" not in payload
        assert payload["verified"] is True
        assert payload["confidence"] == 0.8

    def test_to_dict_avec_semantique(self):
        """With semantic verification, the semantic_ keys are present."""
        outcome = VerificationResult(
            verified=True,
            confidence=0.9,
            changes_detected=True,
            change_area_pct=5.0,
            suggestion="continue",
            detail="test",
            semantic_verified=True,
            semantic_detail="Bouton visible",
            semantic_elapsed_ms=1500.0,
        )
        payload = outcome.to_dict()
        assert payload["semantic_verified"] is True
        assert payload["semantic_detail"] == "Bouton visible"
        assert payload["semantic_elapsed_ms"] == 1500.0

    def test_to_dict_semantique_false(self):
        """semantic_verified=False must still show up in the dict."""
        outcome = VerificationResult(
            verified=False,
            confidence=0.7,
            changes_detected=True,
            change_area_pct=5.0,
            suggestion="retry",
            semantic_verified=False,
            semantic_detail="Mauvais écran",
            semantic_elapsed_ms=2000.0,
        )
        assert outcome.to_dict()["semantic_verified"] is False
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests verify_with_critic — matrice de décision
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestVerifyWithCritic:
    """Decision matrix of verify_with_critic (pixel diff x semantic VLM)."""

    def test_sans_expected_result_retourne_pixel_seul(self, verifier, screenshot_gray):
        """Without expected_result, verify_with_critic reduces to pixel-only verify_action."""
        verdict = verifier.verify_with_critic(
            action={"type": "click", "action_id": "test"},
            result={"success": True},
            screenshot_before=screenshot_gray,
            screenshot_after=screenshot_gray,
            expected_result="",  # no expectation provided
        )
        # Pixel-only path: the semantic field stays unset
        assert verdict.semantic_verified is None

    def test_sans_screenshots_pas_de_semantique(self, verifier):
        """Without screenshots, no semantic verification is possible."""
        verdict = verifier.verify_with_critic(
            action={"type": "click", "action_id": "test"},
            result={"success": True},
            screenshot_before=None,
            screenshot_after=None,
            expected_result="Le fichier est ouvert",
        )
        # No screenshots -> pixel-only fallback with low confidence
        assert verdict.verified is True
        assert verdict.confidence < 0.5

    def test_pixel_pas_change_et_expected_result_skip_vlm(self, verifier, screenshot_gray):
        """Identical pixels + expected_result -> VLM skipped (no change means retry)."""
        verdict = verifier.verify_with_critic(
            action={"type": "click", "action_id": "test", "x_pct": 0.5, "y_pct": 0.5},
            result={"success": True},
            screenshot_before=screenshot_gray,
            screenshot_after=screenshot_gray,  # same image -> no change at all
            expected_result="Le menu s'est ouvert",
        )
        # No pixel change -> retry, and the VLM was never called
        assert verdict.verified is False
        assert verdict.suggestion == "retry"
        assert verdict.semantic_verified is None

    @patch("agent_v0.server_v1.replay_verifier.ReplayVerifier._verify_semantic")
    def test_pixel_ok_semantic_ok(self, mock_semantic, verifier, screenshot_gray, screenshot_white):
        """Pixel change + semantic confirmation -> verified with high confidence."""
        mock_semantic.return_value = {
            "verified": True,
            "detail": "Le menu est bien ouvert",
            "elapsed_ms": 2000.0,
        }
        verdict = verifier.verify_with_critic(
            action={"type": "click", "action_id": "test"},
            result={"success": True},
            screenshot_before=screenshot_gray,
            screenshot_after=screenshot_white,  # different -> change detected
            expected_result="Le menu s'est ouvert",
        )
        assert verdict.verified is True
        assert verdict.semantic_verified is True
        assert verdict.confidence >= 0.7
        assert "Critic OK" in verdict.detail

    @patch("agent_v0.server_v1.replay_verifier.ReplayVerifier._verify_semantic")
    def test_pixel_ok_semantic_non(self, mock_semantic, verifier, screenshot_gray, screenshot_white):
        """Pixel change but semantic rejection -> UNEXPECTED (changed, but wrongly)."""
        mock_semantic.return_value = {
            "verified": False,
            "detail": "Une erreur est apparue au lieu du menu",
            "elapsed_ms": 2500.0,
        }
        verdict = verifier.verify_with_critic(
            action={"type": "click", "action_id": "test"},
            result={"success": True},
            screenshot_before=screenshot_gray,
            screenshot_after=screenshot_white,
            expected_result="Le menu s'est ouvert",
        )
        assert verdict.verified is False
        assert verdict.semantic_verified is False
        assert verdict.suggestion == "retry"
        assert "Critic NON" in verdict.detail

    @patch("agent_v0.server_v1.replay_verifier.ReplayVerifier._verify_semantic")
    def test_vlm_indisponible_fallback_pixel(self, mock_semantic, verifier, screenshot_gray, screenshot_white):
        """VLM unavailable -> graceful fallback to the pixel-only verdict."""
        mock_semantic.return_value = None  # VLM down
        verdict = verifier.verify_with_critic(
            action={"type": "click", "action_id": "test"},
            result={"success": True},
            screenshot_before=screenshot_gray,
            screenshot_after=screenshot_white,
            expected_result="Le menu s'est ouvert",
        )
        # Pixel-only fallback — the change is still detected
        assert verdict.verified is True
        assert verdict.semantic_verified is None  # no VLM verdict
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests _verify_semantic — parsing de la réponse VLM
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestVerifySemantic:
    """Parsing of the raw VLM reply by _verify_semantic."""

    @patch("requests.post")
    def test_parse_verdict_oui(self, mock_post, verifier, screenshot_white):
        """A 'VERDICT: OUI' reply is parsed as verified=True."""
        reply = MagicMock()
        reply.ok = True
        reply.json.return_value = {
            "message": {"content": "VERDICT: OUI\nRAISON: Le fichier est bien ouvert"}
        }
        mock_post.return_value = reply
        parsed = verifier._verify_semantic(
            screenshot_before=screenshot_white,
            screenshot_after=screenshot_white,
            expected_result="Le fichier est ouvert",
        )
        assert parsed is not None
        assert parsed["verified"] is True
        assert "ouvert" in parsed["detail"]

    @patch("requests.post")
    def test_parse_verdict_non(self, mock_post, verifier, screenshot_white):
        """A 'VERDICT: NON' reply is parsed as verified=False."""
        reply = MagicMock()
        reply.ok = True
        reply.json.return_value = {
            "message": {"content": "VERDICT: NON\nRAISON: L'écran n'a pas changé"}
        }
        mock_post.return_value = reply
        parsed = verifier._verify_semantic(
            screenshot_before=screenshot_white,
            screenshot_after=screenshot_white,
            expected_result="Le menu s'est ouvert",
        )
        assert parsed is not None
        assert parsed["verified"] is False

    @patch("requests.post")
    def test_vlm_timeout_retourne_none(self, mock_post, verifier, screenshot_white):
        """A VLM timeout yields None so callers can fall back gracefully."""
        import requests as _real_requests
        mock_post.side_effect = _real_requests.Timeout("timeout")
        parsed = verifier._verify_semantic(
            screenshot_before=screenshot_white,
            screenshot_after=screenshot_white,
            expected_result="Le fichier est ouvert",
        )
        assert parsed is None

    def test_sans_screenshot_after_retourne_none(self, verifier):
        """Without screenshot_after, no semantic check is possible."""
        parsed = verifier._verify_semantic(
            screenshot_before=None,
            screenshot_after=None,
            expected_result="Le fichier est ouvert",
        )
        assert parsed is None
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests _merge_results — matrice pixel x sémantique
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestMergeResults:
    """Pixel x semantic fusion matrix of _merge_results."""

    @staticmethod
    def _pixel(verified, confidence, changes_detected, change_area_pct, suggestion):
        """Shorthand for building the pixel-side VerificationResult."""
        return VerificationResult(
            verified=verified,
            confidence=confidence,
            changes_detected=changes_detected,
            change_area_pct=change_area_pct,
            suggestion=suggestion,
        )

    def test_pixel_ok_sem_ok(self, verifier):
        pixel = self._pixel(True, 0.7, True, 5.0, "continue")
        semantic = {"verified": True, "detail": "OK", "elapsed_ms": 1000}
        merged = verifier._merge_results(pixel, semantic)
        assert merged.verified is True
        assert merged.semantic_verified is True
        assert merged.confidence >= 0.7

    def test_pixel_ok_sem_non(self, verifier):
        """Pixel OK + semantic NO = unexpected change -> retry."""
        pixel = self._pixel(True, 0.7, True, 5.0, "continue")
        semantic = {"verified": False, "detail": "Erreur popup", "elapsed_ms": 2000}
        merged = verifier._merge_results(pixel, semantic)
        assert merged.verified is False
        assert merged.semantic_verified is False
        assert merged.suggestion == "retry"

    def test_pixel_non_sem_ok(self, verifier):
        """Pixel unchanged + semantic OK = subtle state -> continue."""
        pixel = self._pixel(False, 0.5, False, 0.1, "retry")
        semantic = {"verified": True, "detail": "Onglet déjà actif", "elapsed_ms": 1500}
        merged = verifier._merge_results(pixel, semantic)
        assert merged.verified is True
        assert merged.semantic_verified is True
        assert merged.suggestion == "continue"

    def test_pixel_non_sem_non(self, verifier):
        """Pixel unchanged + semantic NO = outright failure -> retry."""
        pixel = self._pixel(False, 0.5, False, 0.0, "retry")
        semantic = {"verified": False, "detail": "Rien ne s'est passé", "elapsed_ms": 3000}
        merged = verifier._merge_results(pixel, semantic)
        assert merged.verified is False
        assert merged.semantic_verified is False
        assert merged.confidence >= 0.7  # high confidence in the failure itself
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests enrichissement intentions (stream_processor)
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestEnrichActionsWithIntentions:
    """Intent enrichment of recorded actions via gemma4 (stream_processor)."""

    @patch("requests.post")
    @patch("requests.get")
    def test_enrichissement_parse_reponse_gemma4(self, mock_get, mock_post):
        """A well-formed gemma4 reply is split into intention/before/after fields."""
        from agent_v0.server_v1.stream_processor import _enrich_actions_with_intentions
        import shutil
        import tempfile

        # gemma4 reachable
        tags_reply = MagicMock()
        tags_reply.ok = True
        mock_get.return_value = tags_reply

        chat_reply = MagicMock()
        chat_reply.ok = True
        chat_reply.json.return_value = {
            "message": {
                "content": (
                    "INTENTION: Ouvrir le fichier client dans le logiciel\n"
                    "AVANT: Le logiciel est ouvert sur la page d'accueil\n"
                    "APRÈS: Le fichier client est affiché dans la fenêtre"
                )
            }
        }
        mock_post.return_value = chat_reply

        actions = [
            {
                "type": "click",
                "action_id": "act_001",
                "target_spec": {"by_text": "Ouvrir", "window_title": "Logiciel"},
            },
            {
                "type": "wait",
                "action_id": "act_002",
                "duration_ms": 1000,
            },
        ]

        workdir = Path(tempfile.mkdtemp())
        try:
            (workdir / "shots").mkdir()
            _enrich_actions_with_intentions(actions, workdir)

            clicked = actions[0]
            # The click action gets enriched
            assert clicked.get("intention") == "Ouvrir le fichier client dans le logiciel"
            assert clicked.get("expected_state") == "Le logiciel est ouvert sur la page d'accueil"
            assert clicked.get("expected_result") == "Le fichier client est affiché dans la fenêtre"
            # expected_state is mirrored into target_spec (for the Observer)
            assert clicked["target_spec"]["expected_state"] == "Le logiciel est ouvert sur la page d'accueil"

            # The wait action must NOT be enriched
            assert "intention" not in actions[1]
        finally:
            shutil.rmtree(workdir)

    @patch("requests.get")
    def test_gemma4_indisponible_pas_de_crash(self, mock_get):
        """When gemma4 is down, enrichment is silently disabled."""
        from agent_v0.server_v1.stream_processor import _enrich_actions_with_intentions
        import shutil
        import tempfile

        mock_get.side_effect = ConnectionError("gemma4 down")

        actions = [
            {"type": "click", "action_id": "act_001", "target_spec": {"by_text": "OK"}},
        ]

        workdir = Path(tempfile.mkdtemp())
        try:
            (workdir / "shots").mkdir()
            _enrich_actions_with_intentions(actions, workdir)
            # No crash and no intention added
            assert "intention" not in actions[0]
        finally:
            shutil.rmtree(workdir)

    @patch("requests.post")
    @patch("requests.get")
    def test_reponse_gemma4_malformee(self, mock_get, mock_post):
        """Unstructured gemma4 text must not crash the enrichment."""
        from agent_v0.server_v1.stream_processor import _enrich_actions_with_intentions
        import shutil
        import tempfile

        tags_reply = MagicMock()
        tags_reply.ok = True
        mock_get.return_value = tags_reply

        chat_reply = MagicMock()
        chat_reply.ok = True
        chat_reply.json.return_value = {
            "message": {"content": "Je ne comprends pas cette demande."}
        }
        mock_post.return_value = chat_reply

        actions = [
            {"type": "click", "action_id": "act_001", "target_spec": {"by_text": "OK"}},
        ]

        workdir = Path(tempfile.mkdtemp())
        try:
            (workdir / "shots").mkdir()
            _enrich_actions_with_intentions(actions, workdir)
            # No crash, but no intention either
            assert "intention" not in actions[0]
        finally:
            shutil.rmtree(workdir)
|
||||||
762
tests/unit/test_task_planner.py
Normal file
762
tests/unit/test_task_planner.py
Normal file
@@ -0,0 +1,762 @@
|
|||||||
|
# tests/unit/test_task_planner.py
|
||||||
|
"""
|
||||||
|
Tests unitaires du TaskPlanner (planificateur MACRO).
|
||||||
|
|
||||||
|
Vérifie :
|
||||||
|
1. La compréhension d'ordres simples (understand)
|
||||||
|
2. Le matching de workflows par description sémantique
|
||||||
|
3. La détection de boucles et l'extraction de paramètres
|
||||||
|
4. La conversion étapes → actions JSON (format correct)
|
||||||
|
5. L'extraction de descriptions de session
|
||||||
|
|
||||||
|
Toutes les réponses gemma4 sont mockées pour la reproductibilité.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import tempfile
|
||||||
|
from pathlib import Path
|
||||||
|
from unittest.mock import MagicMock, patch, Mock
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
_ROOT = str(Path(__file__).resolve().parents[2])
|
||||||
|
if _ROOT not in sys.path:
|
||||||
|
sys.path.insert(0, _ROOT)
|
||||||
|
|
||||||
|
from agent_v0.server_v1.task_planner import TaskPlanner, TaskPlan
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Fixtures
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
@pytest.fixture
def planner():
    """TaskPlanner wired to a dummy gemma4 port."""
    return TaskPlanner(gemma4_port="11435", domain_id="generic")
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
def sample_workflows():
    """Three recorded workflows used by the matching tests."""
    rows = [
        ("sess_001", "Bloc-notes",
         "Ouvrir Bloc-notes via Exécuter (Win+R) et écrire du texte",
         "PC-01", 25),
        ("sess_002", "Explorateur de fichiers",
         "Naviguer dans l'Explorateur de fichiers et ouvrir des images",
         "PC-01", 40),
        ("sess_003", "DxCare, Codage CIM-10",
         "Ouvrir un dossier patient dans DxCare et coder les diagnostics CIM-10",
         "PC-TIM", 80),
    ]
    return [
        {
            "session_id": sid,
            "name": name,
            "description": description,
            "machine": machine,
            "event_count": count,
        }
        for sid, name, description, machine, count in rows
    ]
|
||||||
|
|
||||||
|
|
||||||
|
def _mock_gemma4_response(content: str):
|
||||||
|
"""Créer un mock de réponse HTTP gemma4."""
|
||||||
|
mock_resp = MagicMock()
|
||||||
|
mock_resp.ok = True
|
||||||
|
mock_resp.status_code = 200
|
||||||
|
mock_resp.json.return_value = {
|
||||||
|
"message": {"content": content}
|
||||||
|
}
|
||||||
|
return mock_resp
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests : understand — ordre simple
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
class TestUnderstandOrdreSimple:
    """understand() parses gemma4 replies for simple orders correctly."""

    def test_understand_ordre_simple(self, planner, sample_workflows):
        """'Ouvre le bloc-notes' -> understood=True."""
        reply = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 1\n"
            "CONFIANCE: 0.9\n"
            "PARAMETRES: AUCUN\n"
            "BOUCLE: NON\n"
            "SOURCE_BOUCLE: aucun\n"
            "PLAN:\n"
            "1. Ouvrir le Bloc-notes via Win+R\n"
            "2. Taper notepad et valider\n"
        )
        with patch("requests.post", return_value=_mock_gemma4_response(reply)):
            plan = planner.understand(
                "Ouvre le bloc-notes",
                available_workflows=sample_workflows,
            )
        assert plan.understood is True
        assert plan.instruction == "Ouvre le bloc-notes"

    def test_understand_instruction_non_comprise(self, planner):
        """An incomprehensible instruction -> understood=False."""
        reply = "COMPRIS: NON\nWORKFLOW: AUCUN\nBOUCLE: NON\n"
        with patch("requests.post", return_value=_mock_gemma4_response(reply)):
            plan = planner.understand("xyzzy blah blah")
        assert plan.understood is False

    def test_understand_gemma4_erreur_http(self, planner):
        """An HTTP error from gemma4 populates plan.error."""
        failing = MagicMock()
        failing.ok = False
        failing.status_code = 500
        with patch("requests.post", return_value=failing):
            plan = planner.understand("Ouvre le bloc-notes")
        assert plan.understood is False
        assert "500" in plan.error

    def test_understand_gemma4_timeout(self, planner):
        """A gemma4 timeout populates plan.error."""
        import requests
        with patch("requests.post", side_effect=requests.Timeout("timeout")):
            plan = planner.understand("Ouvre le bloc-notes")
        assert plan.understood is False
        assert "erreur" in plan.error.lower() or "timeout" in plan.error.lower()
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests : matching workflow
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
class TestUnderstandIdentifieWorkflow:
    """Workflow matching behaviour of understand()."""

    def test_understand_identifie_workflow(self, planner, sample_workflows):
        """When a workflow matches, workflow_match is populated."""
        reply = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 1\n"
            "CONFIANCE: 0.9\n"
            "PARAMETRES: AUCUN\n"
            "BOUCLE: NON\n"
            "SOURCE_BOUCLE: aucun\n"
            "PLAN:\n"
            "1. Lancer le Bloc-notes\n"
        )
        with patch("requests.post", return_value=_mock_gemma4_response(reply)):
            plan = planner.understand(
                "Ouvre le bloc-notes",
                available_workflows=sample_workflows,
            )
        assert plan.workflow_match == "sess_001"
        assert plan.workflow_name == "Bloc-notes"
        assert plan.mode == "replay"
        assert plan.match_confidence >= 0.8

    def test_understand_workflow_aucun_match(self, planner, sample_workflows):
        """No matching workflow -> free mode."""
        reply = (
            "COMPRIS: OUI\n"
            "WORKFLOW: AUCUN\n"
            "PARAMETRES: AUCUN\n"
            "BOUCLE: NON\n"
            "SOURCE_BOUCLE: aucun\n"
            "PLAN:\n"
            "1. Ouvrir Chrome\n"
            "2. Aller sur Google\n"
        )
        with patch("requests.post", return_value=_mock_gemma4_response(reply)):
            plan = planner.understand(
                "Recherche voiture sur Google",
                available_workflows=sample_workflows,
            )
        assert plan.understood is True
        assert plan.workflow_match == ""
        assert plan.mode == "free"

    def test_understand_workflow_second_match(self, planner, sample_workflows):
        """Workflow number 2 is selected correctly."""
        reply = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 2\n"
            "CONFIANCE: 0.85\n"
            "BOUCLE: NON\n"
            "PLAN:\n"
            "1. Ouvrir l'explorateur de fichiers\n"
        )
        with patch("requests.post", return_value=_mock_gemma4_response(reply)):
            plan = planner.understand(
                "Ouvre mes images",
                available_workflows=sample_workflows,
            )
        assert plan.workflow_match == "sess_002"
        assert plan.workflow_name == "Explorateur de fichiers"

    def test_understand_workflow_avec_description_dans_prompt(self, planner, sample_workflows):
        """The prompt sent to gemma4 embeds each workflow description."""
        captured_body = {}

        def capture_post(url, json=None, **kwargs):
            captured_body.update(json or {})
            return _mock_gemma4_response("COMPRIS: OUI\nWORKFLOW: AUCUN\nBOUCLE: NON\n")

        with patch("requests.post", side_effect=capture_post):
            planner.understand(
                "Ouvre le bloc-notes",
                available_workflows=sample_workflows,
            )

        prompt = captured_body["messages"][0]["content"]
        # Each workflow description must appear in the prompt
        assert "Ouvrir Bloc-notes via Exécuter" in prompt
        assert "Naviguer dans l'Explorateur" in prompt
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests : détection de boucle
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
class TestUnderstandDetecteBoucle:
    """Loop detection in understand()."""

    def test_understand_detecte_boucle(self, planner, sample_workflows):
        """'traite TOUS les dossiers' -> is_loop=True."""
        reply = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 3\n"
            "CONFIANCE: 0.8\n"
            "PARAMETRES: AUCUN\n"
            "BOUCLE: OUI\n"
            "SOURCE_BOUCLE: écran\n"
            "PLAN:\n"
            "1. Pour chaque dossier dans la liste\n"
            "2. Ouvrir le dossier\n"
            "3. Coder les diagnostics\n"
        )
        with patch("requests.post", return_value=_mock_gemma4_response(reply)):
            plan = planner.understand(
                "Traite TOUS les dossiers de la liste",
                available_workflows=sample_workflows,
            )
        assert plan.is_loop is True
        assert plan.loop_source == "écran"

    def test_understand_pas_de_boucle(self, planner):
        """A simple one-shot order -> is_loop=False."""
        reply = (
            "COMPRIS: OUI\n"
            "WORKFLOW: AUCUN\n"
            "BOUCLE: NON\n"
            "SOURCE_BOUCLE: aucun\n"
            "PLAN:\n"
            "1. Ouvrir le navigateur\n"
        )
        with patch("requests.post", return_value=_mock_gemma4_response(reply)):
            plan = planner.understand("Ouvre le navigateur")
        assert plan.is_loop is False
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests : extraction de paramètres
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
class TestUnderstandExtraitParametres:
    """Parameter extraction in understand()."""

    def test_understand_extrait_parametres(self, planner, sample_workflows):
        """'dossiers de janvier' -> parameters contains mois=janvier."""
        reply = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 3\n"
            "CONFIANCE: 0.85\n"
            "PARAMETRES: mois=janvier\n"
            "BOUCLE: OUI\n"
            "SOURCE_BOUCLE: écran\n"
            "PLAN:\n"
            "1. Filtrer les dossiers de janvier\n"
        )
        with patch("requests.post", return_value=_mock_gemma4_response(reply)):
            plan = planner.understand(
                "Traite les dossiers de janvier",
                available_workflows=sample_workflows,
            )
        assert "mois" in plan.parameters
        assert plan.parameters["mois"] == "janvier"

    def test_understand_parametres_multiples(self, planner):
        """Several parameters on separate lines are all captured."""
        reply = (
            "COMPRIS: OUI\n"
            "WORKFLOW: AUCUN\n"
            "PARAMETRES:\n"
            "- patient=DUPONT\n"
            "- date=2026-01-15\n"
            "BOUCLE: NON\n"
            "PLAN:\n"
            "1. Rechercher le patient DUPONT\n"
        )
        with patch("requests.post", return_value=_mock_gemma4_response(reply)):
            plan = planner.understand("Cherche le dossier de DUPONT du 15 janvier")
        assert plan.parameters.get("patient") == "DUPONT"
        assert plan.parameters.get("date") == "2026-01-15"

    def test_understand_parametres_inline(self, planner):
        """Parameters inline on the PARAMETRES: line are captured."""
        reply = (
            "COMPRIS: OUI\n"
            "WORKFLOW: AUCUN\n"
            "PARAMETRES: nom=Martin, ville=Paris\n"
            "BOUCLE: NON\n"
            "PLAN:\n"
            "1. Chercher Martin à Paris\n"
        )
        with patch("requests.post", return_value=_mock_gemma4_response(reply)):
            plan = planner.understand("Cherche Martin à Paris")
        assert plan.parameters.get("nom") == "Martin"
        assert plan.parameters.get("ville") == "Paris"
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests : _parse_understanding (parsing tolérant)
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
class TestParseUnderstanding:
    """Exercise the tolerant parsing of varied gemma4 replies.

    ``_parse_understanding`` must cope with markdown decoration,
    percentage or comma-decimal confidence values, parenthesised workflow
    indices and several spellings of "no workflow".
    """

    def test_parse_markdown_gras(self, planner):
        """A reply decorated with **bold** markdown is still parsed."""
        plan = TaskPlan(instruction="test")
        content = (
            "**COMPRIS:** OUI\n"
            "**WORKFLOW:** AUCUN\n"
            "**BOUCLE:** NON\n"
            "**PLAN:**\n"
            "1. Première étape\n"
        )
        result = planner._parse_understanding(plan, content, [])
        assert result.understood is True
        # No workflow matched -> falls back to "free" mode.
        assert result.mode == "free"

    def test_parse_confiance_pourcentage(self, planner, sample_workflows):
        """CONFIANCE: 90% is normalised to match_confidence=0.9."""
        plan = TaskPlan(instruction="test")
        content = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 1\n"
            "CONFIANCE: 90%\n"
            "BOUCLE: NON\n"
        )
        result = planner._parse_understanding(plan, content, sample_workflows)
        assert result.match_confidence == pytest.approx(0.9)

    def test_parse_confiance_virgule(self, planner, sample_workflows):
        """CONFIANCE: 0,85 (French decimal comma) -> match_confidence=0.85."""
        plan = TaskPlan(instruction="test")
        content = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 1\n"
            "CONFIANCE: 0,85\n"
            "BOUCLE: NON\n"
        )
        result = planner._parse_understanding(plan, content, sample_workflows)
        assert result.match_confidence == pytest.approx(0.85)

    def test_parse_workflow_avec_parentheses(self, planner, sample_workflows):
        """WORKFLOW: 2 (Explorateur) -> index 2 extracted despite the suffix."""
        plan = TaskPlan(instruction="test")
        content = (
            "COMPRIS: OUI\n"
            "WORKFLOW: 2 (Explorateur de fichiers)\n"
            "BOUCLE: NON\n"
        )
        result = planner._parse_understanding(plan, content, sample_workflows)
        # Workflow indices are 1-based over sample_workflows.
        assert result.workflow_match == "sess_002"

    def test_parse_workflow_aucun_variantes(self, planner, sample_workflows):
        """Every spelling of 'no workflow' is recognised."""
        for val in ("AUCUN", "None", "N/A", "-", "NON"):
            plan = TaskPlan(instruction="test")
            content = f"COMPRIS: OUI\nWORKFLOW: {val}\nBOUCLE: NON\n"
            result = planner._parse_understanding(plan, content, sample_workflows)
            assert result.workflow_match == "", f"Devrait être vide pour '{val}'"

    def test_parse_etapes_tirets(self, planner):
        """Plan steps written with dashes are appended to the plan."""
        plan = TaskPlan(instruction="test")
        content = (
            "COMPRIS: OUI\n"
            "WORKFLOW: AUCUN\n"
            "BOUCLE: NON\n"
            "PLAN:\n"
            "- Ouvrir l'application\n"
            "- Cliquer sur Fichier\n"
            "- Sauvegarder\n"
        )
        result = planner._parse_understanding(plan, content, [])
        assert len(result.steps) == 3
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests : _steps_to_actions
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
class TestStepsToActions:
    """Check the conversion of plan steps into JSON actions."""

    def test_steps_to_actions_format(self, planner):
        """Generated actions carry the expected shape (type, target_spec, ...)."""
        gemma4_response = (
            '{"type": "click", "target_spec": {"by_text": "Rechercher"}}\n'
            '{"type": "type", "text": "bloc-notes"}\n'
            '{"type": "key_combo", "keys": ["enter"]}\n'
            '{"type": "wait", "duration_ms": 2000}\n'
        )

        with patch("requests.post", return_value=_mock_gemma4_response(gemma4_response)):
            actions = planner._steps_to_actions(
                [{"description": "1. Ouvrir le bloc-notes"}],
                {},
            )

        assert len(actions) == 4
        assert actions[0]["type"] == "click"
        assert actions[0]["visual_mode"] is True  # Added automatically
        assert actions[0]["target_spec"]["by_text"] == "Rechercher"
        assert actions[1]["type"] == "type"
        assert actions[1]["text"] == "bloc-notes"
        assert actions[2]["type"] == "key_combo"
        assert actions[2]["keys"] == ["enter"]
        assert actions[3]["type"] == "wait"
        assert actions[3]["duration_ms"] == 2000

    def test_steps_to_actions_json_array(self, planner):
        """gemma4 returning a JSON array (inside a fence) is parsed correctly."""
        gemma4_response = (
            'Voici les actions :\n'
            '```json\n'
            '[\n'
            '  {"type": "click", "target_spec": {"by_text": "Fichier"}},\n'
            '  {"type": "click", "target_spec": {"by_text": "Ouvrir"}}\n'
            ']\n'
            '```\n'
        )

        with patch("requests.post", return_value=_mock_gemma4_response(gemma4_response)):
            actions = planner._steps_to_actions(
                [{"description": "1. Ouvrir un fichier"}],
                {},
            )

        assert len(actions) == 2
        assert actions[0]["target_spec"]["by_text"] == "Fichier"
        assert actions[1]["target_spec"]["by_text"] == "Ouvrir"

    def test_steps_to_actions_nested_json(self, planner):
        """Nested JSON (target_spec) is parsed correctly."""
        gemma4_response = (
            '{"type": "click", "target_spec": {"by_text": "OK", "window_title": "Confirmation"}}\n'
        )

        with patch("requests.post", return_value=_mock_gemma4_response(gemma4_response)):
            actions = planner._steps_to_actions(
                [{"description": "1. Confirmer"}],
                {},
            )

        assert len(actions) == 1
        assert actions[0]["target_spec"]["window_title"] == "Confirmation"

    def test_steps_to_actions_gemma4_erreur(self, planner):
        """A gemma4 HTTP error yields an empty action list."""
        mock_resp = MagicMock()
        mock_resp.ok = False

        with patch("requests.post", return_value=mock_resp):
            actions = planner._steps_to_actions(
                [{"description": "1. Faire quelque chose"}],
                {},
            )

        assert actions == []

    def test_steps_to_actions_filtre_types_invalides(self, planner):
        """Only valid types (click, type, key_combo, wait) are accepted."""
        gemma4_response = (
            '{"type": "click", "target_spec": {"by_text": "OK"}}\n'
            '{"type": "invalid_action", "foo": "bar"}\n'
            '{"type": "wait", "duration_ms": 500}\n'
            '{"not_a_type": "test"}\n'
        )

        with patch("requests.post", return_value=_mock_gemma4_response(gemma4_response)):
            actions = planner._steps_to_actions(
                [{"description": "1. Test"}],
                {},
            )

        # The unknown type and the type-less object must both be dropped.
        assert len(actions) == 2
        assert actions[0]["type"] == "click"
        assert actions[1]["type"] == "wait"
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests : _parse_actions_json (parsing robuste)
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
class TestParseActionsJson:
    """Robust parsing of action JSON in assorted layouts."""

    def test_parse_json_une_par_ligne(self):
        """One JSON action per line."""
        payload = (
            '{"type": "click", "target_spec": {"by_text": "A"}}\n'
            '{"type": "type", "text": "hello"}\n'
        )
        parsed = TaskPlanner._parse_actions_json(payload)
        assert len(parsed) == 2

    def test_parse_json_array(self):
        """A single JSON array of actions."""
        payload = '[{"type": "click", "target_spec": {"by_text": "A"}}, {"type": "wait", "duration_ms": 1000}]'
        assert len(TaskPlanner._parse_actions_json(payload)) == 2

    def test_parse_json_avec_texte_autour(self):
        """JSON surrounded by free-text commentary."""
        pieces = [
            "Voici les actions RPA :\n\n",
            '{"type": "click", "target_spec": {"by_text": "Envoyer"}}\n',
            "\n",
            "C'est tout.\n",
        ]
        parsed = TaskPlanner._parse_actions_json("".join(pieces))
        assert len(parsed) == 1
        assert parsed[0]["target_spec"]["by_text"] == "Envoyer"

    def test_parse_json_vide(self):
        """Empty or JSON-free content yields an empty list."""
        for junk in ("", "Pas de JSON ici"):
            assert TaskPlanner._parse_actions_json(junk) == []

    def test_parse_json_markdown_code_block(self):
        """JSON wrapped in a markdown code fence."""
        payload = (
            "```json\n"
            '{"type": "type", "text": "bonjour"}\n'
            "```\n"
        )
        parsed = TaskPlanner._parse_actions_json(payload)
        assert len(parsed) == 1
        assert parsed[0]["text"] == "bonjour"
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests : _extract_session_description
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
class TestExtractSessionDescription:
    """Check that session descriptions are readable and semantic."""

    def _write_events(self, tmp_path, events):
        """Write the given events to a temporary JSONL file and return its path."""
        events_file = tmp_path / "live_events.jsonl"
        with open(events_file, "w") as f:
            for evt in events:
                f.write(json.dumps(evt, ensure_ascii=False) + "\n")
        return events_file

    def test_extract_session_description_bloc_notes(self, tmp_path):
        """Notepad session via Win+R -> a semantic description."""
        events = [
            {"event": {"type": "key_combo", "keys": ["win", "r"],
                       "window": {"title": "Bureau"}}},
            {"event": {"type": "window_focus_change",
                       "from": {"title": "Bureau"},
                       "to": {"title": "Exécuter"}}},
            {"event": {"type": "text_input", "text": "notepad",
                       "window": {"title": "Exécuter"}}},
            {"event": {"type": "mouse_click", "button": "left",
                       "window": {"title": "Exécuter"}}},
            {"event": {"type": "window_focus_change",
                       "from": {"title": "Exécuter"},
                       "to": {"title": "Sans titre – Bloc-notes"}}},
            {"event": {"type": "text_input", "text": "Bonjour le monde",
                       "window": {"title": "Sans titre – Bloc-notes"}}},
        ]
        events_file = self._write_events(tmp_path, events)

        # Import from api_stream (the function lives at module level).
        from agent_v0.server_v1.api_stream import _extract_session_description
        desc = _extract_session_description(events_file)

        assert desc["event_count"] == 6
        # The description must be readable, not just "Bloc-notes, Exécuter".
        description = desc["description"]
        assert "Bloc-notes" in description or "bloc-notes" in description.lower()
        # The name must mention the application.
        assert "Bloc-notes" in desc["name"]

    def test_extract_session_description_explorateur(self, tmp_path):
        """File Explorer session -> a relevant description."""
        events = [
            {"event": {"type": "window_focus_change",
                       "from": {"title": "Bureau"},
                       "to": {"title": "Images – Explorateur de fichiers"}}},
            {"event": {"type": "mouse_click", "button": "left",
                       "window": {"title": "Images – Explorateur de fichiers"}}},
            {"event": {"type": "mouse_click", "button": "left",
                       "window": {"title": "Images – Explorateur de fichiers"}}},
            {"event": {"type": "mouse_click", "button": "left",
                       "window": {"title": "Images – Explorateur de fichiers"}}},
        ]
        events_file = self._write_events(tmp_path, events)

        from agent_v0.server_v1.api_stream import _extract_session_description
        desc = _extract_session_description(events_file)

        assert "Explorateur" in desc["name"] or "Explorateur" in desc["description"]

    def test_extract_session_description_vide(self, tmp_path):
        """Empty events file -> default description."""
        events_file = self._write_events(tmp_path, [])

        from agent_v0.server_v1.api_stream import _extract_session_description
        desc = _extract_session_description(events_file)

        assert desc["event_count"] == 0
        assert desc["name"] == "Session sans nom"

    def test_extract_session_description_cmd(self, tmp_path):
        """Session involving cmd.exe -> description mentions cmd."""
        events = [
            {"event": {"type": "window_focus_change",
                       "from": {"title": "Bureau"},
                       "to": {"title": "C:\\Windows\\system32\\cmd.exe"}}},
            {"event": {"type": "text_input", "text": "dir",
                       "window": {"title": "C:\\Windows\\system32\\cmd.exe"}}},
            {"event": {"type": "text_input", "text": "cd documents",
                       "window": {"title": "C:\\Windows\\system32\\cmd.exe"}}},
        ]
        events_file = self._write_events(tmp_path, events)

        from agent_v0.server_v1.api_stream import _extract_session_description
        desc = _extract_session_description(events_file)

        assert desc["event_count"] == 3
        # Either the name or the description must mention cmd.
        full = f"{desc['name']} {desc['description']}"
        assert "cmd" in full.lower()

    def test_extract_session_description_recherche_windows(self, tmp_path):
        """Windows search session (Win+S) -> description mentions the search."""
        events = [
            {"event": {"type": "key_combo", "keys": ["win", "s"],
                       "window": {"title": "Bureau"}}},
            {"event": {"type": "window_focus_change",
                       "from": {"title": "Bureau"},
                       "to": {"title": "Rechercher"}}},
            {"event": {"type": "text_input", "text": "calculator",
                       "window": {"title": "Rechercher"}}},
        ]
        events_file = self._write_events(tmp_path, events)

        from agent_v0.server_v1.api_stream import _extract_session_description
        desc = _extract_session_description(events_file)

        # The description must mention the Windows search.
        assert "recherche" in desc["description"].lower()
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests : list_capabilities
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
class TestListCapabilities:
    """Capability listing output."""

    def test_list_capabilities_avec_workflows(self, planner, sample_workflows):
        """With workflows: readable text including the descriptions."""
        rendered = planner.list_capabilities(sample_workflows)
        for expected in ("Léa sait faire", "Bloc-notes"):
            assert expected in rendered

    def test_list_capabilities_sans_workflows(self, planner):
        """Without any workflow: a help message is returned."""
        assert "pas encore appris" in planner.list_capabilities([])
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests : execute (mode replay et free)
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
class TestExecute:
    """Check plan execution (replay and free modes)."""

    def test_execute_replay(self, planner):
        """Replay mode -> the callback is invoked with the right session_id."""
        plan = TaskPlan(
            instruction="Ouvre le bloc-notes",
            understood=True,
            workflow_match="sess_001",
            workflow_name="Bloc-notes",
            mode="replay",
        )

        callback = MagicMock(return_value="replay_123")
        result = planner.execute(plan, replay_callback=callback)

        assert result.success is True
        callback.assert_called_once_with(
            session_id="sess_001",
            machine_id="default",
            params={},
        )

    def test_execute_non_compris(self, planner):
        """A plan that was not understood fails."""
        plan = TaskPlan(instruction="blah", understood=False)
        result = planner.execute(plan)
        assert result.success is False
        assert "non comprise" in result.summary.lower() or "non comprise" in result.summary

    def test_execute_sans_callback(self, planner):
        """Replay mode without a callback fails."""
        plan = TaskPlan(
            instruction="test",
            understood=True,
            workflow_match="sess_001",
            mode="replay",
        )
        result = planner.execute(plan, replay_callback=None)
        assert result.success is False
|
||||||
419
tests/visual/test_grounding_benchmark.py
Normal file
419
tests/visual/test_grounding_benchmark.py
Normal file
@@ -0,0 +1,419 @@
|
|||||||
|
"""
|
||||||
|
Benchmark de grounding — 3 approches testées en boucle.
|
||||||
|
|
||||||
|
Compare la robustesse et la précision de :
|
||||||
|
1. Baseline : qwen2.5vl direct
|
||||||
|
2. Zoom progressif : 2 passes (full → crop → re-grounding)
|
||||||
|
3. OCR-first : docTR localise le texte, VLM seulement pour les icônes
|
||||||
|
|
||||||
|
Chaque approche est testée N fois sur les mêmes cibles.
|
||||||
|
Mesure : taux de détection, variance des coordonnées, temps moyen.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import base64
|
||||||
|
import io
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, List, Optional, Tuple
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
_ROOT = str(Path(__file__).resolve().parents[2])
|
||||||
|
if _ROOT not in sys.path:
|
||||||
|
sys.path.insert(0, _ROOT)
|
||||||
|
|
||||||
|
_SHOTS_DIR = Path(_ROOT) / "data/training/live_sessions/DESKTOP-ST3VBSD_windows/sess_20260404T135010_cec5c8/shots"
|
||||||
|
|
||||||
|
# Nombre d'itérations par test
|
||||||
|
N_ITERATIONS = 5
|
||||||
|
|
||||||
|
|
||||||
|
def _load_screenshot(name: str) -> str:
    """Return the named reference screenshot as a base64 string.

    Skips the calling test when the fixture image is absent.
    """
    shot_path = _SHOTS_DIR / name
    if not shot_path.is_file():
        pytest.skip(f"Screenshot {name} non disponible")
    raw = shot_path.read_bytes()
    return base64.b64encode(raw).decode()
|
||||||
|
|
||||||
|
|
||||||
|
def _load_screenshot_pil(name: str):
    """Open the named reference screenshot as a PIL image.

    Skips the calling test when the fixture image is absent.
    """
    from PIL import Image

    shot_path = _SHOTS_DIR / name
    if not shot_path.is_file():
        pytest.skip(f"Screenshot {name} non disponible")
    return Image.open(shot_path)
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Approche 1 : Baseline qwen2.5vl direct
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_bbox_2d(content: str) -> Optional[Tuple[int, int, int, int]]:
|
||||||
|
"""Parser les coordonnées bbox_2d depuis une réponse qwen2.5vl.
|
||||||
|
|
||||||
|
qwen2.5vl retourne du JSON :
|
||||||
|
```json
|
||||||
|
[{"bbox_2d": [x1, y1, x2, y2], "label": "..."}]
|
||||||
|
```
|
||||||
|
Les coordonnées sont en pixels relatifs à l'image envoyée.
|
||||||
|
"""
|
||||||
|
# Stratégie 1 : parser le JSON complet (le plus fiable)
|
||||||
|
# Nettoyer les fences markdown
|
||||||
|
cleaned = re.sub(r'```(?:json)?\s*', '', content).strip()
|
||||||
|
try:
|
||||||
|
data = json.loads(cleaned)
|
||||||
|
if isinstance(data, list) and len(data) > 0:
|
||||||
|
bbox = data[0].get("bbox_2d")
|
||||||
|
if bbox and len(bbox) >= 4:
|
||||||
|
return (int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))
|
||||||
|
elif isinstance(data, dict):
|
||||||
|
bbox = data.get("bbox_2d")
|
||||||
|
if bbox and len(bbox) >= 4:
|
||||||
|
return (int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))
|
||||||
|
except (json.JSONDecodeError, ValueError, TypeError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
# Stratégie 2 : regex ciblé sur "bbox_2d": [x1, y1, x2, y2]
|
||||||
|
bbox_match = re.search(
|
||||||
|
r'"bbox_2d"\s*:\s*\[\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\]',
|
||||||
|
content,
|
||||||
|
)
|
||||||
|
if bbox_match:
|
||||||
|
return tuple(int(bbox_match.group(i)) for i in range(1, 5))
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def grounding_baseline(screenshot_b64: str, description: str, img_width: int = 1280, img_height: int = 800) -> Optional[Tuple[float, float]]:
    """Direct qwen2.5vl grounding — returns normalised (x_pct, y_pct).

    qwen2.5vl answers with pixel coordinates relative to the submitted
    image; the bbox centre is normalised by the supplied dimensions.
    Defaults assume a 1280x800 capture — pass the real size when the
    screenshot differs, or the normalised point will be wrong.

    Returns None on HTTP failure, unparsable reply, or a centre falling
    outside the 0-1 range.
    """
    import requests

    try:
        resp = requests.post(
            "http://localhost:11434/api/chat",
            json={
                "model": "qwen2.5vl:7b",
                "messages": [{"role": "user", "content": f"Detect '{description}' with a bounding box.", "images": [screenshot_b64]}],
                "stream": False,
                # temperature 0 for determinism; 100 tokens is ample for one bbox JSON.
                "options": {"temperature": 0.0, "num_predict": 100},
            },
            timeout=30,
        )
        if not resp.ok:
            return None
        content = resp.json().get("message", {}).get("content", "")
        bbox = _parse_bbox_2d(content)
        if bbox:
            x1, y1, x2, y2 = bbox
            # Normalise the bbox centre from pixels to 0-1.
            cx = (x1 + x2) / 2 / img_width
            cy = (y1 + y2) / 2 / img_height
            if 0.0 <= cx <= 1.0 and 0.0 <= cy <= 1.0:
                return (cx, cy)
    except Exception:
        # Best-effort benchmark helper: any network/parse error counts as
        # "not detected" instead of failing the whole benchmark run.
        pass
    return None
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Approche 2 : Zoom progressif (2 passes)
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
def grounding_zoom(screenshot_b64: str, description: str, img_width: int = 1280, img_height: int = 800) -> Optional[Tuple[float, float]]:
    """Progressive zoom — pass 1 (full image) then pass 2 (crop re-grounding).

    Pass 1 grounds on the full screenshot; pass 2 crops a region around
    the found point (25% of the image on each side, i.e. a half-size
    window) and re-grounds inside it, then maps the refined point back to
    full-image coordinates. Falls back to the pass-1 result whenever pass
    2 fails.
    """
    import requests  # NOTE(review): unused here — requests is only used via grounding_baseline
    from PIL import Image

    # Pass 1: grounding on the full image.
    result1 = grounding_baseline(screenshot_b64, description, img_width, img_height)
    if result1 is None:
        return None

    x1_pct, y1_pct = result1

    # Pass 2: crop around the found zone, then re-ground.
    try:
        img_bytes = base64.b64decode(screenshot_b64)
        img = Image.open(io.BytesIO(img_bytes))
        w, h = img.size

        # Crop window: 25% of the image on each side of the found point.
        crop_size = 0.25
        cx_px = int(x1_pct * w)
        cy_px = int(y1_pct * h)
        x_left = max(0, cx_px - int(crop_size * w))
        y_top = max(0, cy_px - int(crop_size * h))
        x_right = min(w, cx_px + int(crop_size * w))
        y_bottom = min(h, cy_px + int(crop_size * h))

        cropped = img.crop((x_left, y_top, x_right, y_bottom))
        crop_w, crop_h = cropped.size

        # Encode the crop as base64 JPEG.
        buf = io.BytesIO()
        cropped.save(buf, format="JPEG", quality=85)
        crop_b64 = base64.b64encode(buf.getvalue()).decode()

        # Pass 2: re-ground on the crop (using the crop's dimensions).
        result2 = grounding_baseline(crop_b64, description, crop_w, crop_h)
        if result2 is None:
            return result1  # Fall back to pass 1

        # Map the crop-relative point back to full-image coordinates.
        x2_in_crop, y2_in_crop = result2
        x_final = (x_left + x2_in_crop * crop_w) / w
        y_final = (y_top + y2_in_crop * crop_h) / h
        return (x_final, y_final)

    except Exception:
        return result1  # Fallback: keep the pass-1 point
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Approche 3 : OCR-first (docTR)
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
def grounding_ocr_first(screenshot_b64: str, description: str) -> Optional[Tuple[float, float]]:
    """OCR-first grounding — docTR locates text, the VLM handles the rest.

    Runs docTR OCR over the screenshot and looks for a word matching the
    description (exact or substring, either direction). Falls back to the
    VLM baseline when docTR is unavailable, errors out, or no match
    scores above 0.5.

    NOTE(review): the OCR predictor is rebuilt (pretrained weights loaded)
    on every call — acceptable for a benchmark, costly anywhere else.
    """
    try:
        from doctr.io import DocumentFile
        from doctr.models import ocr_predictor

        # Decode the image.
        img_bytes = base64.b64decode(screenshot_b64)

        # OCR pass.
        predictor = ocr_predictor(det_arch='db_resnet50', reco_arch='crnn_vgg16_bn', pretrained=True)
        doc = DocumentFile.from_images([img_bytes])
        result = predictor(doc)

        # Search the OCR output for the target text.
        target_lower = description.lower()
        best_match = None
        best_score = 0

        for page in result.pages:
            for block in page.blocks:
                for line_obj in block.lines:
                    for word in line_obj.words:
                        word_text = word.value.lower()
                        # Exact or partial match (either containment direction).
                        if target_lower in word_text or word_text in target_lower:
                            # Score: how much of the target the word covers.
                            score = len(word_text) / max(len(target_lower), 1)
                            if score > best_score:
                                # docTR geometry is already normalised 0-1.
                                box = word.geometry  # ((x1,y1), (x2,y2))
                                cx = (box[0][0] + box[1][0]) / 2
                                cy = (box[0][1] + box[1][1]) / 2
                                best_match = (cx, cy)
                                best_score = score

        if best_match and best_score > 0.5:
            return best_match

    except ImportError:
        pass  # docTR not available — fall through to the VLM
    except Exception:
        pass

    # VLM fallback for elements without text (icons, images).
    # NOTE(review): uses grounding_baseline's default 1280x800 dimensions;
    # confirm the benchmark screenshots actually have that size.
    return grounding_baseline(screenshot_b64, description)
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Framework de benchmark
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
def run_benchmark(
    approach_fn,
    approach_name: str,
    screenshot_b64: str,
    description: str,
    n_iterations: Optional[int] = None,
) -> Dict:
    """Run one grounding benchmark: N iterations, measure spread and timing.

    Args:
        approach_fn: callable ``(screenshot_b64, description) -> (x, y) | None``.
        approach_name: label recorded in the stats dict.
        screenshot_b64: base64 screenshot handed to the approach.
        description: target description handed to the approach.
        n_iterations: number of runs; defaults to the module-level
            ``N_ITERATIONS`` when omitted (resolved at call time).

    Returns:
        A stats dict with ``detection_rate`` and ``avg_time_ms``, plus —
        when at least one run detected the target — the mean position and
        per-axis spread. NOTE: the ``x_variance``/``y_variance`` keys hold
        the max-min *range*, not a statistical variance; the key names are
        kept for compatibility with existing reports.
    """
    if n_iterations is None:
        n_iterations = N_ITERATIONS
    if n_iterations <= 0:
        # Degenerate call: report an empty run instead of dividing by zero.
        return {
            "approach": approach_name,
            "target": description,
            "iterations": 0,
            "detection_rate": 0.0,
            "avg_time_ms": 0.0,
            "stable": False,
        }

    results: List[Tuple[float, float]] = []
    times: List[float] = []

    for _ in range(n_iterations):
        t_start = time.time()
        result = approach_fn(screenshot_b64, description)
        times.append(time.time() - t_start)
        if result is not None:
            results.append(result)

    n_found = len(results)
    detection_rate = n_found / n_iterations

    stats = {
        "approach": approach_name,
        "target": description,
        "iterations": n_iterations,
        "detection_rate": round(detection_rate, 2),
        "avg_time_ms": round(sum(times) / len(times) * 1000, 0),
    }

    if n_found >= 2:
        xs = [r[0] for r in results]
        ys = [r[1] for r in results]
        stats["x_mean"] = round(sum(xs) / len(xs), 4)
        stats["y_mean"] = round(sum(ys) / len(ys), 4)
        # Spread = max - min over the detected positions (see docstring).
        stats["x_variance"] = round(max(xs) - min(xs), 4)
        stats["y_variance"] = round(max(ys) - min(ys), 4)
        stats["stable"] = stats["x_variance"] < 0.05 and stats["y_variance"] < 0.05
    elif n_found == 1:
        stats["x_mean"] = round(results[0][0], 4)
        stats["y_mean"] = round(results[0][1], 4)
        stats["x_variance"] = 0
        stats["y_variance"] = 0
        stats["stable"] = True
    else:
        stats["stable"] = False

    return stats
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests de benchmark comparatif
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
# Cibles à tester (screenshot, description, nom)
|
||||||
|
# Benchmark targets: (screenshot file, description sent to the model, human label)
_TARGETS = [
    ("shot_0001_full.png", "Rechercher", "Rechercher taskbar"),
    ("shot_0001_full.png", "agent_v1", "Dossier agent_v1"),
    ("shot_0004_full.png", "Fichier", "Menu Fichier"),
    ("shot_0004_full.png", "Modifier", "Menu Modifier"),
    ("shot_0004_full.png", "Ceci est un test.txt", "Onglet fichier"),
    ("shot_0014_full.png", "Rechercher sur Google ou saisir une URL", "Recherche Google"),
    ("shot_0014_full.png", "Gmail", "Lien Gmail"),
]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.visual
class TestBenchmarkBaseline:
    """Benchmark the baseline approach (direct qwen2.5vl grounding)."""

    @pytest.mark.parametrize("shot,desc,name", _TARGETS)
    def test_baseline_robustesse(self, shot, desc, name):
        # Run each target N_ITERATIONS times; report spread and timing.
        screenshot = _load_screenshot(shot)
        stats = run_benchmark(grounding_baseline, "baseline", screenshot, desc, N_ITERATIONS)

        print(f"\n [{stats['approach']}] {name}:")
        print(f" Détection: {stats['detection_rate']*100:.0f}% ({int(stats['detection_rate']*N_ITERATIONS)}/{N_ITERATIONS})")
        print(f" Temps moyen: {stats['avg_time_ms']:.0f}ms")
        if stats.get("x_mean") is not None:
            print(f" Position: ({stats['x_mean']:.3f}, {stats['y_mean']:.3f})")
            print(f" Variance: X={stats['x_variance']:.4f} Y={stats['y_variance']:.4f}")
            print(f" Stable: {'OUI' if stats['stable'] else 'NON'}")

        # Require at least 60% detection for the baseline.
        assert stats["detection_rate"] >= 0.6, f"{name}: détection trop faible ({stats['detection_rate']})"
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.visual
class TestBenchmarkZoom:
    """Benchmark the progressive-zoom approach (two grounding passes)."""

    @pytest.mark.parametrize("shot,desc,name", _TARGETS)
    def test_zoom_robustesse(self, shot, desc, name):
        screenshot = _load_screenshot(shot)
        stats = run_benchmark(grounding_zoom, "zoom", screenshot, desc, N_ITERATIONS)

        print(f"\n [{stats['approach']}] {name}:")
        print(f" Détection: {stats['detection_rate']*100:.0f}% ({int(stats['detection_rate']*N_ITERATIONS)}/{N_ITERATIONS})")
        print(f" Temps moyen: {stats['avg_time_ms']:.0f}ms")
        if stats.get("x_mean") is not None:
            print(f" Position: ({stats['x_mean']:.3f}, {stats['y_mean']:.3f})")
            print(f" Variance: X={stats['x_variance']:.4f} Y={stats['y_variance']:.4f}")
            print(f" Stable: {'OUI' if stats['stable'] else 'NON'}")

        # Same 60% detection floor as the baseline benchmark.
        assert stats["detection_rate"] >= 0.6, f"{name}: détection trop faible ({stats['detection_rate']})"
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.visual
class TestBenchmarkCitrix:
    """Baseline benchmark on degraded images (Citrix JPEG Q20 simulation)."""

    def _degrade_citrix(self, screenshot_b64: str) -> str:
        """Simulate Citrix compression by re-encoding as JPEG quality 20."""
        from PIL import Image
        img_bytes = base64.b64decode(screenshot_b64)
        img = Image.open(io.BytesIO(img_bytes))
        buf = io.BytesIO()
        img.save(buf, "JPEG", quality=20)
        return base64.b64encode(buf.getvalue()).decode()

    @pytest.mark.parametrize("shot,desc,name", _TARGETS)
    def test_citrix_robustesse(self, shot, desc, name):
        screenshot = _load_screenshot(shot)
        citrix = self._degrade_citrix(screenshot)
        stats = run_benchmark(grounding_baseline, "citrix_q20", citrix, desc, N_ITERATIONS)

        print(f"\n [{stats['approach']}] {name}:")
        print(f" Détection: {stats['detection_rate']*100:.0f}%")
        print(f" Temps moyen: {stats['avg_time_ms']:.0f}ms")
        if stats.get("x_mean") is not None:
            print(f" Position: ({stats['x_mean']:.3f}, {stats['y_mean']:.3f})")
            print(f" Variance: X={stats['x_variance']:.4f} Y={stats['y_variance']:.4f}")
            print(f" Stable: {'OUI' if stats['stable'] else 'NON'}")

        # Citrix-grade compression is harder — accept a lower 40% floor.
        assert stats["detection_rate"] >= 0.4, f"{name} Citrix: détection trop faible ({stats['detection_rate']})"
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.visual
|
||||||
|
class TestRapportComparatif:
|
||||||
|
"""Génère un rapport comparatif des 3 approches."""
|
||||||
|
|
||||||
|
def test_rapport_complet(self):
|
||||||
|
"""Exécuter les 3 approches sur toutes les cibles et comparer."""
|
||||||
|
from PIL import Image
|
||||||
|
|
||||||
|
all_results = []
|
||||||
|
|
||||||
|
for shot, desc, name in _TARGETS:
|
||||||
|
screenshot = _load_screenshot(shot)
|
||||||
|
|
||||||
|
# Citrix
|
||||||
|
img_bytes = base64.b64decode(screenshot)
|
||||||
|
img = Image.open(io.BytesIO(img_bytes))
|
||||||
|
buf = io.BytesIO()
|
||||||
|
img.save(buf, "JPEG", quality=20)
|
||||||
|
citrix = base64.b64encode(buf.getvalue()).decode()
|
||||||
|
|
||||||
|
for approach_fn, approach_name, img_b64 in [
|
||||||
|
(grounding_baseline, "baseline", screenshot),
|
||||||
|
(grounding_zoom, "zoom", screenshot),
|
||||||
|
(grounding_baseline, "citrix_q20", citrix),
|
||||||
|
]:
|
||||||
|
stats = run_benchmark(approach_fn, approach_name, img_b64, desc, 3)
|
||||||
|
stats["target_name"] = name
|
||||||
|
all_results.append(stats)
|
||||||
|
|
||||||
|
# Rapport
|
||||||
|
print("\n" + "=" * 80)
|
||||||
|
print("RAPPORT COMPARATIF — GROUNDING BENCHMARK")
|
||||||
|
print("=" * 80)
|
||||||
|
print(f"{'Cible':<25s} {'Approche':<12s} {'Détect.':<8s} {'Temps':<8s} {'Position':<20s} {'Var X':<8s} {'Var Y':<8s} {'Stable'}")
|
||||||
|
print("-" * 80)
|
||||||
|
for r in all_results:
|
||||||
|
pos = f"({r.get('x_mean',0):.3f}, {r.get('y_mean',0):.3f})" if r.get('x_mean') is not None else "N/A"
|
||||||
|
var_x = f"{r.get('x_variance',0):.4f}" if r.get('x_variance') is not None else "N/A"
|
||||||
|
var_y = f"{r.get('y_variance',0):.4f}" if r.get('y_variance') is not None else "N/A"
|
||||||
|
stable = "OUI" if r.get('stable') else "NON"
|
||||||
|
print(f"{r['target_name']:<25s} {r['approach']:<12s} {r['detection_rate']*100:5.0f}% {r['avg_time_ms']:5.0f}ms {pos:<20s} {var_x:<8s} {var_y:<8s} {stable}")
|
||||||
|
print("=" * 80)
|
||||||
445
tests/visual/test_visual_grounding.py
Normal file
445
tests/visual/test_visual_grounding.py
Normal file
@@ -0,0 +1,445 @@
|
|||||||
|
"""
|
||||||
|
Tests visuels sur captures d'écran réelles — Grounding benchmark.
|
||||||
|
|
||||||
|
Vérifie que le système trouve les bons éléments UI sur des screenshots
|
||||||
|
Windows réels. Pas besoin de VM — juste les images et le serveur.
|
||||||
|
|
||||||
|
Chaque test :
|
||||||
|
1. Charge un screenshot réel (sessions enregistrées)
|
||||||
|
2. Demande au serveur de localiser un élément (via /resolve_target)
|
||||||
|
3. Vérifie que les coordonnées retournées sont dans la zone attendue
|
||||||
|
|
||||||
|
C'est l'apprentissage de l'environnement Windows :
|
||||||
|
- Rechercher un programme
|
||||||
|
- Fermer/réduire/agrandir une fenêtre
|
||||||
|
- Naviguer dans les onglets
|
||||||
|
- Utiliser les menus
|
||||||
|
"""
|
||||||
|
|
||||||
|
import base64
|
||||||
|
import io
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional, Tuple
|
||||||
|
from unittest.mock import MagicMock, patch
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
_ROOT = str(Path(__file__).resolve().parents[2])
|
||||||
|
if _ROOT not in sys.path:
|
||||||
|
sys.path.insert(0, _ROOT)
|
||||||
|
|
||||||
|
# Répertoire des screenshots de test
|
||||||
|
_SHOTS_DIR = Path(_ROOT) / "data/training/live_sessions/DESKTOP-ST3VBSD_windows/sess_20260404T135010_cec5c8/shots"
|
||||||
|
|
||||||
|
# Résolution des screenshots
|
||||||
|
_SCREEN_W = 1280
|
||||||
|
_SCREEN_H = 800
|
||||||
|
|
||||||
|
|
||||||
|
def _load_screenshot(name: str) -> Optional[str]:
|
||||||
|
"""Charger un screenshot en base64."""
|
||||||
|
path = _SHOTS_DIR / name
|
||||||
|
if not path.is_file():
|
||||||
|
pytest.skip(f"Screenshot {name} non disponible")
|
||||||
|
return base64.b64encode(path.read_bytes()).decode()
|
||||||
|
|
||||||
|
|
||||||
|
def _in_zone(x_pct: float, y_pct: float, zone: dict) -> bool:
|
||||||
|
"""Vérifier si un point est dans une zone attendue.
|
||||||
|
|
||||||
|
zone = {"x_min": 0.3, "x_max": 0.5, "y_min": 0.9, "y_max": 1.0}
|
||||||
|
"""
|
||||||
|
return (
|
||||||
|
zone["x_min"] <= x_pct <= zone["x_max"]
|
||||||
|
and zone["y_min"] <= y_pct <= zone["y_max"]
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _resolve_via_server(
|
||||||
|
screenshot_b64: str,
|
||||||
|
target_spec: dict,
|
||||||
|
strict: bool = True,
|
||||||
|
) -> Optional[dict]:
|
||||||
|
"""Résoudre une cible visuellement via le VLM (qwen2.5vl grounding direct).
|
||||||
|
|
||||||
|
Appelle qwen2.5vl directement pour le grounding (bbox_2d).
|
||||||
|
Si le VLM ne trouve pas, essaie aussi via l'endpoint serveur.
|
||||||
|
"""
|
||||||
|
import requests
|
||||||
|
import re
|
||||||
|
|
||||||
|
# ── Stratégie 1 : Grounding VLM direct (qwen2.5vl) ──
|
||||||
|
by_text = target_spec.get("by_text", "")
|
||||||
|
vlm_desc = target_spec.get("vlm_description", "")
|
||||||
|
search_text = by_text or vlm_desc
|
||||||
|
|
||||||
|
if search_text:
|
||||||
|
try:
|
||||||
|
prompt = f"Detect the element '{search_text}' with a bounding box."
|
||||||
|
resp = requests.post(
|
||||||
|
"http://localhost:11434/api/chat",
|
||||||
|
json={
|
||||||
|
"model": "qwen2.5vl:7b",
|
||||||
|
"messages": [{"role": "user", "content": prompt, "images": [screenshot_b64]}],
|
||||||
|
"stream": False,
|
||||||
|
"options": {"temperature": 0.0, "num_predict": 100},
|
||||||
|
},
|
||||||
|
timeout=30,
|
||||||
|
)
|
||||||
|
if resp.ok:
|
||||||
|
content = resp.json().get("message", {}).get("content", "")
|
||||||
|
# Parser bbox_2d — qwen2.5vl retourne des pixels relatifs à l'image,
|
||||||
|
# PAS une grille 1000x1000.
|
||||||
|
bbox_match = re.search(
|
||||||
|
r'"bbox_2d"\s*:\s*\[\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\]',
|
||||||
|
content,
|
||||||
|
)
|
||||||
|
if bbox_match:
|
||||||
|
x1, y1, x2, y2 = [int(bbox_match.group(i)) for i in range(1, 5)]
|
||||||
|
# Normaliser par les dimensions de l'image (pixels → 0-1)
|
||||||
|
cx = (x1 + x2) / 2 / _SCREEN_W
|
||||||
|
cy = (y1 + y2) / 2 / _SCREEN_H
|
||||||
|
if 0.0 <= cx <= 1.0 and 0.0 <= cy <= 1.0:
|
||||||
|
return {
|
||||||
|
"resolved": True,
|
||||||
|
"method": "vlm_grounding",
|
||||||
|
"x_pct": cx,
|
||||||
|
"y_pct": cy,
|
||||||
|
"score": 0.8,
|
||||||
|
"raw_bbox": [x1, y1, x2, y2],
|
||||||
|
}
|
||||||
|
except requests.Timeout:
|
||||||
|
pytest.skip("qwen2.5vl timeout — premier chargement ?")
|
||||||
|
except requests.ConnectionError:
|
||||||
|
pytest.skip("Ollama non disponible (localhost:11434)")
|
||||||
|
|
||||||
|
# ── Stratégie 2 : Endpoint serveur (fallback) ──
|
||||||
|
token = os.environ.get("RPA_API_TOKEN", "")
|
||||||
|
if not token:
|
||||||
|
env_file = Path(_ROOT) / ".env.local"
|
||||||
|
if env_file.is_file():
|
||||||
|
for line in env_file.read_text().splitlines():
|
||||||
|
if line.startswith("RPA_API_TOKEN="):
|
||||||
|
token = line.split("=", 1)[1].strip()
|
||||||
|
|
||||||
|
headers = {"Content-Type": "application/json"}
|
||||||
|
if token:
|
||||||
|
headers["Authorization"] = f"Bearer {token}"
|
||||||
|
|
||||||
|
try:
|
||||||
|
resp = requests.post(
|
||||||
|
"http://localhost:5005/api/v1/traces/stream/replay/resolve_target",
|
||||||
|
json={
|
||||||
|
"session_id": "visual_test",
|
||||||
|
"screenshot_b64": screenshot_b64,
|
||||||
|
"target_spec": target_spec,
|
||||||
|
"screen_width": _SCREEN_W,
|
||||||
|
"screen_height": _SCREEN_H,
|
||||||
|
"fallback_x_pct": 0.5,
|
||||||
|
"fallback_y_pct": 0.5,
|
||||||
|
"strict_mode": strict,
|
||||||
|
},
|
||||||
|
headers=headers,
|
||||||
|
timeout=30,
|
||||||
|
)
|
||||||
|
if resp.ok:
|
||||||
|
data = resp.json()
|
||||||
|
if data.get("resolved"):
|
||||||
|
return data
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def _assert_found_in_zone(result: dict, zone: dict, element_name: str):
|
||||||
|
"""Vérifier qu'un élément a été trouvé dans la zone attendue."""
|
||||||
|
assert result is not None, f"{element_name}: pas de réponse du serveur"
|
||||||
|
assert result.get("resolved"), (
|
||||||
|
f"{element_name}: non trouvé (reason={result.get('reason', '?')})"
|
||||||
|
)
|
||||||
|
x = result.get("x_pct", 0)
|
||||||
|
y = result.get("y_pct", 0)
|
||||||
|
assert _in_zone(x, y, zone), (
|
||||||
|
f"{element_name}: trouvé à ({x:.3f}, {y:.3f}) "
|
||||||
|
f"mais attendu dans zone x=[{zone['x_min']:.2f}-{zone['x_max']:.2f}] "
|
||||||
|
f"y=[{zone['y_min']:.2f}-{zone['y_max']:.2f}]"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# shot_0001 : Explorateur de fichiers Windows
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.visual
|
||||||
|
class TestExplorateurFichiers:
|
||||||
|
"""Tests sur l'Explorateur de fichiers Windows (shot_0001)."""
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def screenshot(self):
|
||||||
|
return _load_screenshot("shot_0001_full.png")
|
||||||
|
|
||||||
|
def test_trouver_rechercher_taskbar(self, screenshot):
|
||||||
|
"""Trouver 'Rechercher' dans la barre des tâches."""
|
||||||
|
result = _resolve_via_server(screenshot, {
|
||||||
|
"by_text": "Rechercher",
|
||||||
|
"vlm_description": "La barre de recherche Windows dans la barre des tâches, en bas de l'écran",
|
||||||
|
})
|
||||||
|
_assert_found_in_zone(result, {
|
||||||
|
"x_min": 0.20, "x_max": 0.50,
|
||||||
|
"y_min": 0.90, "y_max": 1.00,
|
||||||
|
}, "Rechercher (taskbar)")
|
||||||
|
|
||||||
|
def test_trouver_bouton_fermer_explorateur(self, screenshot):
|
||||||
|
"""Trouver le bouton X (fermer) de l'Explorateur."""
|
||||||
|
result = _resolve_via_server(screenshot, {
|
||||||
|
"by_text": "",
|
||||||
|
"vlm_description": "Le bouton fermer (X) de la fenêtre Explorateur de fichiers, en haut à droite",
|
||||||
|
})
|
||||||
|
_assert_found_in_zone(result, {
|
||||||
|
"x_min": 0.90, "x_max": 1.00,
|
||||||
|
"y_min": 0.00, "y_max": 0.05,
|
||||||
|
}, "Bouton fermer (X)")
|
||||||
|
|
||||||
|
def test_trouver_bouton_reduire(self, screenshot):
|
||||||
|
"""Trouver le bouton réduire (-) de l'Explorateur."""
|
||||||
|
result = _resolve_via_server(screenshot, {
|
||||||
|
"by_text": "",
|
||||||
|
"vlm_description": "Le bouton réduire (minimize, -) de la fenêtre, en haut à droite à gauche du X",
|
||||||
|
})
|
||||||
|
_assert_found_in_zone(result, {
|
||||||
|
"x_min": 0.85, "x_max": 0.95,
|
||||||
|
"y_min": 0.00, "y_max": 0.05,
|
||||||
|
}, "Bouton réduire (-)")
|
||||||
|
|
||||||
|
def test_trouver_dossier_agent_v1(self, screenshot):
|
||||||
|
"""Trouver le dossier 'agent_v1' dans la liste des fichiers."""
|
||||||
|
result = _resolve_via_server(screenshot, {
|
||||||
|
"by_text": "agent_v1",
|
||||||
|
"vlm_description": "Le dossier agent_v1 dans la liste des fichiers de l'Explorateur",
|
||||||
|
})
|
||||||
|
_assert_found_in_zone(result, {
|
||||||
|
"x_min": 0.05, "x_max": 0.50,
|
||||||
|
"y_min": 0.10, "y_max": 0.30,
|
||||||
|
}, "Dossier agent_v1")
|
||||||
|
|
||||||
|
def test_trouver_bouton_demarrer(self, screenshot):
|
||||||
|
"""Trouver le bouton Démarrer (Windows) dans la barre des tâches."""
|
||||||
|
result = _resolve_via_server(screenshot, {
|
||||||
|
"by_text": "",
|
||||||
|
"vlm_description": "Le bouton Démarrer (logo Windows) dans la barre des tâches, en bas",
|
||||||
|
})
|
||||||
|
_assert_found_in_zone(result, {
|
||||||
|
"x_min": 0.18, "x_max": 0.30,
|
||||||
|
"y_min": 0.90, "y_max": 1.00,
|
||||||
|
}, "Bouton Démarrer")
|
||||||
|
|
||||||
|
def test_trouver_ce_pc(self, screenshot):
|
||||||
|
"""Trouver 'Ce PC' dans le panneau latéral de l'Explorateur."""
|
||||||
|
result = _resolve_via_server(screenshot, {
|
||||||
|
"by_text": "Ce PC",
|
||||||
|
"vlm_description": "L'élément 'Ce PC' dans le panneau de navigation gauche de l'Explorateur",
|
||||||
|
})
|
||||||
|
_assert_found_in_zone(result, {
|
||||||
|
"x_min": 0.00, "x_max": 0.12,
|
||||||
|
"y_min": 0.40, "y_max": 0.55,
|
||||||
|
}, "Ce PC")
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# shot_0004 : Bloc-notes avec onglets + Explorateur derrière
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.visual
|
||||||
|
class TestBlocNotesOnglets:
|
||||||
|
"""Tests sur le Bloc-notes avec plusieurs onglets (shot_0004)."""
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def screenshot(self):
|
||||||
|
return _load_screenshot("shot_0004_full.png")
|
||||||
|
|
||||||
|
def test_trouver_menu_fichier(self, screenshot):
|
||||||
|
"""Trouver le menu 'Fichier' du Bloc-notes."""
|
||||||
|
result = _resolve_via_server(screenshot, {
|
||||||
|
"by_text": "Fichier",
|
||||||
|
"vlm_description": "Le menu Fichier dans la barre de menus du Bloc-notes",
|
||||||
|
})
|
||||||
|
_assert_found_in_zone(result, {
|
||||||
|
"x_min": 0.02, "x_max": 0.10,
|
||||||
|
"y_min": 0.08, "y_max": 0.15,
|
||||||
|
}, "Menu Fichier")
|
||||||
|
|
||||||
|
def test_trouver_onglet_ceci_est_un_test(self, screenshot):
|
||||||
|
"""Trouver l'onglet 'Ceci est un test.txt' dans le Bloc-notes."""
|
||||||
|
result = _resolve_via_server(screenshot, {
|
||||||
|
"by_text": "Ceci est un test",
|
||||||
|
"vlm_description": "L'onglet 'Ceci est un test.txt' dans le Bloc-notes",
|
||||||
|
})
|
||||||
|
_assert_found_in_zone(result, {
|
||||||
|
"x_min": 0.40, "x_max": 0.70,
|
||||||
|
"y_min": 0.03, "y_max": 0.10,
|
||||||
|
}, "Onglet 'Ceci est un test.txt'")
|
||||||
|
|
||||||
|
def test_trouver_nouvel_onglet_plus(self, screenshot):
|
||||||
|
"""Trouver le bouton '+' pour ajouter un nouvel onglet."""
|
||||||
|
result = _resolve_via_server(screenshot, {
|
||||||
|
"by_text": "",
|
||||||
|
"vlm_description": "Le bouton + (plus) pour ajouter un nouvel onglet dans le Bloc-notes, à droite des onglets",
|
||||||
|
})
|
||||||
|
_assert_found_in_zone(result, {
|
||||||
|
"x_min": 0.55, "x_max": 0.70,
|
||||||
|
"y_min": 0.03, "y_max": 0.10,
|
||||||
|
}, "Bouton + (nouvel onglet)")
|
||||||
|
|
||||||
|
def test_trouver_bouton_fermer_onglet(self, screenshot):
|
||||||
|
"""Trouver le X de fermeture de l'onglet actif."""
|
||||||
|
result = _resolve_via_server(screenshot, {
|
||||||
|
"by_text": "",
|
||||||
|
"vlm_description": "Le bouton X pour fermer l'onglet actif 'Ceci est un test.txt' dans le Bloc-notes",
|
||||||
|
})
|
||||||
|
_assert_found_in_zone(result, {
|
||||||
|
"x_min": 0.50, "x_max": 0.65,
|
||||||
|
"y_min": 0.03, "y_max": 0.10,
|
||||||
|
}, "Fermer onglet (X)")
|
||||||
|
|
||||||
|
def test_trouver_menu_modifier(self, screenshot):
|
||||||
|
"""Trouver le menu 'Modifier' du Bloc-notes."""
|
||||||
|
result = _resolve_via_server(screenshot, {
|
||||||
|
"by_text": "Modifier",
|
||||||
|
"vlm_description": "Le menu Modifier dans la barre de menus du Bloc-notes",
|
||||||
|
})
|
||||||
|
_assert_found_in_zone(result, {
|
||||||
|
"x_min": 0.07, "x_max": 0.16,
|
||||||
|
"y_min": 0.08, "y_max": 0.15,
|
||||||
|
}, "Menu Modifier")
|
||||||
|
|
||||||
|
def test_trouver_encodage_utf8(self, screenshot):
|
||||||
|
"""Trouver l'indicateur d'encodage UTF-8 dans la barre de statut."""
|
||||||
|
result = _resolve_via_server(screenshot, {
|
||||||
|
"by_text": "UTF-8",
|
||||||
|
"vlm_description": "L'indicateur d'encodage UTF-8 dans la barre de statut en bas du Bloc-notes",
|
||||||
|
})
|
||||||
|
_assert_found_in_zone(result, {
|
||||||
|
"x_min": 0.60, "x_max": 0.80,
|
||||||
|
"y_min": 0.90, "y_max": 1.00,
|
||||||
|
}, "UTF-8 (barre de statut)")
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# shot_0014 : Google Chrome page d'accueil
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.visual
|
||||||
|
class TestGoogleChrome:
|
||||||
|
"""Tests sur Google Chrome avec page d'accueil (shot_0014)."""
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def screenshot(self):
|
||||||
|
return _load_screenshot("shot_0014_full.png")
|
||||||
|
|
||||||
|
def test_trouver_barre_recherche_google(self, screenshot):
|
||||||
|
"""Trouver la barre de recherche Google au centre."""
|
||||||
|
result = _resolve_via_server(screenshot, {
|
||||||
|
"by_text": "Rechercher sur Google",
|
||||||
|
"vlm_description": "La barre de recherche Google au centre de la page d'accueil",
|
||||||
|
})
|
||||||
|
_assert_found_in_zone(result, {
|
||||||
|
"x_min": 0.10, "x_max": 0.60,
|
||||||
|
"y_min": 0.30, "y_max": 0.50,
|
||||||
|
}, "Barre recherche Google")
|
||||||
|
|
||||||
|
def test_trouver_barre_adresse_chrome(self, screenshot):
|
||||||
|
"""Trouver la barre d'adresse de Chrome en haut."""
|
||||||
|
result = _resolve_via_server(screenshot, {
|
||||||
|
"by_text": "",
|
||||||
|
"vlm_description": "La barre d'adresse URL de Google Chrome, en haut du navigateur",
|
||||||
|
})
|
||||||
|
_assert_found_in_zone(result, {
|
||||||
|
"x_min": 0.10, "x_max": 0.60,
|
||||||
|
"y_min": 0.05, "y_max": 0.15,
|
||||||
|
}, "Barre d'adresse Chrome")
|
||||||
|
|
||||||
|
def test_trouver_nouvel_onglet_chrome(self, screenshot):
|
||||||
|
"""Trouver le bouton '+' pour un nouvel onglet Chrome."""
|
||||||
|
result = _resolve_via_server(screenshot, {
|
||||||
|
"by_text": "",
|
||||||
|
"vlm_description": "Le bouton + pour ouvrir un nouvel onglet dans Google Chrome",
|
||||||
|
})
|
||||||
|
_assert_found_in_zone(result, {
|
||||||
|
"x_min": 0.15, "x_max": 0.25,
|
||||||
|
"y_min": 0.00, "y_max": 0.06,
|
||||||
|
}, "Nouvel onglet (+) Chrome")
|
||||||
|
|
||||||
|
def test_trouver_fermer_chrome(self, screenshot):
|
||||||
|
"""Trouver le bouton X pour fermer Chrome."""
|
||||||
|
result = _resolve_via_server(screenshot, {
|
||||||
|
"by_text": "",
|
||||||
|
"vlm_description": "Le bouton fermer (X) de la fenêtre Google Chrome, en haut à droite",
|
||||||
|
})
|
||||||
|
_assert_found_in_zone(result, {
|
||||||
|
"x_min": 0.90, "x_max": 1.00,
|
||||||
|
"y_min": 0.00, "y_max": 0.06,
|
||||||
|
}, "Fermer Chrome (X)")
|
||||||
|
|
||||||
|
def test_trouver_gmail(self, screenshot):
|
||||||
|
"""Trouver le lien Gmail sur la page d'accueil Google."""
|
||||||
|
result = _resolve_via_server(screenshot, {
|
||||||
|
"by_text": "Gmail",
|
||||||
|
"vlm_description": "Le lien Gmail en haut à droite de la page Google",
|
||||||
|
})
|
||||||
|
_assert_found_in_zone(result, {
|
||||||
|
"x_min": 0.50, "x_max": 0.80,
|
||||||
|
"y_min": 0.10, "y_max": 0.20,
|
||||||
|
}, "Gmail")
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests transversaux (connaissances de base Windows)
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.visual
|
||||||
|
class TestConnaissancesWindowsBase:
|
||||||
|
"""Connaissances de base Windows que tout utilisateur connaît."""
|
||||||
|
|
||||||
|
def test_rechercher_programme_depuis_explorateur(self):
|
||||||
|
"""Depuis l'Explorateur, trouver la barre de recherche Windows."""
|
||||||
|
screenshot = _load_screenshot("shot_0001_full.png")
|
||||||
|
result = _resolve_via_server(screenshot, {
|
||||||
|
"by_text": "Rechercher",
|
||||||
|
"vlm_description": "La barre de recherche dans la barre des tâches Windows en bas de l'écran",
|
||||||
|
})
|
||||||
|
assert result and result.get("resolved"), "Rechercher non trouvé"
|
||||||
|
|
||||||
|
def test_fermer_programme_depuis_blocnotes(self):
|
||||||
|
"""Depuis le Bloc-notes, trouver le bouton fermer."""
|
||||||
|
screenshot = _load_screenshot("shot_0004_full.png")
|
||||||
|
result = _resolve_via_server(screenshot, {
|
||||||
|
"by_text": "",
|
||||||
|
"vlm_description": "Le bouton X pour fermer la fenêtre du Bloc-notes, en haut à droite",
|
||||||
|
})
|
||||||
|
assert result and result.get("resolved"), "Bouton fermer non trouvé"
|
||||||
|
|
||||||
|
def test_ajouter_onglet_blocnotes(self):
|
||||||
|
"""Ajouter un nouvel onglet dans le Bloc-notes."""
|
||||||
|
screenshot = _load_screenshot("shot_0004_full.png")
|
||||||
|
result = _resolve_via_server(screenshot, {
|
||||||
|
"by_text": "",
|
||||||
|
"vlm_description": "Le bouton + pour ajouter un nouvel onglet dans le Bloc-notes",
|
||||||
|
})
|
||||||
|
assert result and result.get("resolved"), "Bouton + non trouvé"
|
||||||
|
|
||||||
|
def test_rechercher_sur_google(self):
|
||||||
|
"""Taper dans la barre de recherche Google."""
|
||||||
|
screenshot = _load_screenshot("shot_0014_full.png")
|
||||||
|
result = _resolve_via_server(screenshot, {
|
||||||
|
"by_text": "Rechercher sur Google",
|
||||||
|
"vlm_description": "Le champ de recherche Google",
|
||||||
|
})
|
||||||
|
assert result and result.get("resolved"), "Recherche Google non trouvée"
|
||||||
864
tests/visual/test_visual_robustness.py
Normal file
864
tests/visual/test_visual_robustness.py
Normal file
@@ -0,0 +1,864 @@
|
|||||||
|
"""
|
||||||
|
Tests de robustesse visuelle — Grounding VLM qwen2.5vl:7b.
|
||||||
|
|
||||||
|
Objectifs :
|
||||||
|
1. Reproductibilité : même screenshot + même cible → même résultat 10 fois
|
||||||
|
2. Robustesse Citrix : screenshots compressés JPEG qualité 15-25 → ça marche
|
||||||
|
3. Mesure de variance : coordonnées stables à < 5% de l'écran
|
||||||
|
|
||||||
|
Architecture des coordonnées qwen2.5vl :
|
||||||
|
- Format bbox_2d : [x1, y1, x2, y2] en pixels relatifs à l'image envoyée
|
||||||
|
- Pour une image 1280x800, X va de 0 à 1280 et Y de 0 à 800
|
||||||
|
- Normalisation : diviser par les dimensions de l'image (pas par 1000)
|
||||||
|
|
||||||
|
Calibration mesurée (5 avril 2026) sur screenshots 1280x800 :
|
||||||
|
- shot_0001/Rechercher (taskbar) : cx=0.458, cy=0.789
|
||||||
|
- shot_0001/agent_v1 (dossier) : cx=0.247, cy=0.201
|
||||||
|
- shot_0004/Fichier (menu) : cx=0.095, cy=0.086
|
||||||
|
- shot_0004/Modifier (menu) : cx=0.142, cy=0.085
|
||||||
|
- shot_0004/Ceci est un test.txt (onglet): cx=0.694, cy=0.053
|
||||||
|
- shot_0004/Close X (Bloc-notes) : cx=0.990, cy=0.041
|
||||||
|
- shot_0014/Google search (centre) : cx=0.539, cy=0.389
|
||||||
|
- shot_0014/Gmail (haut-droite) : cx=0.913, cy=0.130
|
||||||
|
"""
|
||||||
|
|
||||||
|
import base64
|
||||||
|
import io
|
||||||
|
import json
|
||||||
|
import re
|
||||||
|
import statistics
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, List, Optional, Tuple
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
_ROOT = str(Path(__file__).resolve().parents[2])
|
||||||
|
if _ROOT not in sys.path:
|
||||||
|
sys.path.insert(0, _ROOT)
|
||||||
|
|
||||||
|
# Répertoire des screenshots de test
|
||||||
|
_SHOTS_DIR = (
|
||||||
|
Path(_ROOT)
|
||||||
|
/ "data/training/live_sessions/DESKTOP-ST3VBSD_windows"
|
||||||
|
/ "sess_20260404T135010_cec5c8/shots"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Résolution des screenshots
|
||||||
|
_SCREEN_W = 1280
|
||||||
|
_SCREEN_H = 800
|
||||||
|
|
||||||
|
# Nombre de répétitions pour les tests de reproductibilité
|
||||||
|
_N_REPEATS = 10
|
||||||
|
|
||||||
|
# Tolérance de variance maximale (en fraction de l'écran, 0.05 = 5%)
|
||||||
|
_MAX_VARIANCE = 0.05
|
||||||
|
|
||||||
|
# Taux de détection minimal (X sur _N_REPEATS)
|
||||||
|
_MIN_DETECTION_RATE = 8
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Utilitaires
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
def _load_screenshot(name: str) -> Optional[str]:
|
||||||
|
"""Charger un screenshot en base64."""
|
||||||
|
path = _SHOTS_DIR / name
|
||||||
|
if not path.is_file():
|
||||||
|
pytest.skip(f"Screenshot {name} non disponible")
|
||||||
|
return base64.b64encode(path.read_bytes()).decode()
|
||||||
|
|
||||||
|
|
||||||
|
def _degrade_citrix(screenshot_b64: str, quality: int = 20) -> str:
|
||||||
|
"""Simuler compression Citrix : JPEG qualité basse puis retour PNG b64."""
|
||||||
|
from PIL import Image
|
||||||
|
|
||||||
|
raw = base64.b64decode(screenshot_b64)
|
||||||
|
img = Image.open(io.BytesIO(raw))
|
||||||
|
|
||||||
|
# Compression JPEG qualité basse (simulation Citrix)
|
||||||
|
buf_jpg = io.BytesIO()
|
||||||
|
img.save(buf_jpg, "JPEG", quality=quality)
|
||||||
|
buf_jpg.seek(0)
|
||||||
|
citrix_img = Image.open(buf_jpg)
|
||||||
|
|
||||||
|
# Re-encoder en PNG pour l'envoi au VLM
|
||||||
|
buf_png = io.BytesIO()
|
||||||
|
citrix_img.save(buf_png, "PNG")
|
||||||
|
return base64.b64encode(buf_png.getvalue()).decode()
|
||||||
|
|
||||||
|
|
||||||
|
def _grounding_vlm(
|
||||||
|
screenshot_b64: str,
|
||||||
|
element_description: str,
|
||||||
|
timeout: int = 60,
|
||||||
|
) -> Tuple[Optional[float], Optional[float], Optional[List[int]], str]:
|
||||||
|
"""Appeler qwen2.5vl pour localiser un élément.
|
||||||
|
|
||||||
|
Retourne (cx, cy, [x1,y1,x2,y2], raw_content).
|
||||||
|
cx et cy sont les centres normalisés sur la grille 1000.
|
||||||
|
"""
|
||||||
|
import requests
|
||||||
|
|
||||||
|
try:
|
||||||
|
resp = requests.post(
|
||||||
|
"http://localhost:11434/api/chat",
|
||||||
|
json={
|
||||||
|
"model": "qwen2.5vl:7b",
|
||||||
|
"messages": [
|
||||||
|
{
|
||||||
|
"role": "user",
|
||||||
|
"content": (
|
||||||
|
f"Detect the element '{element_description}' "
|
||||||
|
f"with a bounding box."
|
||||||
|
),
|
||||||
|
"images": [screenshot_b64],
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"stream": False,
|
||||||
|
"options": {"temperature": 0.1, "num_predict": 100},
|
||||||
|
},
|
||||||
|
timeout=timeout,
|
||||||
|
)
|
||||||
|
except requests.ConnectionError:
|
||||||
|
pytest.skip("Ollama non disponible (localhost:11434)")
|
||||||
|
except requests.Timeout:
|
||||||
|
pytest.skip("qwen2.5vl timeout — modèle en cours de chargement ?")
|
||||||
|
|
||||||
|
content = resp.json().get("message", {}).get("content", "")
|
||||||
|
|
||||||
|
# Parser bbox_2d depuis la réponse JSON
|
||||||
|
# qwen2.5vl retourne des coordonnées en pixels relatifs à l'image envoyée,
|
||||||
|
# PAS sur une grille 1000x1000.
|
||||||
|
bbox_match = re.search(
|
||||||
|
r'"bbox_2d"\s*:\s*\[(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\]',
|
||||||
|
content,
|
||||||
|
)
|
||||||
|
if bbox_match:
|
||||||
|
x1, y1, x2, y2 = [int(bbox_match.group(i)) for i in range(1, 5)]
|
||||||
|
# Normaliser par les dimensions de l'image (pixels → 0-1)
|
||||||
|
cx = (x1 + x2) / 2 / _SCREEN_W
|
||||||
|
cy = (y1 + y2) / 2 / _SCREEN_H
|
||||||
|
return cx, cy, [x1, y1, x2, y2], content
|
||||||
|
|
||||||
|
return None, None, None, content
|
||||||
|
|
||||||
|
|
||||||
|
def _run_n_times(
|
||||||
|
screenshot_b64: str,
|
||||||
|
description: str,
|
||||||
|
n: int = _N_REPEATS,
|
||||||
|
delay: float = 0.2,
|
||||||
|
) -> List[Dict]:
|
||||||
|
"""Exécuter le grounding N fois et collecter les résultats."""
|
||||||
|
results = []
|
||||||
|
for i in range(n):
|
||||||
|
cx, cy, bbox, raw = _grounding_vlm(screenshot_b64, description)
|
||||||
|
results.append({
|
||||||
|
"run": i + 1,
|
||||||
|
"cx": cx,
|
||||||
|
"cy": cy,
|
||||||
|
"bbox": bbox,
|
||||||
|
"detected": cx is not None,
|
||||||
|
"raw": raw,
|
||||||
|
})
|
||||||
|
if i < n - 1:
|
||||||
|
time.sleep(delay)
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def _compute_stats(results: List[Dict]) -> Dict:
|
||||||
|
"""Calculer les statistiques de détection et de variance."""
|
||||||
|
detected = [r for r in results if r["detected"]]
|
||||||
|
n_total = len(results)
|
||||||
|
n_detected = len(detected)
|
||||||
|
|
||||||
|
stats = {
|
||||||
|
"total": n_total,
|
||||||
|
"detected": n_detected,
|
||||||
|
"rate": n_detected / n_total if n_total > 0 else 0,
|
||||||
|
"rate_str": f"{n_detected}/{n_total}",
|
||||||
|
}
|
||||||
|
|
||||||
|
if n_detected >= 2:
|
||||||
|
xs = [r["cx"] for r in detected]
|
||||||
|
ys = [r["cy"] for r in detected]
|
||||||
|
stats.update({
|
||||||
|
"x_min": min(xs),
|
||||||
|
"x_max": max(xs),
|
||||||
|
"x_mean": statistics.mean(xs),
|
||||||
|
"x_range": max(xs) - min(xs),
|
||||||
|
"x_stdev": statistics.stdev(xs) if n_detected >= 2 else 0,
|
||||||
|
"y_min": min(ys),
|
||||||
|
"y_max": max(ys),
|
||||||
|
"y_mean": statistics.mean(ys),
|
||||||
|
"y_range": max(ys) - min(ys),
|
||||||
|
"y_stdev": statistics.stdev(ys) if n_detected >= 2 else 0,
|
||||||
|
})
|
||||||
|
elif n_detected == 1:
|
||||||
|
stats.update({
|
||||||
|
"x_min": detected[0]["cx"],
|
||||||
|
"x_max": detected[0]["cx"],
|
||||||
|
"x_mean": detected[0]["cx"],
|
||||||
|
"x_range": 0,
|
||||||
|
"x_stdev": 0,
|
||||||
|
"y_min": detected[0]["cy"],
|
||||||
|
"y_max": detected[0]["cy"],
|
||||||
|
"y_mean": detected[0]["cy"],
|
||||||
|
"y_range": 0,
|
||||||
|
"y_stdev": 0,
|
||||||
|
})
|
||||||
|
|
||||||
|
return stats
|
||||||
|
|
||||||
|
|
||||||
|
def _assert_reproducible(
|
||||||
|
stats: Dict,
|
||||||
|
element_name: str,
|
||||||
|
min_rate: int = _MIN_DETECTION_RATE,
|
||||||
|
max_var: float = _MAX_VARIANCE,
|
||||||
|
):
|
||||||
|
"""Vérifier la reproductibilité : taux de détection + variance faible."""
|
||||||
|
assert stats["detected"] >= min_rate, (
|
||||||
|
f"{element_name}: seulement {stats['rate_str']} détections "
|
||||||
|
f"(minimum requis: {min_rate}/{stats['total']})"
|
||||||
|
)
|
||||||
|
|
||||||
|
if stats["detected"] >= 2:
|
||||||
|
assert stats["x_range"] < max_var, (
|
||||||
|
f"{element_name}: variance X trop élevée: "
|
||||||
|
f"{stats['x_range']:.4f} (max={max_var})"
|
||||||
|
)
|
||||||
|
assert stats["y_range"] < max_var, (
|
||||||
|
f"{element_name}: variance Y trop élevée: "
|
||||||
|
f"{stats['y_range']:.4f} (max={max_var})"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _assert_in_zone(
|
||||||
|
stats: Dict,
|
||||||
|
zone: Dict[str, float],
|
||||||
|
element_name: str,
|
||||||
|
):
|
||||||
|
"""Vérifier que la position moyenne est dans la zone attendue."""
|
||||||
|
assert stats["detected"] >= 1, f"{element_name}: aucune détection"
|
||||||
|
cx = stats["x_mean"]
|
||||||
|
cy = stats["y_mean"]
|
||||||
|
assert zone["x_min"] <= cx <= zone["x_max"], (
|
||||||
|
f"{element_name}: X moyen {cx:.4f} hors zone "
|
||||||
|
f"[{zone['x_min']:.2f}-{zone['x_max']:.2f}]"
|
||||||
|
)
|
||||||
|
assert zone["y_min"] <= cy <= zone["y_max"], (
|
||||||
|
f"{element_name}: Y moyen {cy:.4f} hors zone "
|
||||||
|
f"[{zone['y_min']:.2f}-{zone['y_max']:.2f}]"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
# Calibrated zones (measured on 5 April 2026)
# =========================================================================


def _zone(x_min: float, x_max: float, y_min: float, y_max: float) -> Dict[str, float]:
    """Build one expected-position rectangle in normalized [0-1] screen coords."""
    return {"x_min": x_min, "x_max": x_max, "y_min": y_min, "y_max": y_max}


CALIBRATED_ZONES = {
    # shot_0001 — Windows File Explorer
    "rechercher_taskbar": _zone(0.40, 0.60, 0.74, 0.84),
    "agent_v1_folder": _zone(0.18, 0.30, 0.16, 0.26),
    # shot_0004 — Notepad with tabs
    "fichier_menu": _zone(0.06, 0.13, 0.06, 0.12),
    "modifier_menu": _zone(0.11, 0.18, 0.06, 0.12),
    "ceci_est_un_test_tab": _zone(0.65, 0.75, 0.03, 0.08),
    # x_max > 1.0 on purpose: on maximized windows the close button's X
    # coordinate overflows the normalized grid (see the overflow tests).
    "close_x_notepad": _zone(0.95, 1.02, 0.02, 0.06),
    # shot_0014 — Google Chrome
    "google_search_bar": _zone(0.48, 0.60, 0.35, 0.43),
    "gmail_link": _zone(0.87, 0.95, 0.10, 0.16),
}
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests de reproductibilité — 10 appels consécutifs
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.visual
class TestReproductibilite:
    """Each test calls the VLM 10 times and checks consistency.

    Success criteria:
    - At least 8/10 detections
    - Coordinate variance < 5% of the screen on each axis
    - Mean position inside the calibrated zone
    """

    # -- shot_0001: Windows File Explorer --

    @pytest.fixture(scope="class")
    def shot_0001(self):
        # scope="class": the screenshot is immutable, load it once for all tests.
        return _load_screenshot("shot_0001_full.png")

    def test_rechercher_10_fois(self, shot_0001):
        """The VLM finds 'Rechercher' at the same spot 10 times in a row."""
        results = _run_n_times(
            shot_0001,
            "the 'Rechercher' search text in the Windows taskbar at the bottom",
        )
        stats = _compute_stats(results)
        _assert_reproducible(stats, "Rechercher (taskbar)")
        _assert_in_zone(stats, CALIBRATED_ZONES["rechercher_taskbar"], "Rechercher")
        # Print the summary for the report
        print(f"\n [Rechercher] {stats['rate_str']} détections, "
              f"X=[{stats.get('x_min', 0):.4f}-{stats.get('x_max', 0):.4f}], "
              f"Y=[{stats.get('y_min', 0):.4f}-{stats.get('y_max', 0):.4f}]")

    def test_agent_v1_10_fois(self, shot_0001):
        """The VLM finds the 'agent_v1' folder at the same spot 10 times."""
        results = _run_n_times(
            shot_0001,
            "the folder named 'agent_v1' in the file list",
        )
        stats = _compute_stats(results)
        _assert_reproducible(stats, "agent_v1 (dossier)")
        _assert_in_zone(stats, CALIBRATED_ZONES["agent_v1_folder"], "agent_v1")
        print(f"\n [agent_v1] {stats['rate_str']} détections, "
              f"X=[{stats.get('x_min', 0):.4f}-{stats.get('x_max', 0):.4f}], "
              f"Y=[{stats.get('y_min', 0):.4f}-{stats.get('y_max', 0):.4f}]")

    def test_close_x_explorateur_10_fois(self, shot_0001):
        """X button of the maximized window: X overflow expected.

        This test checks that the VLM detects the X button consistently.
        On maximized windows (1280 px wide), X coordinates exceed the
        normalized 1000 grid (cx > 1.0).

        Note: the VLM may confuse the window's X button with the tab's
        (multiple-close-buttons ambiguity). We check that the majority of
        detections target the right button.
        """
        results = _run_n_times(
            shot_0001,
            "the X close button of the 'Lea' window",
        )
        # Make sure the VLM detects something at all
        detected = [r for r in results if r["detected"]]
        assert len(detected) >= _MIN_DETECTION_RATE, (
            f"Close X: seulement {len(detected)}/{len(results)} détections"
        )

        # Classify detections: overflow (window button) vs non-overflow (tab button)
        overflows = [r for r in detected if r["cx"] > 1.0]
        non_overflows = [r for r in detected if r["cx"] <= 1.0]

        # At least 60% of detections must target the window button (overflow)
        assert len(overflows) >= len(detected) * 0.6, (
            f"Close X: seulement {len(overflows)}/{len(detected)} en overflow. "
            f"Ambiguïté avec bouton onglet ({len(non_overflows)} non-overflow)."
        )

        # Check consistency of the overflow detections (the main cluster)
        if len(overflows) >= 2:
            bboxes = [r["bbox"] for r in overflows]
            x1s = [b[0] for b in bboxes]
            y1s = [b[1] for b in bboxes]
            # NOTE(review): thresholds are in raw pixels here, unlike the
            # normalized max_var used elsewhere — confirm this is intended.
            assert max(x1s) - min(x1s) < 20, (
                f"Close X overflow: x1 trop variable: {min(x1s)}-{max(x1s)}"
            )
            assert max(y1s) - min(y1s) < 20, (
                f"Close X overflow: y1 trop variable: {min(y1s)}-{max(y1s)}"
            )

        # The ternary covers the whole implicitly-concatenated f-string, so the
        # statistics.mean(...) call only runs when `overflows` is non-empty.
        print(f"\n [Close X Explorer] {len(detected)}/{len(results)} détections, "
              f"{len(overflows)} overflow (fenêtre), {len(non_overflows)} non-overflow (onglet). "
              f"cx_mean_overflow={statistics.mean([r['cx'] for r in overflows]):.4f}" if overflows else "")

    # -- shot_0004: Notepad --

    @pytest.fixture(scope="class")
    def shot_0004(self):
        return _load_screenshot("shot_0004_full.png")

    def test_fichier_10_fois(self, shot_0004):
        """The VLM finds the 'Fichier' menu at the same spot 10 times."""
        results = _run_n_times(
            shot_0004,
            "the 'Fichier' menu item in the menu bar",
        )
        stats = _compute_stats(results)
        _assert_reproducible(stats, "Fichier (menu)")
        _assert_in_zone(stats, CALIBRATED_ZONES["fichier_menu"], "Fichier")
        print(f"\n [Fichier] {stats['rate_str']} détections, "
              f"X=[{stats.get('x_min', 0):.4f}-{stats.get('x_max', 0):.4f}], "
              f"Y=[{stats.get('y_min', 0):.4f}-{stats.get('y_max', 0):.4f}]")

    def test_modifier_10_fois(self, shot_0004):
        """The VLM finds the 'Modifier' menu at the same spot 10 times."""
        results = _run_n_times(
            shot_0004,
            "the 'Modifier' menu item in the menu bar",
        )
        stats = _compute_stats(results)
        _assert_reproducible(stats, "Modifier (menu)")
        _assert_in_zone(stats, CALIBRATED_ZONES["modifier_menu"], "Modifier")
        print(f"\n [Modifier] {stats['rate_str']} détections, "
              f"X=[{stats.get('x_min', 0):.4f}-{stats.get('x_max', 0):.4f}], "
              f"Y=[{stats.get('y_min', 0):.4f}-{stats.get('y_max', 0):.4f}]")

    def test_ceci_est_un_test_10_fois(self, shot_0004):
        """The VLM finds the 'Ceci est un test.txt' tab at the same spot 10 times."""
        results = _run_n_times(
            shot_0004,
            "the tab labeled 'Ceci est un test.txt'",
        )
        stats = _compute_stats(results)
        _assert_reproducible(stats, "Ceci est un test.txt (onglet)")
        _assert_in_zone(stats, CALIBRATED_ZONES["ceci_est_un_test_tab"], "Ceci est un test.txt")
        print(f"\n [Ceci est un test.txt] {stats['rate_str']} détections, "
              f"X=[{stats.get('x_min', 0):.4f}-{stats.get('x_max', 0):.4f}], "
              f"Y=[{stats.get('y_min', 0):.4f}-{stats.get('y_max', 0):.4f}]")

    # -- shot_0014: Google Chrome --

    @pytest.fixture(scope="class")
    def shot_0014(self):
        return _load_screenshot("shot_0014_full.png")

    def test_google_search_10_fois(self, shot_0014):
        """The VLM finds the Google search bar at the same spot 10 times."""
        results = _run_n_times(
            shot_0014,
            "the Google search bar 'Rechercher sur Google ou saisir une URL'",
        )
        stats = _compute_stats(results)
        _assert_reproducible(stats, "Recherche Google")
        _assert_in_zone(stats, CALIBRATED_ZONES["google_search_bar"], "Recherche Google")
        print(f"\n [Google search] {stats['rate_str']} détections, "
              f"X=[{stats.get('x_min', 0):.4f}-{stats.get('x_max', 0):.4f}], "
              f"Y=[{stats.get('y_min', 0):.4f}-{stats.get('y_max', 0):.4f}]")

    def test_gmail_10_fois(self, shot_0014):
        """The VLM finds the Gmail link at the same spot 10 times."""
        results = _run_n_times(
            shot_0014,
            "the 'Gmail' link at the top of the page",
        )
        stats = _compute_stats(results)
        _assert_reproducible(stats, "Gmail")
        _assert_in_zone(stats, CALIBRATED_ZONES["gmail_link"], "Gmail")
        print(f"\n [Gmail] {stats['rate_str']} détections, "
              f"X=[{stats.get('x_min', 0):.4f}-{stats.get('x_max', 0):.4f}], "
              f"Y=[{stats.get('y_min', 0):.4f}-{stats.get('y_max', 0):.4f}]")
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests de robustesse Citrix — JPEG dégradé
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.visual
class TestCitrixRobustesse:
    """Check that grounding still works on compressed images.

    Simulates a Citrix/RDP environment with JPEG compression quality 15-25.
    Compares results on the original vs the degraded image.
    """

    @pytest.fixture(scope="class")
    def shots_original(self):
        # Pristine PNG screenshots, loaded once for the whole class.
        return {
            "shot_0001": _load_screenshot("shot_0001_full.png"),
            "shot_0004": _load_screenshot("shot_0004_full.png"),
            "shot_0014": _load_screenshot("shot_0014_full.png"),
        }

    @pytest.fixture(scope="class")
    def shots_citrix(self, shots_original):
        # Same screenshots, recompressed at JPEG quality 20 to mimic Citrix.
        return {
            name: _degrade_citrix(b64, quality=20)
            for name, b64 in shots_original.items()
        }

    def _compare_original_vs_citrix(
        self,
        original_b64: str,
        citrix_b64: str,
        description: str,
        element_name: str,
        zone: Dict,
        n_runs: int = 5,
    ) -> Dict:
        """Compare original vs Citrix results.

        Returns a dict with the aggregated stats under the keys
        "original" and "citrix".

        NOTE(review): element_name and zone are currently unused here;
        callers pass them but all assertions happen at the call site.
        """
        # n_runs detections on the original
        results_orig = _run_n_times(original_b64, description, n=n_runs, delay=0.2)
        stats_orig = _compute_stats(results_orig)

        # n_runs detections on the Citrix-degraded image
        results_citrix = _run_n_times(citrix_b64, description, n=n_runs, delay=0.2)
        stats_citrix = _compute_stats(results_citrix)

        return {
            "original": stats_orig,
            "citrix": stats_citrix,
        }

    def test_rechercher_citrix(self, shots_original, shots_citrix):
        """'Rechercher' detected despite JPEG Q20 compression."""
        comp = self._compare_original_vs_citrix(
            shots_original["shot_0001"],
            shots_citrix["shot_0001"],
            "the 'Rechercher' search text in the Windows taskbar at the bottom",
            "Rechercher",
            CALIBRATED_ZONES["rechercher_taskbar"],
        )
        # At least 3/5 detections on the Citrix image
        assert comp["citrix"]["detected"] >= 3, (
            f"Citrix Rechercher: seulement {comp['citrix']['rate_str']} détections"
        )
        # Position must stay within the calibrated zone
        if comp["citrix"]["detected"] >= 1:
            _assert_in_zone(comp["citrix"], CALIBRATED_ZONES["rechercher_taskbar"], "Rechercher (Citrix)")
        print(f"\n [Rechercher Citrix] orig={comp['original']['rate_str']}, "
              f"citrix={comp['citrix']['rate_str']}")

    def test_fichier_citrix(self, shots_original, shots_citrix):
        """'Fichier' menu detected despite JPEG Q20 compression."""
        comp = self._compare_original_vs_citrix(
            shots_original["shot_0004"],
            shots_citrix["shot_0004"],
            "the 'Fichier' menu item in the menu bar",
            "Fichier",
            CALIBRATED_ZONES["fichier_menu"],
        )
        assert comp["citrix"]["detected"] >= 3, (
            f"Citrix Fichier: seulement {comp['citrix']['rate_str']} détections"
        )
        if comp["citrix"]["detected"] >= 1:
            _assert_in_zone(comp["citrix"], CALIBRATED_ZONES["fichier_menu"], "Fichier (Citrix)")
        print(f"\n [Fichier Citrix] orig={comp['original']['rate_str']}, "
              f"citrix={comp['citrix']['rate_str']}")

    def test_ceci_est_un_test_citrix(self, shots_original, shots_citrix):
        """'Ceci est un test.txt' tab detected despite JPEG Q20 compression."""
        comp = self._compare_original_vs_citrix(
            shots_original["shot_0004"],
            shots_citrix["shot_0004"],
            "the tab labeled 'Ceci est un test.txt'",
            "Ceci est un test.txt",
            CALIBRATED_ZONES["ceci_est_un_test_tab"],
        )
        assert comp["citrix"]["detected"] >= 3, (
            f"Citrix tab: seulement {comp['citrix']['rate_str']} détections"
        )
        if comp["citrix"]["detected"] >= 1:
            _assert_in_zone(
                comp["citrix"],
                CALIBRATED_ZONES["ceci_est_un_test_tab"],
                "Ceci est un test.txt (Citrix)",
            )
        print(f"\n [Ceci est un test.txt Citrix] orig={comp['original']['rate_str']}, "
              f"citrix={comp['citrix']['rate_str']}")

    def test_google_search_citrix(self, shots_original, shots_citrix):
        """Google search bar detected despite JPEG Q20 compression."""
        comp = self._compare_original_vs_citrix(
            shots_original["shot_0014"],
            shots_citrix["shot_0014"],
            "the Google search bar 'Rechercher sur Google ou saisir une URL'",
            "Recherche Google",
            CALIBRATED_ZONES["google_search_bar"],
        )
        assert comp["citrix"]["detected"] >= 3, (
            f"Citrix Google: seulement {comp['citrix']['rate_str']} détections"
        )
        if comp["citrix"]["detected"] >= 1:
            _assert_in_zone(
                comp["citrix"],
                CALIBRATED_ZONES["google_search_bar"],
                "Recherche Google (Citrix)",
            )
        print(f"\n [Google search Citrix] orig={comp['original']['rate_str']}, "
              f"citrix={comp['citrix']['rate_str']}")

    def test_gmail_citrix(self, shots_original, shots_citrix):
        """Gmail link detected despite JPEG Q20 compression."""
        comp = self._compare_original_vs_citrix(
            shots_original["shot_0014"],
            shots_citrix["shot_0014"],
            "the 'Gmail' link at the top of the page",
            "Gmail",
            CALIBRATED_ZONES["gmail_link"],
        )
        assert comp["citrix"]["detected"] >= 3, (
            f"Citrix Gmail: seulement {comp['citrix']['rate_str']} détections"
        )
        if comp["citrix"]["detected"] >= 1:
            _assert_in_zone(comp["citrix"], CALIBRATED_ZONES["gmail_link"], "Gmail (Citrix)")
        print(f"\n [Gmail Citrix] orig={comp['original']['rate_str']}, "
              f"citrix={comp['citrix']['rate_str']}")
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Tests de dégradation progressive — qualité JPEG 50 → 15 → 5
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.visual
class TestDegradationProgressive:
    """Measure at which JPEG quality level the grounding starts failing."""

    @pytest.fixture(scope="class")
    def shot_0004(self):
        return _load_screenshot("shot_0004_full.png")

    def test_fichier_degradation_progressive(self, shot_0004):
        """'Fichier' menu: test JPEG Q50, Q25, Q15, Q10, Q5."""
        # Qualities are tried best-to-worst; 3 runs per quality level.
        qualities = [50, 25, 15, 10, 5]
        results_by_quality = {}

        for q in qualities:
            degraded = _degrade_citrix(shot_0004, quality=q)
            results = _run_n_times(
                degraded,
                "the 'Fichier' menu item in the menu bar",
                n=3,
                delay=0.2,
            )
            stats = _compute_stats(results)
            results_by_quality[q] = stats

        # Print the degradation report
        print("\n === Dégradation progressive : Fichier menu ===")
        for q in qualities:
            s = results_by_quality[q]
            zone_ok = ""
            if s["detected"] >= 1:
                cx = s["x_mean"]
                cy = s["y_mean"]
                z = CALIBRATED_ZONES["fichier_menu"]
                in_zone = z["x_min"] <= cx <= z["x_max"] and z["y_min"] <= cy <= z["y_max"]
                zone_ok = " (in zone)" if in_zone else f" (HORS zone: {cx:.3f},{cy:.3f})"
            print(f"   Q{q:>2}: {s['rate_str']} détections{zone_ok}")

        # At least Q50 and Q25 must keep working (2/3 detections)
        assert results_by_quality[50]["detected"] >= 2, "Q50 devrait fonctionner"
        assert results_by_quality[25]["detected"] >= 2, "Q25 devrait fonctionner"
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Rapport final — exécuté en dernier, résume tout
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.visual
class TestRapportFinal:
    """Full report on the VLM grounding capabilities.

    Runs a battery of detections and produces a structured report with
    detection rate, variance, and an original-vs-Citrix comparison.
    """

    def test_rapport_complet(self):
        """Generate the final VLM grounding robustness report.

        For each of the 8 calibrated targets: 5 detections on the
        original PNG, then 5 on a JPEG Q20 "Citrix" degradation; prints
        a formatted table and asserts >= 80% overall detection on the
        originals.
        """
        # FIX: removed unused `from PIL import Image` (never referenced here).

        shots = {
            "shot_0001": _load_screenshot("shot_0001_full.png"),
            "shot_0004": _load_screenshot("shot_0004_full.png"),
            "shot_0014": _load_screenshot("shot_0014_full.png"),
        }

        # (screenshot key, report label, VLM prompt, calibrated zone)
        targets = [
            ("shot_0001", "Rechercher (taskbar)",
             "the 'Rechercher' search text in the Windows taskbar at the bottom",
             CALIBRATED_ZONES["rechercher_taskbar"]),
            ("shot_0001", "agent_v1 (dossier)",
             "the folder named 'agent_v1' in the file list",
             CALIBRATED_ZONES["agent_v1_folder"]),
            ("shot_0004", "Fichier (menu)",
             "the 'Fichier' menu item in the menu bar",
             CALIBRATED_ZONES["fichier_menu"]),
            ("shot_0004", "Modifier (menu)",
             "the 'Modifier' menu item in the menu bar",
             CALIBRATED_ZONES["modifier_menu"]),
            ("shot_0004", "Ceci est un test.txt (onglet)",
             "the tab labeled 'Ceci est un test.txt'",
             CALIBRATED_ZONES["ceci_est_un_test_tab"]),
            ("shot_0004", "Close X (Bloc-notes)",
             "the close button X of the Notepad window at the top right",
             CALIBRATED_ZONES["close_x_notepad"]),
            ("shot_0014", "Recherche Google (barre)",
             "the Google search bar 'Rechercher sur Google ou saisir une URL'",
             CALIBRATED_ZONES["google_search_bar"]),
            ("shot_0014", "Gmail (lien)",
             "the 'Gmail' link at the top of the page",
             CALIBRATED_ZONES["gmail_link"]),
        ]

        report_lines = [
            "",
            "=" * 80,
            "RAPPORT DE ROBUSTESSE — Grounding VLM qwen2.5vl:7b",
            f"Date: {time.strftime('%Y-%m-%d %H:%M:%S')}",
            f"Screenshots: 1280x800 (3 images, {len(targets)} cibles)",
            "Répétitions: 5 par cible (original + Citrix Q20)",
            "=" * 80,
            "",
            "--- ORIGINAL (PNG) ---",
            f"{'Élément':<35} {'Taux':>6} {'X moy':>8} {'Y moy':>8} "
            f"{'Var X':>8} {'Var Y':>8} {'Zone':>6}",
            "-" * 80,
        ]

        all_original_stats = []
        all_citrix_stats = []

        # --- Pass 1: original PNG screenshots, 5 runs per target ---
        for shot_name, label, desc, zone in targets:
            results_orig = _run_n_times(shots[shot_name], desc, n=5, delay=0.2)
            stats_orig = _compute_stats(results_orig)
            all_original_stats.append((label, stats_orig, zone))

            in_zone = "?"
            if stats_orig["detected"] >= 1:
                cx, cy = stats_orig["x_mean"], stats_orig["y_mean"]
                ok = (zone["x_min"] <= cx <= zone["x_max"]
                      and zone["y_min"] <= cy <= zone["y_max"])
                in_zone = "OK" if ok else "HORS"

            report_lines.append(
                f"{label:<35} {stats_orig['rate_str']:>6} "
                f"{stats_orig.get('x_mean', 0):>8.4f} "
                f"{stats_orig.get('y_mean', 0):>8.4f} "
                f"{stats_orig.get('x_range', 0):>8.4f} "
                f"{stats_orig.get('y_range', 0):>8.4f} "
                f"{in_zone:>6}"
            )

        report_lines.extend([
            "",
            "--- CITRIX (JPEG Q20) ---",
            f"{'Élément':<35} {'Taux':>6} {'X moy':>8} {'Y moy':>8} "
            f"{'Var X':>8} {'Var Y':>8} {'Zone':>6} {'Écart orig':>10}",
            "-" * 90,
        ])

        # --- Pass 2: JPEG Q20 degradation, compared to pass 1 by index ---
        for i, (shot_name, label, desc, zone) in enumerate(targets):
            citrix_b64 = _degrade_citrix(shots[shot_name], quality=20)
            results_citrix = _run_n_times(citrix_b64, desc, n=5, delay=0.2)
            stats_citrix = _compute_stats(results_citrix)
            all_citrix_stats.append((label, stats_citrix, zone))

            in_zone = "?"
            ecart = "N/A"
            if stats_citrix["detected"] >= 1:
                cx, cy = stats_citrix["x_mean"], stats_citrix["y_mean"]
                ok = (zone["x_min"] <= cx <= zone["x_max"]
                      and zone["y_min"] <= cy <= zone["y_max"])
                in_zone = "OK" if ok else "HORS"

                # Mean-position drift vs the original run for the same target
                orig_stats = all_original_stats[i][1]
                if orig_stats["detected"] >= 1:
                    dx = abs(cx - orig_stats["x_mean"])
                    dy = abs(cy - orig_stats["y_mean"])
                    ecart = f"{dx:.4f}/{dy:.4f}"

            report_lines.append(
                f"{label:<35} {stats_citrix['rate_str']:>6} "
                f"{stats_citrix.get('x_mean', 0):>8.4f} "
                f"{stats_citrix.get('y_mean', 0):>8.4f} "
                f"{stats_citrix.get('x_range', 0):>8.4f} "
                f"{stats_citrix.get('y_range', 0):>8.4f} "
                f"{in_zone:>6} {ecart:>10}"
            )

        # --- Summary ---
        orig_total = sum(s["detected"] for _, s, _ in all_original_stats)
        orig_max = sum(s["total"] for _, s, _ in all_original_stats)
        citrix_total = sum(s["detected"] for _, s, _ in all_citrix_stats)
        citrix_max = sum(s["total"] for _, s, _ in all_citrix_stats)

        orig_in_zone = sum(
            1 for _, s, z in all_original_stats
            if s["detected"] >= 1
            and z["x_min"] <= s["x_mean"] <= z["x_max"]
            and z["y_min"] <= s["y_mean"] <= z["y_max"]
        )
        citrix_in_zone = sum(
            1 for _, s, z in all_citrix_stats
            if s["detected"] >= 1
            and z["x_min"] <= s["x_mean"] <= z["x_max"]
            and z["y_min"] <= s["y_mean"] <= z["y_max"]
        )

        # Unreliable elements: low detection rate, or variance above threshold
        unreliable = []
        for label, s, _ in all_original_stats:
            if s["detected"] < 3:
                unreliable.append(f"{label} (taux {s['rate_str']})")
            elif s.get("x_range", 0) >= _MAX_VARIANCE or s.get("y_range", 0) >= _MAX_VARIANCE:
                unreliable.append(
                    f"{label} (variance X={s.get('x_range', 0):.4f} "
                    f"Y={s.get('y_range', 0):.4f})"
                )

        report_lines.extend([
            "",
            "=" * 80,
            "RÉSUMÉ",
            "=" * 80,
            f" Détection original : {orig_total}/{orig_max} "
            f"({orig_total/orig_max*100:.0f}%)",
            f" Détection Citrix Q20: {citrix_total}/{citrix_max} "
            f"({citrix_total/citrix_max*100:.0f}%)",
            f" Positionnement correct (original) : {orig_in_zone}/{len(all_original_stats)}",
            f" Positionnement correct (Citrix) : {citrix_in_zone}/{len(all_citrix_stats)}",
            "",
        ])

        if unreliable:
            report_lines.append(" ÉLÉMENTS NON FIABLES :")
            for u in unreliable:
                report_lines.append(f"    - {u}")
        else:
            report_lines.append(" Tous les éléments sont fiables.")

        report_lines.extend([
            "",
            " NOTES TECHNIQUES :",
            " - qwen2.5vl bbox_2d retourne des pixels relatifs à l'image envoyée",
            " - Normalisation : diviser par les dimensions de l'image (W, H)",
            " - temperature=0.1 donne une variance < 0.003 typiquement",
            "=" * 80,
        ])

        report = "\n".join(report_lines)
        print(report)

        # The test passes if at least 80% of the original detections succeed
        assert orig_total / orig_max >= 0.80, (
            f"Taux de détection global trop bas: {orig_total}/{orig_max}"
        )
|
||||||
Reference in New Issue
Block a user