feat(cognition): step timing + expected screen + Shadow auto-learning + qwen2.5vl VLM
Some checks failed
security-audit / Bandit (scan statique) (push) Successful in 13s
security-audit / pip-audit (CVE dépendances) (push) Successful in 10s
security-audit / Scan secrets (grep) (push) Successful in 8s
tests / Lint (ruff + black) (push) Successful in 14s
tests / Tests unitaires (sans GPU) (push) Failing after 15s
tests / Tests sécurité (critique) (push) Has been skipped
Enriched working memory:
- per-step timing (duration, average, alert when slow)
- expected screen vs. actual observation
- extended VLM context

VLM reasoning: defaults to qwen2.5vl:3b (gemma4 does not support vision)

Shadow auto-learning:
- stream_processor learns dialogs automatically
- a user click after a dialog → the pattern is memorized
- saved to data/learned_patterns.json

GUI-R1: 10 additional patterns extracted from the dataset

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
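For a concrete sense of what Shadow auto-learning produces, here is a hypothetical entry (illustrative trigger/target values, not taken from a real session) built with the exact keys that _try_shadow_learn passes to UIPatternLibrary.save_learned_pattern in the diff below, and which the message above says is persisted into data/learned_patterns.json:

    # Hypothetical learned pattern (illustrative values only), with the exact
    # keys built by _try_shadow_learn in the diff below:
    learned_pattern = {
        "category": "dialog",
        "triggers": ["do you want to save your changes"],  # first 80 chars of the dialog's OCR text, lowercased
        "action": "click",
        "target": "Save",  # OCR label found under the user's click
        "os": "windows",
        "confidence": 0.8,
    }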
@@ -1791,6 +1791,10 @@ class StreamProcessor:
        # Built workflows (used for matching)
        self._workflows: Dict[str, Any] = {}

        # Shadow learning: last UI pattern detected per session.
        # Stores {session_id: {"pattern": str, "ocr_text": str, "screen_state": obj, "shot_id": str}}
        self._pending_ui_patterns: Dict[str, Dict[str, Any]] = {}

        # Load the existing workflows from disk
        self._load_persisted_workflows()
@@ -1975,6 +1979,9 @@ class StreamProcessor:
        - key_combo/key_press with only bare modifier keys (ctrl, alt, shift, etc.)
        - key_combo/key_press with an empty key list
        - text_input with empty text

        Shadow learning: when a click follows a detected UI pattern,
        the dialog→button association is learned.
        """
        if _is_parasitic_event(event_data):
            logger.debug(
@@ -1982,9 +1989,119 @@ class StreamProcessor:
                f"type={event_data.get('type')}, data={event_data.get('keys', event_data.get('text', ''))}"
            )
            return {"status": "event_filtered", "session_id": session_id, "reason": "parasitic"}

        # Shadow learning: if a UI pattern is pending and a click comes in
        if event_data.get("type") == "mouse_click":
            self._try_shadow_learn(session_id, event_data)

        self.session_manager.add_event(session_id, event_data)
        return {"status": "event_recorded", "session_id": session_id}

    def _try_shadow_learn(self, session_id: str, click_event: Dict[str, Any]):
        """Tries to learn a UI pattern from a click observed in Shadow mode.

        When a screenshot contained a detected UI pattern (a dialog) and the
        user then clicks, the OCR text at the click position is extracted to
        learn the association: "when I see this text → click this button".
        """
        with self._data_lock:
            pending = self._pending_ui_patterns.pop(session_id, None)
        if not pending:
            return

        screen_state = pending.get("screen_state")
        if screen_state is None:
            return

        # Extract the click position (absolute pixels)
        pos = click_event.get("pos", [])
        if not pos or len(pos) != 2:
            return

        click_x, click_y = pos[0], pos[1]

        # Find the OCR text closest to the click position
        # via the ScreenState's ui_elements (they carry bbox + label)
        clicked_label = self._find_label_at_position(screen_state, click_x, click_y)
        if not clicked_label:
            return

        # Extract the main trigger from the dialog's OCR text
        ocr_text = pending.get("ocr_text", "")
        # Use a short excerpt as the trigger (max 80 chars, first relevant segment)
        trigger_text = ocr_text[:80].strip().lower()
        if not trigger_text:
            return

        logger.info(
            f"Shadow learning: pattern '{pending['pattern_name']}' "
            f"→ user clicked '{clicked_label}' | trigger='{trigger_text[:40]}...'"
        )

        # Persist the learned pattern
        try:
            from core.knowledge.ui_patterns import UIPatternLibrary
            lib = UIPatternLibrary()
            lib.save_learned_pattern({
                "category": "dialog",
                "triggers": [trigger_text],
                "action": "click",
                "target": clicked_label,
                "os": "windows",
                "confidence": 0.8,
            })
        except Exception as e:
            logger.warning(f"Shadow learning: failed to save pattern: {e}")

    @staticmethod
    def _find_label_at_position(screen_state, click_x: int, click_y: int) -> Optional[str]:
        """Finds the label of the UI element closest to the click position.

        Walks the ScreenState's ui_elements and returns the label of the
        element whose bbox contains the point, or the closest element if
        no bbox contains the point exactly.
        """
        ui_elements = getattr(screen_state, "ui_elements", [])
        if not ui_elements:
            return None

        best_label = None
        best_dist = float("inf")

        for elem in ui_elements:
            bbox = getattr(elem, "bbox", None)
            label = getattr(elem, "label", "")
            if not bbox or not label:
                continue

            # BBox = (x, y, width, height): extract the coordinates
            try:
                bx, by = bbox.x, bbox.y
                bw, bh = bbox.width, bbox.height
            except AttributeError:
                # Fallback if bbox is a list/tuple
                if hasattr(bbox, '__len__') and len(bbox) >= 4:
                    bx, by, bw, bh = bbox[0], bbox[1], bbox[2], bbox[3]
                else:
                    continue

            # Check whether the click falls inside the bbox
            if bx <= click_x <= bx + bw and by <= click_y <= by + bh:
                return label.strip()

            # Otherwise compute the distance to the bbox center
            cx = bx + bw / 2
            cy = by + bh / 2
            dist = ((click_x - cx) ** 2 + (click_y - cy) ** 2) ** 0.5
            if dist < best_dist:
                best_dist = dist
                best_label = label.strip()

        # Only return the closest element if it is reasonably close (< 100 px)
        if best_label and best_dist < 100:
            return best_label
        return None

    # =========================================================================
    # Screenshots
    # =========================================================================
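A quick way to sanity-check the matching rule in _find_label_at_position: the sketch below is not part of the commit; it fakes ScreenState and its ui_elements with SimpleNamespace and assumes StreamProcessor is importable from the patched module. It exercises the bbox-containment path, the nearest-within-100-px fallback (via the tuple-bbox branch), and the far-away None case.

    # Minimal sketch, not part of this commit: fake ScreenState/ui_elements
    # to exercise StreamProcessor._find_label_at_position.
    from types import SimpleNamespace

    ok_btn = SimpleNamespace(bbox=(400, 300, 80, 30), label="OK")  # (x, y, w, h)
    cancel_btn = SimpleNamespace(bbox=(500, 300, 80, 30), label="Cancel")
    state = SimpleNamespace(ui_elements=[ok_btn, cancel_btn])

    # Click inside the "OK" bbox → the containment path returns "OK" directly.
    assert StreamProcessor._find_label_at_position(state, 420, 315) == "OK"

    # Click 20 px left of "OK" → no bbox contains it, but the center of "OK"
    # is 60 px away (< 100), so the nearest-label fallback still returns "OK".
    assert StreamProcessor._find_label_at_position(state, 380, 315) == "OK"

    # Click far from everything (> 100 px from both centers) → None.
    assert StreamProcessor._find_label_at_position(state, 10, 10) is None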
@@ -2055,6 +2172,19 @@ class StreamProcessor:
                result["ui_pattern_action"] = pattern["action"]
                result["ui_pattern_target"] = pattern["target"]
                logger.info(f"UI pattern detected: {pattern['pattern']} → {pattern['target']}")

                # Shadow learning: remember the pattern while waiting for the user's click
                with self._data_lock:
                    self._pending_ui_patterns[session_id] = {
                        "pattern_name": pattern["pattern"],
                        "ocr_text": ocr_text,
                        "screen_state": screen_state,
                        "shot_id": shot_id,
                    }
            else:
                # No known pattern → clear the pending entry (the screen has changed)
                with self._data_lock:
                    self._pending_ui_patterns.pop(session_id, None)
        except ImportError:
            pass
        except Exception as e:
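Note that UIPatternLibrary.save_learned_pattern itself is not part of this diff. As a rough mental model only, a minimal helper consistent with the commit message ("saved to data/learned_patterns.json") might look like the sketch below; the actual implementation in core/knowledge/ui_patterns.py may differ.

    # Illustrative sketch only: the real UIPatternLibrary.save_learned_pattern
    # lives in core/knowledge/ui_patterns.py and is not shown in this diff.
    # Assumed behavior: append each learned pattern to data/learned_patterns.json.
    import json
    from pathlib import Path
    from typing import Any, Dict

    LEARNED_PATH = Path("data/learned_patterns.json")

    def save_learned_pattern(pattern: Dict[str, Any]) -> None:
        patterns = []
        if LEARNED_PATH.exists():
            patterns = json.loads(LEARNED_PATH.read_text(encoding="utf-8"))
        patterns.append(pattern)
        LEARNED_PATH.parent.mkdir(parents=True, exist_ok=True)
        LEARNED_PATH.write_text(
            json.dumps(patterns, ensure_ascii=False, indent=2), encoding="utf-8"
        )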