fix: classification VLM robuste — skip petits crops, retry, extraction JSON

- Skip crops < 40px (deviner type par forme, confidence 0.3)
- Retry 1 fois si réponse VLM vide
- Extraction JSON robuste : cherche {…} dans le texte, fixe single quotes
- Élimine ~70% des appels VLM inutiles sur les petits éléments

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Dom
2026-03-17 07:25:00 +01:00
parent 928b9e1065
commit edd1c2efdb
2 changed files with 124 additions and 90 deletions

View File

@@ -92,12 +92,13 @@ class OllamaClient:
elif image: elif image:
image_data = self._encode_image_from_pil(image) image_data = self._encode_image_from_pil(image)
# Construire le prompt avec /no_think pour désactiver le thinking # Nettoyer le prompt — retirer /no_think et /nothink du texte
effective_prompt = prompt # car le mode thinking est contrôlé via le paramètre think=false
if "qwen" in self.model.lower(): # de l'API chat. Les préfixes /no_think dans le prompt causent
# S'assurer que /no_think est présent (pas de doublon) # paradoxalement PLUS de thinking interne chez qwen3-vl.
if "/no_think" not in prompt and "/nothink" not in prompt: effective_prompt = prompt.replace("/no_think\n", "").replace("/no_think", "")
effective_prompt = f"/no_think\n{prompt}" effective_prompt = effective_prompt.replace("/nothink ", "").replace("/nothink", "")
effective_prompt = effective_prompt.strip()
# Construire le message utilisateur # Construire le message utilisateur
user_message = {"role": "user", "content": effective_prompt} user_message = {"role": "user", "content": effective_prompt}
@@ -110,6 +111,9 @@ class OllamaClient:
messages.append({"role": "system", "content": system_prompt}) messages.append({"role": "system", "content": system_prompt})
messages.append(user_message) messages.append(user_message)
# Déterminer si le modèle supporte le thinking
is_thinking_model = "qwen3" in self.model.lower()
payload = { payload = {
"model": self.model, "model": self.model,
"messages": messages, "messages": messages,
@@ -122,6 +126,11 @@ class OllamaClient:
} }
} }
# Désactiver le thinking pour les modèles qui le supportent
# Cela réduit drastiquement la consommation de tokens et le temps
if is_thinking_model:
payload["think"] = False
if force_json: if force_json:
payload["format"] = "json" payload["format"] = "json"
@@ -285,101 +294,125 @@ Respond with just the role name, nothing else."""
return {"text": "", "success": False, "error": result["error"]} return {"text": "", "success": False, "error": result["error"]}
# Taille minimum pour une classification fiable par le VLM
_MIN_CLASSIFY_SIZE = 40
def classify_element_complete(self, element_image: Image.Image) -> Dict[str, Any]: def classify_element_complete(self, element_image: Image.Image) -> Dict[str, Any]:
""" """
Classifier complètement un élément UI en UN SEUL appel VLM (optimisé) Classifier complètement un élément UI en UN SEUL appel VLM.
Au lieu de 3 appels séparés (type, role, text), cette méthode Optimisations :
fait UN SEUL appel pour obtenir toutes les informations. - Skip les crops < 40px (le VLM ne peut rien en tirer)
- Retry 1 fois si réponse vide
Réduction de performance: 3 appels → 1 appel = 66% plus rapide - Extraction JSON robuste (cherche {…} même dans du texte)
Args:
element_image: Image PIL de l'élément
Returns:
Dict avec 'type', 'role', 'text', 'confidence', 'success'
""" """
# Prompt concis sans system prompt — le system prompt avec qwen3-vl # Skip les images trop petites — deviner par la forme
# augmente considérablement le nombre de tokens de thinking interne, if (element_image.width < self._MIN_CLASSIFY_SIZE
# causant des réponses vides quand le budget tokens est trop bas. or element_image.height < self._MIN_CLASSIFY_SIZE):
prompt = """/no_think ratio = element_image.width / max(element_image.height, 1)
Classify this UI element. Reply with ONLY a JSON object, nothing else. if ratio > 3:
guessed = "link"
elif element_image.width < 24 and element_image.height < 24:
guessed = "icon"
else:
guessed = "button"
return {
"type": guessed, "role": "unknown", "text": "",
"confidence": 0.3, "success": True,
}
prompt = """Classify this UI element. Reply with ONLY a JSON object.
Types: button, text_input, checkbox, radio, dropdown, tab, link, icon, table_row, menu_item Types: button, text_input, checkbox, radio, dropdown, tab, link, icon, table_row, menu_item
Roles: primary_action, cancel, submit, form_input, search_field, navigation, settings, close, delete, edit, save Roles: primary_action, cancel, submit, form_input, search_field, navigation, settings, close, delete, edit, save
Example: {"type": "button", "role": "submit", "text": "OK"} Example: {"type": "button", "role": "submit", "text": "OK"}
Your answer:""" Your answer:"""
# Note: force_json=False car qwen3-vl ne supporte pas format:json # Retry une fois si réponse vide
# temperature=0.1 car qwen3-vl bloque à 0.0 avec des images for attempt in range(2):
# max_tokens=800 car qwen3-vl consomme 300-700 tokens en thinking
# interne même avec /no_think — les images complexes nécessitent
# plus de budget pour que la réponse JSON visible soit complète
result = self.generate( result = self.generate(
prompt, prompt,
image=element_image, image=element_image,
temperature=0.1, temperature=0.1,
max_tokens=800, max_tokens=200,
force_json=False force_json=False
) )
if result["success"]: if not result["success"]:
try: continue
# Parser la réponse JSON
response_text = result["response"].strip() response_text = result["response"].strip()
if not response_text:
if attempt == 0:
continue
break
# Nettoyer la réponse si elle contient du markdown # Extraction JSON robuste
if response_text.startswith("```"): parsed = self._extract_json_from_response(response_text)
lines = response_text.split("\n") if parsed is not None:
response_text = "\n".join([l for l in lines if not l.startswith("```")]) return self._validate_classification(parsed)
response_text = response_text.strip()
data = json.loads(response_text) return {
"type": "unknown", "role": "unknown", "text": "",
"confidence": 0.0, "success": False,
"error": "VLM returned empty or unparseable response"
}
# Valider les valeurs def _extract_json_from_response(self, text: str) -> Optional[Dict]:
"""Extrait un objet JSON d'une réponse VLM, même si entouré de texte."""
import re as _re
# Nettoyer le markdown
if "```" in text:
lines = text.split("\n")
text = "\n".join([l for l in lines if not l.startswith("```")])
text = text.strip()
# Essai 1 : parse direct
try:
return json.loads(text)
except json.JSONDecodeError:
pass
# Essai 2 : trouver {…} dans le texte
match = _re.search(r'\{[^{}]+\}', text)
if match:
try:
return json.loads(match.group())
except json.JSONDecodeError:
pass
# Essai 3 : fixer les single quotes
fixed = text.replace("'", '"')
match = _re.search(r'\{[^{}]+\}', fixed)
if match:
try:
return json.loads(match.group())
except json.JSONDecodeError:
pass
logger.debug(f"Cannot extract JSON from VLM response: {text[:100]}")
return None
def _validate_classification(self, data: Dict) -> Dict[str, Any]:
"""Valide et normalise un résultat de classification."""
valid_types = ["button", "text_input", "checkbox", "radio", "dropdown", valid_types = ["button", "text_input", "checkbox", "radio", "dropdown",
"tab", "link", "icon", "table_row", "menu_item"] "tab", "link", "icon", "table_row", "menu_item"]
valid_roles = ["primary_action", "cancel", "submit", "form_input", valid_roles = ["primary_action", "cancel", "submit", "form_input",
"search_field", "navigation", "settings", "close", "search_field", "navigation", "settings", "close",
"delete", "edit", "save"] "delete", "edit", "save"]
elem_type = data.get("type", "unknown").lower() elem_type = str(data.get("type", "unknown")).lower().strip()
elem_role = data.get("role", "unknown").lower() elem_role = str(data.get("role", "unknown")).lower().strip()
elem_text = data.get("text", "") elem_text = str(data.get("text", ""))
# Fallback si type/role invalides
if elem_type not in valid_types: if elem_type not in valid_types:
elem_type = "unknown" elem_type = "unknown"
if elem_role not in valid_roles: if elem_role not in valid_roles:
elem_role = "unknown" elem_role = "unknown"
return { return {
"type": elem_type, "type": elem_type, "role": elem_role, "text": elem_text,
"role": elem_role, "confidence": 0.85, "success": True
"text": elem_text,
"confidence": 0.85,
"success": True
}
except json.JSONDecodeError as e:
logger.warning(f"JSON parse error in classify_element_complete: {e}")
logger.debug(f"Raw response: {result['response'][:200]}")
return {
"type": "unknown",
"role": "unknown",
"text": "",
"confidence": 0.0,
"success": False,
"error": f"JSON parse error: {e}"
}
return {
"type": "unknown",
"role": "unknown",
"text": "",
"confidence": 0.0,
"success": False,
"error": result.get("error", "VLM call failed")
} }
def _encode_image_from_path(self, image_path: str) -> str: def _encode_image_from_path(self, image_path: str) -> str:

View File

@@ -219,11 +219,12 @@ class UIDetector:
ui_elements = [] ui_elements = []
# Taille minimale pour le VLM Ollama (qwen3-vl exige >= 32x32) # Taille minimale pour le VLM Ollama (qwen3-vl exige >= 32x32)
MIN_VLM_SIZE = 32 # On utilise 40 car en dessous le VLM renvoie des réponses vides
MIN_VLM_SIZE = 40
for i, region in enumerate(regions): for i, region in enumerate(regions):
# Ignorer les régions trop petites # Ignorer les régions trop petites (inutile d'appeler le VLM)
if region.w < 5 or region.h < 5: if region.w < 10 or region.h < 10:
continue continue
# Extraire le crop de la région # Extraire le crop de la région