feat(vwb): Remplacer EasyOCR par docTR (Mindee) pour l'OCR
docTR est plus performant et mieux maintenu. Crée un service OCR partagé (singleton paresseux) utilisé par verify_text_content et extraire_tableau, avec les mêmes signatures et fallbacks. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -406,7 +406,7 @@ Retourne maintenant le JSON du tableau:"""
|
||||
|
||||
def _extraire_avec_ocr(self, image_base64: str) -> Optional[List]:
|
||||
"""
|
||||
Extrait le tableau avec OCR.
|
||||
Extrait le tableau avec OCR (docTR).
|
||||
|
||||
Args:
|
||||
image_base64: Image en base64
|
||||
@@ -415,27 +415,19 @@ Retourne maintenant le JSON du tableau:"""
|
||||
Liste de listes
|
||||
"""
|
||||
try:
|
||||
# Décoder l'image
|
||||
from PIL import Image
|
||||
from services.ocr_service import ocr_extract_words
|
||||
|
||||
image_data = base64.b64decode(image_base64)
|
||||
pil_image = Image.open(io.BytesIO(image_data))
|
||||
|
||||
# Essayer EasyOCR
|
||||
try:
|
||||
import easyocr
|
||||
import numpy as np
|
||||
|
||||
reader = easyocr.Reader(['fr', 'en'], gpu=True)
|
||||
img_array = np.array(pil_image)
|
||||
|
||||
results = reader.readtext(img_array)
|
||||
words = ocr_extract_words(pil_image)
|
||||
|
||||
# Grouper par lignes (par coordonnée Y)
|
||||
return self._grouper_ocr_en_lignes(results)
|
||||
return self._grouper_ocr_en_lignes(words)
|
||||
|
||||
except ImportError:
|
||||
print("⚠️ EasyOCR non disponible")
|
||||
print("⚠️ docTR non disponible")
|
||||
return None
|
||||
|
||||
except Exception as e:
|
||||
@@ -447,7 +439,7 @@ Retourne maintenant le JSON du tableau:"""
|
||||
Groupe les résultats OCR en lignes de tableau.
|
||||
|
||||
Args:
|
||||
results: Résultats EasyOCR [(bbox, text, conf), ...]
|
||||
results: Liste de dicts docTR [{"text", "bbox": (x1,y1,x2,y2), "confidence"}, ...]
|
||||
|
||||
Returns:
|
||||
Liste de lignes
|
||||
@@ -457,13 +449,13 @@ Retourne maintenant le JSON du tableau:"""
|
||||
|
||||
# Extraire les positions et textes
|
||||
items = []
|
||||
for bbox, text, conf in results:
|
||||
if conf < 0.3: # Ignorer basse confiance
|
||||
for word in results:
|
||||
if word["confidence"] < 0.3: # Ignorer basse confiance
|
||||
continue
|
||||
# Calculer le centre Y
|
||||
y_center = (bbox[0][1] + bbox[2][1]) / 2
|
||||
x_center = (bbox[0][0] + bbox[2][0]) / 2
|
||||
items.append({'text': text, 'x': x_center, 'y': y_center})
|
||||
x1, y1, x2, y2 = word["bbox"]
|
||||
y_center = (y1 + y2) / 2
|
||||
x_center = (x1 + x2) / 2
|
||||
items.append({'text': word["text"], 'x': x_center, 'y': y_center})
|
||||
|
||||
if not items:
|
||||
return []
|
||||
|
||||
@@ -7,7 +7,7 @@ de l'écran, en utilisant l'OCR pour extraire et comparer le texte.
|
||||
|
||||
Modes OCR disponibles:
|
||||
- ollama: Utilise un modèle de vision local (GPU, meilleure qualité)
|
||||
- easyocr: OCR traditionnel (CPU/GPU, plus rapide)
|
||||
- doctr: OCR traditionnel (CPU/GPU, plus rapide)
|
||||
"""
|
||||
|
||||
from typing import Dict, Any, List, Optional
|
||||
@@ -34,7 +34,7 @@ class VWBVerifyTextContentAction(BaseVWBAction):
|
||||
|
||||
Supporte deux modes OCR:
|
||||
- ollama: Modèle de vision local (meilleure qualité, utilise GPU)
|
||||
- easyocr: OCR traditionnel (plus rapide, fallback)
|
||||
- doctr: OCR traditionnel (plus rapide, fallback)
|
||||
"""
|
||||
|
||||
# Configuration Ollama par défaut (centralisée via variable d'environnement)
|
||||
@@ -70,7 +70,7 @@ class VWBVerifyTextContentAction(BaseVWBAction):
|
||||
self.case_sensitive = parameters.get('case_sensitive', False)
|
||||
|
||||
# Configuration OCR
|
||||
self.ocr_mode = parameters.get('ocr_mode', 'ollama') # ollama (GPU) ou easyocr
|
||||
self.ocr_mode = parameters.get('ocr_mode', 'ollama') # ollama (GPU) ou doctr
|
||||
self.ollama_model = parameters.get('ollama_model', self.OLLAMA_MODEL)
|
||||
self.ollama_url = parameters.get('ollama_url', self.OLLAMA_URL)
|
||||
|
||||
@@ -100,8 +100,8 @@ class VWBVerifyTextContentAction(BaseVWBAction):
|
||||
errors.append(f"Mode de matching invalide: {self.match_mode}")
|
||||
|
||||
# Vérifier le mode OCR
|
||||
if self.ocr_mode not in ['ollama', 'easyocr']:
|
||||
errors.append(f"Mode OCR invalide: {self.ocr_mode} (utilisez 'ollama' ou 'easyocr')")
|
||||
if self.ocr_mode not in ['ollama', 'doctr', 'easyocr']:
|
||||
errors.append(f"Mode OCR invalide: {self.ocr_mode} (utilisez 'ollama' ou 'doctr')")
|
||||
|
||||
return errors
|
||||
|
||||
@@ -214,7 +214,7 @@ class VWBVerifyTextContentAction(BaseVWBAction):
|
||||
if self.ocr_mode == 'ollama':
|
||||
return self._extract_with_ollama(screenshot)
|
||||
else:
|
||||
return self._extract_with_easyocr(screenshot)
|
||||
return self._extract_with_doctr(screenshot)
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Erreur extraction texte: {e}")
|
||||
@@ -264,20 +264,20 @@ class VWBVerifyTextContentAction(BaseVWBAction):
|
||||
return extracted_text
|
||||
else:
|
||||
print(f" ⚠️ Erreur Ollama: {response.status_code}")
|
||||
# Fallback sur easyocr
|
||||
return self._extract_with_easyocr(image)
|
||||
# Fallback sur docTR
|
||||
return self._extract_with_doctr(image)
|
||||
|
||||
except requests.exceptions.ConnectionError:
|
||||
print(f" ⚠️ Ollama non disponible, fallback sur easyocr")
|
||||
return self._extract_with_easyocr(image)
|
||||
print(f" ⚠️ Ollama non disponible, fallback sur docTR")
|
||||
return self._extract_with_doctr(image)
|
||||
|
||||
except Exception as e:
|
||||
print(f" ⚠️ Erreur Ollama: {e}, fallback sur easyocr")
|
||||
return self._extract_with_easyocr(image)
|
||||
print(f" ⚠️ Erreur Ollama: {e}, fallback sur docTR")
|
||||
return self._extract_with_doctr(image)
|
||||
|
||||
def _extract_with_easyocr(self, image) -> Optional[str]:
|
||||
def _extract_with_doctr(self, image) -> Optional[str]:
|
||||
"""
|
||||
Extrait le texte en utilisant EasyOCR.
|
||||
Extrait le texte en utilisant docTR.
|
||||
|
||||
Args:
|
||||
image: Image PIL à analyser
|
||||
@@ -286,29 +286,20 @@ class VWBVerifyTextContentAction(BaseVWBAction):
|
||||
Texte extrait ou None si erreur
|
||||
"""
|
||||
try:
|
||||
import easyocr
|
||||
import numpy as np
|
||||
from services.ocr_service import ocr_extract_text
|
||||
|
||||
print("📝 Extraction OCR via EasyOCR...")
|
||||
print("📝 Extraction OCR via docTR...")
|
||||
|
||||
# Convertir en array numpy
|
||||
img_array = np.array(image)
|
||||
|
||||
# EasyOCR (GPU si disponible)
|
||||
reader = easyocr.Reader(['fr', 'en'], gpu=True)
|
||||
results = reader.readtext(img_array)
|
||||
|
||||
# Combiner les résultats
|
||||
extracted_text = ' '.join([result[1] for result in results])
|
||||
extracted_text = ocr_extract_text(image)
|
||||
print(f" ✅ Texte extrait ({len(extracted_text)} caractères)")
|
||||
return extracted_text.strip()
|
||||
|
||||
except ImportError:
|
||||
print(" ⚠️ easyocr non disponible")
|
||||
print(" ⚠️ docTR non disponible")
|
||||
return ""
|
||||
|
||||
except Exception as e:
|
||||
print(f" ❌ Erreur EasyOCR: {e}")
|
||||
print(f" ❌ Erreur docTR: {e}")
|
||||
return None
|
||||
|
||||
def _compare_text(self, extracted: str, expected: str) -> bool:
|
||||
|
||||
96
visual_workflow_builder/backend/services/ocr_service.py
Normal file
96
visual_workflow_builder/backend/services/ocr_service.py
Normal file
@@ -0,0 +1,96 @@
|
||||
"""Service OCR partagé basé sur docTR (Mindee).
|
||||
|
||||
Singleton paresseux : le modèle n'est chargé qu'au premier appel.
|
||||
Import dynamique — si docTR n'est pas installé, les fonctions
|
||||
lèvent ImportError sans crasher l'application.
|
||||
"""
|
||||
|
||||
from typing import List, Optional
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
|
||||
# Singleton paresseux
|
||||
_predictor = None
|
||||
|
||||
|
||||
def _get_predictor():
    """Load the docTR OCR predictor once (lazy singleton).

    Returns:
        The shared ``ocr_predictor`` instance. docTR selects GPU
        automatically when one is available.

    Raises:
        ImportError: if docTR is not installed.
    """
    global _predictor
    if _predictor is None:
        # Local import so the application does not crash at startup when
        # docTR is absent; callers are expected to handle ImportError.
        # (The previous version also imported doctr.io.DocumentFile, which
        # was never used — removed.)
        from doctr.models import ocr_predictor

        _predictor = ocr_predictor(
            det_arch="fast_base",
            reco_arch="crnn_vgg16_bn",
            pretrained=True,
        )
    return _predictor
|
||||
|
||||
|
||||
def ocr_extract_text(image: Image.Image) -> str:
    """Run OCR on a PIL image and return its raw text.

    Args:
        image: PIL image; converted to RGB before inference.

    Returns:
        All recognized text, with detected lines joined by single spaces.

    Raises:
        ImportError: if docTR is not installed.
    """
    predictor = _get_predictor()
    page_input = np.array(image.convert("RGB"))

    document = predictor([page_input])

    # Flatten the page → block → line hierarchy into one text line each.
    line_texts = [
        " ".join(word.value for word in line.words)
        for page in document.pages
        for block in page.blocks
        for line in block.lines
    ]
    return " ".join(line_texts)
|
||||
|
||||
|
||||
def ocr_extract_words(image: Image.Image) -> List[dict]:
    """Run OCR and return every detected word with box and confidence.

    Args:
        image: PIL image; converted to RGB before inference.

    Returns:
        List of dicts: ``{"text": str, "bbox": (x1, y1, x2, y2),
        "confidence": float}``. Bounding-box coordinates are absolute
        pixels.

    Raises:
        ImportError: if docTR is not installed.
    """
    predictor = _get_predictor()
    rgb = image.convert("RGB")
    width, height = rgb.size

    document = predictor([np.array(rgb)])

    extracted: List[dict] = []
    for page in document.pages:
        for block in page.blocks:
            for line in block.lines:
                for word in line.words:
                    # docTR geometry is normalized to [0, 1];
                    # scale back to pixel coordinates.
                    (nx1, ny1), (nx2, ny2) = word.geometry
                    extracted.append({
                        "text": word.value,
                        "bbox": (
                            nx1 * width,
                            ny1 * height,
                            nx2 * width,
                            ny2 * height,
                        ),
                        "confidence": word.confidence,
                    })
    return extracted
|
||||
Reference in New Issue
Block a user