diff --git a/pipeline/ocr_qwen.py b/pipeline/ocr_qwen.py
index d8cc201..8789aff 100644
--- a/pipeline/ocr_qwen.py
+++ b/pipeline/ocr_qwen.py
@@ -27,22 +27,64 @@ class QwenVLOCR:
     def _init_model(self):
         t0 = time.time()
 
+        import os as _os
+
         # max_pixels caps the number of visual patches to avoid OOM on
         # 300 dpi images (2481x3509). ~800 patches balances quality vs. VRAM
         # and fits comfortably in ~5-6 GB even with other GPU processes
         # running in the background. Configurable via env var QWEN_MAX_PIXELS (in patches).
-        import os as _os
         max_pixels = int(_os.environ.get("QWEN_MAX_PIXELS", 800)) * 28 * 28
         self.processor = AutoProcessor.from_pretrained(
             MODEL_PATH,
             min_pixels=256 * 28 * 28,
             max_pixels=max_pixels,
         )
-        self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
-            MODEL_PATH,
-            torch_dtype=torch.bfloat16,
-            device_map="auto",
-        )
+
+        # Device: "auto" by default (GPU if available), "cpu" to force the CPU
+        # when VRAM is saturated by other processes. Configurable via
+        # QWEN_DEVICE=cpu.
+        device = _os.environ.get("QWEN_DEVICE", "auto").lower()
+        if device == "cpu":
+            # On CPU we aim to maximize throughput:
+            # 1. Use all cores via torch.set_num_threads (set_num_threads
+            #    takes precedence over OMP_NUM_THREADS for native PyTorch ops).
+            # 2. Pick bfloat16 if the CPU supports it natively (Zen 5, Zen 4,
+            #    and Intel Sapphire Rapids+ have AVX-512 BF16), else float32.
+            n_threads = int(_os.environ.get("TORCH_NUM_THREADS", _os.cpu_count() or 8))
+            torch.set_num_threads(n_threads)
+            try:
+                torch.set_num_interop_threads(n_threads)
+            except RuntimeError:
+                pass  # already initialized, ignore
+
+            # Detect AVX-512 BF16 via /proc/cpuinfo (Linux)
+            use_bf16 = False
+            try:
+                with open("/proc/cpuinfo") as f:
+                    flags = f.read()
+                use_bf16 = "avx512_bf16" in flags or "amx_bf16" in flags
+            except Exception:
+                pass
+            dtype = torch.bfloat16 if use_bf16 else torch.float32
+
+            self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
+                MODEL_PATH,
+                torch_dtype=dtype,
+                device_map={"": "cpu"},
+                low_cpu_mem_usage=True,
+            )
+            self.device_used = "cpu"
+            self.cpu_threads = n_threads
+            self.cpu_dtype = str(dtype).replace("torch.", "")
+        else:
+            self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
+                MODEL_PATH,
+                torch_dtype=torch.bfloat16,
+                device_map="auto",
+            )
+            self.device_used = "cuda" if torch.cuda.is_available() else "cpu"
+            self.cpu_threads = None
+            self.cpu_dtype = None
         self.model.eval()
         self.load_time = time.time() - t0
         self.vram_gb = torch.cuda.memory_allocated() / 1e9 if torch.cuda.is_available() else 0.0
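
For context, a minimal usage sketch of the knobs this diff adds. The env vars (`QWEN_DEVICE`, `TORCH_NUM_THREADS`, `QWEN_MAX_PIXELS`) and the `device_used` / `cpu_threads` / `cpu_dtype` attributes come straight from the diff; the no-argument `QwenVLOCR()` constructor and the import path are assumptions. The variables must be set before the instance is created, since `_init_model` reads them at load time:

```python
# Hypothetical usage sketch: force CPU inference with a reduced visual
# budget. The constructor call is an assumption; the env vars and
# attributes below are the ones introduced by this diff.
import os

os.environ["QWEN_DEVICE"] = "cpu"        # skip the GPU entirely
os.environ["TORCH_NUM_THREADS"] = "16"   # override the cpu_count() default
os.environ["QWEN_MAX_PIXELS"] = "512"    # in patches; multiplied by 28*28 internally

from pipeline.ocr_qwen import QwenVLOCR

ocr = QwenVLOCR()
# "cpu", 16, and "bfloat16" or "float32" depending on /proc/cpuinfo flags
print(ocr.device_used, ocr.cpu_threads, ocr.cpu_dtype)
```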