diff --git a/core/embedding/clip_embedder.py b/core/embedding/clip_embedder.py
index 3b86fa754..db573b6fb 100644
--- a/core/embedding/clip_embedder.py
+++ b/core/embedding/clip_embedder.py
@@ -58,9 +58,19 @@ class CLIPEmbedder(EmbedderBase):
                 "Install it with: pip install open-clip-torch"
             )
 
-        # Default to CPU to save GPU for vision models (Qwen3-VL, etc.)
         if device is None:
-            device = "cpu"
+            try:
+                import torch
+                if torch.cuda.is_available():
+                    free_vram = torch.cuda.mem_get_info()[0] / 1024**3
+                    if free_vram > 1.5:
+                        device = "cuda"
+                    else:
+                        device = "cpu"
+                else:
+                    device = "cpu"
+            except Exception:
+                device = "cpu"
 
         self.model_name = model_name
         self.pretrained = pretrained