#!/usr/bin/env python3
"""
Quick test of the Ollama integration with qwen3-vl:8b

Usage: python quick_test_ollama.py
"""

import sys
from pathlib import Path

# Make the project root importable so that core.detection resolves.
sys.path.insert(0, str(Path(__file__).parent.parent))

from core.detection import OllamaClient, check_ollama_available
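# Note (assumption): core.detection is expected to expose check_ollama_available()
# returning a bool, and an OllamaClient whose list_models() returns model names and
# whose generate() returns a dict with "success", "response" and "error" keys, as
# used below. A standalone sketch of equivalent calls against the Ollama HTTP API
# is given at the end of this file.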


def main():
    print("🔍 Quick test: Ollama + qwen3-vl:8b\n")

    # 1. Check that the Ollama server is reachable
    print("1️⃣ Checking Ollama...")
    if not check_ollama_available():
        print("   ❌ Ollama is not available")
        print("   💡 Start it with: ollama serve")
        return
    print("   ✅ Ollama is available\n")
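    # By default `ollama serve` listens on http://localhost:11434;
    # check_ollama_available() presumably probes that local endpoint.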

    # 2. List the locally installed models
    print("2️⃣ Available models:")
    client = OllamaClient(model="qwen3-vl:8b")
    models = client.list_models()

    vision_models = [m for m in models if 'vl' in m.lower() or 'vision' in m.lower()]
    if vision_models:
        for model in vision_models:
            marker = "✅" if "qwen3-vl:8b" in model else " "
            print(f"   {marker} {model}")
    else:
        print("   ⚠️ No vision model found")
        print("   💡 Install one with: ollama pull qwen3-vl:8b")
        return
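    # Vision support is inferred purely from the model name ('vl' / 'vision');
    # a multimodal model whose name lacks those substrings would be skipped here.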

    # 3. Simple generation test (text-only prompt)
    print("\n3️⃣ Generation test...")
    result = client.generate(
        "Describe what you see in one sentence.",
        temperature=0.1
    )

    if result["success"]:
        print(f"   ✅ Response: {result['response'][:100]}...")
    else:
        print(f"   ❌ Error: {result['error']}")
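    # No image is attached to this prompt, so this only verifies that text
    # generation works end to end; temperature=0.1 keeps the output nearly
    # deterministic. For a vision test with a screenshot, see the hint below.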

    print("\n✨ Test finished!")
    print("\n💡 To test with a screenshot:")
    print("   python test_ollama_integration.py <screenshot.png>")


if __name__ == "__main__":
    main()
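

# ---------------------------------------------------------------------------
# Appendix (illustrative sketch only): the helpers imported from core.detection
# are not shown in this file. Assuming Ollama's default HTTP API at
# http://localhost:11434, equivalent checks could look like the functions
# below. Names and error handling here are assumptions for illustration, not
# the project's actual core.detection implementation.
# ---------------------------------------------------------------------------
import json
import urllib.error
import urllib.request

OLLAMA_URL = "http://localhost:11434"  # Ollama's default listen address


def _sketch_check_ollama_available(timeout: float = 2.0) -> bool:
    """Return True if the Ollama server answers on GET /api/tags."""
    try:
        with urllib.request.urlopen(f"{OLLAMA_URL}/api/tags", timeout=timeout):
            return True
    except (urllib.error.URLError, OSError):
        return False


def _sketch_list_models() -> list:
    """List locally installed model names via GET /api/tags."""
    with urllib.request.urlopen(f"{OLLAMA_URL}/api/tags", timeout=5.0) as resp:
        data = json.load(resp)
    return [m["name"] for m in data.get("models", [])]


def _sketch_generate(model: str, prompt: str, temperature: float = 0.1) -> dict:
    """Single non-streaming completion via POST /api/generate."""
    payload = json.dumps({
        "model": model,
        "prompt": prompt,
        "stream": False,
        "options": {"temperature": temperature},
    }).encode("utf-8")
    req = urllib.request.Request(
        f"{OLLAMA_URL}/api/generate",
        data=payload,
        headers={"Content-Type": "application/json"},
    )
    try:
        with urllib.request.urlopen(req, timeout=120.0) as resp:
            body = json.load(resp)
        return {"success": True, "response": body.get("response", ""), "error": None}
    except (urllib.error.URLError, OSError) as exc:
        return {"success": False, "response": None, "error": str(exc)}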