feat(agent_chat): Activer intégration LLM Ollama pour parsing intelligent
- Activer use_llm=True par défaut dans app.py
- Améliorer le prompt LLM avec contexte des workflows disponibles
- Ajouter endpoints /api/llm/status et /api/llm/model pour configuration
- Permettre injection dynamique des workflows dans IntentParser
- Supporter changement de modèle à chaud

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -115,11 +115,20 @@ def init_system():
|
||||
|
||||
# 3. Composants conversationnels
|
||||
try:
|
||||
intent_parser = get_intent_parser(use_llm=False) # LLM optionnel
|
||||
intent_parser = get_intent_parser(use_llm=True) # LLM activé (Ollama)
|
||||
confirmation_loop = get_confirmation_loop()
|
||||
response_generator = get_response_generator()
|
||||
conversation_manager = get_conversation_manager()
|
||||
logger.info("✓ Composants conversationnels initialisés")
|
||||
|
||||
# Injecter les workflows dans l'intent_parser pour contexte LLM
|
||||
if matcher and intent_parser:
|
||||
workflows_for_llm = [
|
||||
{"name": wf.name, "description": wf.description, "tags": wf.tags}
|
||||
for wf in matcher.get_all_workflows()
|
||||
]
|
||||
intent_parser.set_workflows(workflows_for_llm)
|
||||
|
||||
logger.info("✓ Composants conversationnels initialisés (LLM activé)")
|
||||
except Exception as e:
|
||||
logger.error(f"✗ Composants conversationnels: {e}")
|
||||
# Fallback aux composants de base
|
||||
@@ -560,6 +569,51 @@ def api_gpu_action(action):
|
||||
return jsonify({"success": False, "error": str(e)})
|
||||
|
||||
|
||||
@app.route('/api/llm/status')
def api_llm_status():
    """Return the status of the LLM integration (Ollama).

    Reports whether the LLM is enabled and reachable on the global
    ``intent_parser``, which model and endpoint it uses, and how many
    workflows have been injected for prompt context.  Also queries the
    local Ollama server for the list of installed models.

    Returns:
        JSON object with keys ``enabled``, ``available``, ``model``,
        ``endpoint``, ``workflows_loaded`` and ``available_models``.
    """
    status = {
        "enabled": intent_parser.use_llm if intent_parser else False,
        "available": intent_parser.llm_available if intent_parser else False,
        "model": intent_parser.llm_model if intent_parser else None,
        "endpoint": intent_parser.llm_endpoint if intent_parser else None,
        # NOTE(review): reads a private attribute of IntentParser; consider
        # exposing a public accessor instead of touching _workflows_cache.
        "workflows_loaded": len(intent_parser._workflows_cache) if intent_parser else 0
    }

    # List the Ollama models installed locally.  The short timeout keeps this
    # endpoint responsive when the Ollama daemon is not running.
    try:
        import requests
        response = requests.get("http://localhost:11434/api/tags", timeout=2)
        if response.status_code == 200:
            models = response.json().get('models', [])
            status["available_models"] = [m["name"] for m in models]
        else:
            # Fix: the original left "available_models" unset on non-200
            # responses; always provide the key so clients can rely on it.
            status["available_models"] = []
    except Exception:
        # Fix: was a bare "except:", which also swallows SystemExit and
        # KeyboardInterrupt.  Ollama being down is an expected condition,
        # so degrade gracefully to an empty model list.
        status["available_models"] = []

    return jsonify(status)
|
||||
|
||||
|
||||
@app.route('/api/llm/model', methods=['POST'])
def api_llm_set_model():
    """Hot-swap the LLM model used by the intent parser.

    Expects a JSON body of the form ``{"model": "<name>"}``.  After
    switching, availability is re-checked so the caller immediately knows
    whether the new model is usable.

    Returns:
        JSON object with ``success`` and, on success, the new ``model``
        name and its ``available`` flag; on failure, an ``error`` message.
    """
    # Fix: request.json aborts with a 400/415 when the body is missing or
    # not JSON, and would yield None (breaking .get) on an empty JSON body.
    # silent=True plus the "or {}" fallback turns both cases into the
    # regular "Modèle non spécifié" error response instead.
    data = request.get_json(silent=True) or {}
    model = data.get('model')

    if not model:
        return jsonify({"success": False, "error": "Modèle non spécifié"})

    if intent_parser:
        intent_parser.llm_model = model
        # Probe the new model so "available" reflects the hot-swapped state.
        intent_parser._check_llm_availability()
        return jsonify({
            "success": True,
            "model": model,
            "available": intent_parser.llm_available
        })

    return jsonify({"success": False, "error": "IntentParser non initialisé"})
|
||||
|
||||
|
||||
@app.route('/api/help')
|
||||
def api_help():
|
||||
"""Aide et mode d'emploi."""
|
||||
|
||||
Reference in New Issue
Block a user