feat: Léa chat + IRBuilder enrichi (stratégies V4 complètes)
Aspect 2/4 Léa : interface conversationnelle
- chat_interface.py : ChatSession thread-safe, états idle/planning/awaiting/executing/done
- 5 endpoints REST : /api/v1/chat/* (session, message, history, confirm, sessions)
- web_dashboard/chat.html + chat.js : UI minimaliste, polling 2s, pas de framework
- Proxy Flask /api/chat/* → serveur streaming
- 34 tests (happy path, abandon, refus, erreurs, gemma4 down)
IRBuilder enrichi pour plans V4 complets
- _event_to_action() appelle enrich_click_from_screenshot() quand session_dir dispo
- Chaque clic porte _enrichment (by_text OCR, anchor_image_base64, vlm_description)
- ExecutionCompiler consomme l'enrichissement pour produire 3 stratégies par clic
Avant : [ocr] uniquement, target="unknown_window"
Après : [ocr, template, vlm] avec vrai texte OCR ("Rechercher", "Ouvrir")
Validé sur session réelle : 10/10 clics enrichis (by_text + anchor + vlm_description)
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -780,6 +780,11 @@ async def stream_event(data: StreamEvent):
|
||||
# Traitement direct via StreamProcessor
|
||||
result = worker.process_event_direct(session_id, data.event)
|
||||
|
||||
# ── Observation Shadow (si mode Shadow activé pour cette session) ──
|
||||
# L'appel est protégé et non bloquant : si l'observer n'est pas
|
||||
# actif, ou s'il lève, la capture continue normalement.
|
||||
shadow_observe_event(session_id, data.event)
|
||||
|
||||
# ── Enrichissement SomEngine temps réel pour les mouse_click ──
|
||||
# Après l'enregistrement de l'event, tenter l'enrichissement si le
|
||||
# screenshot est déjà arrivé. Sinon, l'event est mis en attente et
|
||||
@@ -1338,6 +1343,249 @@ async def requeue_session(session_id: str):
|
||||
}
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Shadow mode — observation temps réel + feedback utilisateur
|
||||
# =========================================================================
|
||||
#
|
||||
# Endpoints utilisés par la GUI Léa pour :
|
||||
# - Démarrer/arrêter le mode Shadow sur une session en cours
|
||||
# - Récupérer en temps réel ce que Léa a compris
|
||||
# - Envoyer des feedbacks (valider/corriger/annuler/fusionner)
|
||||
# - Construire le WorkflowIR final après validation
|
||||
#
|
||||
# Source de vérité : events.jsonl (inchangé). Le ShadowObserver est une
|
||||
# couche d'observation facultative qui ne modifie PAS la capture.
|
||||
#
|
||||
# Import paresseux pour ne pas alourdir le démarrage serveur si la
|
||||
# feature n'est pas utilisée.
|
||||
# =========================================================================
|
||||
|
||||
# Shared Shadow-mode state. All access goes through _shadow_lock; the
# observer is a lazily created singleton (see _get_shadow_observer).
_shadow_observer = None
_shadow_validators: Dict[str, Any] = {}  # session_id -> ShadowValidator
_shadow_lock = threading.Lock()
|
||||
|
||||
|
||||
def _get_shadow_observer():
    """Return the process-wide ShadowObserver, creating it on first access."""
    global _shadow_observer
    with _shadow_lock:
        if _shadow_observer is None:
            # Lazy import: keep server startup cheap when Shadow is unused.
            from core.workflow.shadow_observer import get_shared_observer
            _shadow_observer = get_shared_observer()
        observer = _shadow_observer
    return observer
|
||||
|
||||
|
||||
def _get_shadow_validator(session_id: str):
    """Return the ShadowValidator for *session_id*, creating it on demand."""
    from core.workflow.shadow_validator import ShadowValidator
    with _shadow_lock:
        validator = _shadow_validators.get(session_id)
        if validator is None:
            validator = ShadowValidator()
            _shadow_validators[session_id] = validator
        return validator
|
||||
|
||||
|
||||
def shadow_observe_event(session_id: str, event: Dict[str, Any]) -> None:
    """Feed one captured event to the ShadowObserver, if the session is observed.

    Called from stream_event() after the event has been recorded. Every
    exception is deliberately swallowed (debug-logged) so an observation
    failure can never break the capture pipeline.
    """
    try:
        observer = _get_shadow_observer()
        if not observer.has_session(session_id):
            return
        observer.observe_event(session_id, event)
    except Exception as e:
        logger.debug(f"shadow_observe_event: {e}")
|
||||
|
||||
|
||||
class ShadowStartRequest(BaseModel):
    """Body for /api/v1/shadow/start and /api/v1/shadow/stop."""
    session_id: str  # id of the capture session to observe
|
||||
|
||||
|
||||
class ShadowFeedbackRequest(BaseModel):
    """User feedback sent while (or after) recording.

    action:
        - "validate"   : validate the step
        - "correct"    : fix the intent (new_intent required)
        - "undo"       : undo the step
        - "cancel"     : cancel the whole workflow
        - "merge_next" : merge with the next step
        - "split"      : split (at_event_index required)
    """
    session_id: str
    action: str
    step_index: Optional[int] = None  # target step, when the action needs one
    new_intent: Optional[str] = None  # for "correct"
    at_event_index: Optional[int] = None  # for "split"
|
||||
|
||||
|
||||
class ShadowBuildRequest(BaseModel):
    """Build the final WorkflowIR from the accumulated feedbacks."""
    session_id: str
    name: str = ""  # workflow display name (empty = let the builder choose)
    domain: str = "generic"
    # NOTE(review): presumably makes the build fail unless every step was
    # validated — confirm against ShadowValidator.build_workflow_ir.
    require_all_validated: bool = False
|
||||
|
||||
|
||||
@app.post("/api/v1/shadow/start")
|
||||
async def shadow_start(request: ShadowStartRequest):
|
||||
"""Démarrer le mode Shadow pour une session en cours.
|
||||
|
||||
Une fois démarré, chaque événement reçu via /api/v1/traces/stream/event
|
||||
alimentera le ShadowObserver pour construire la compréhension en
|
||||
temps réel.
|
||||
"""
|
||||
observer = _get_shadow_observer()
|
||||
observer.start(request.session_id)
|
||||
logger.info(f"Shadow mode démarré pour la session {request.session_id}")
|
||||
return {
|
||||
"status": "shadow_started",
|
||||
"session_id": request.session_id,
|
||||
"message": "Léa observe — fais ta tâche normalement.",
|
||||
}
|
||||
|
||||
|
||||
@app.post("/api/v1/shadow/stop")
|
||||
async def shadow_stop(request: ShadowStartRequest):
|
||||
"""Arrêter le mode Shadow (sans détruire l'état).
|
||||
|
||||
La compréhension reste accessible via /api/v1/shadow/{id}/understanding
|
||||
jusqu'à ce que /api/v1/shadow/build soit appelé ou la session finalisée.
|
||||
"""
|
||||
observer = _get_shadow_observer()
|
||||
observer.stop(request.session_id)
|
||||
understanding = observer.get_understanding(request.session_id)
|
||||
return {
|
||||
"status": "shadow_stopped",
|
||||
"session_id": request.session_id,
|
||||
"steps_count": len(understanding),
|
||||
"understanding": understanding,
|
||||
}
|
||||
|
||||
|
||||
@app.post("/api/v1/shadow/feedback")
|
||||
async def shadow_feedback(request: ShadowFeedbackRequest):
|
||||
"""Recevoir un feedback utilisateur pendant ou après l'enregistrement.
|
||||
|
||||
body : {session_id, action, step_index?, new_intent?, at_event_index?}
|
||||
"""
|
||||
observer = _get_shadow_observer()
|
||||
if not observer.has_session(request.session_id):
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail=f"Aucune session Shadow active pour {request.session_id}",
|
||||
)
|
||||
|
||||
validator = _get_shadow_validator(request.session_id)
|
||||
# Recharger les étapes courantes depuis l'observer
|
||||
validator.set_steps(observer.get_steps_internal(request.session_id))
|
||||
|
||||
feedback_dict: Dict[str, Any] = {"action": request.action}
|
||||
if request.step_index is not None:
|
||||
feedback_dict["step_index"] = request.step_index
|
||||
if request.new_intent is not None:
|
||||
feedback_dict["new_intent"] = request.new_intent
|
||||
if request.at_event_index is not None:
|
||||
feedback_dict["at_event_index"] = request.at_event_index
|
||||
|
||||
result = validator.apply_feedback(feedback_dict)
|
||||
return {
|
||||
"status": "feedback_applied" if result.ok else "feedback_rejected",
|
||||
"session_id": request.session_id,
|
||||
"result": result.to_dict(),
|
||||
}
|
||||
|
||||
|
||||
@app.get("/api/v1/shadow/{session_id}/understanding")
|
||||
async def shadow_get_understanding(session_id: str, since_ts: float = 0.0):
|
||||
"""Récupérer ce que Léa a compris jusqu'ici.
|
||||
|
||||
Returns:
|
||||
{
|
||||
"session_id": ...,
|
||||
"steps": [
|
||||
{"step": 1, "intent": "...", "confidence": 0.9, ...},
|
||||
...
|
||||
],
|
||||
"current_step": {...} | None,
|
||||
"notifications": [...] # Seulement celles depuis since_ts
|
||||
}
|
||||
"""
|
||||
observer = _get_shadow_observer()
|
||||
if not observer.has_session(session_id):
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail=f"Aucune session Shadow active pour {session_id}",
|
||||
)
|
||||
return {
|
||||
"session_id": session_id,
|
||||
"steps": observer.get_understanding(session_id, include_current=False),
|
||||
"current_step": observer.get_current_step(session_id),
|
||||
"notifications": observer.get_notifications(session_id, since_ts=since_ts),
|
||||
}
|
||||
|
||||
|
||||
@app.post("/api/v1/shadow/build")
|
||||
async def shadow_build(request: ShadowBuildRequest):
|
||||
"""Construire le WorkflowIR final à partir des étapes validées/corrigées.
|
||||
|
||||
Le WorkflowIR est retourné mais pas encore sauvegardé — c'est au
|
||||
caller de décider s'il l'écrit sur disque ou le compile en
|
||||
ExecutionPlan.
|
||||
"""
|
||||
observer = _get_shadow_observer()
|
||||
if not observer.has_session(request.session_id):
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail=f"Aucune session Shadow active pour {request.session_id}",
|
||||
)
|
||||
|
||||
validator = _get_shadow_validator(request.session_id)
|
||||
# S'assurer que le validator voit les étapes finales de l'observer
|
||||
validator.set_steps(observer.get_steps_internal(request.session_id))
|
||||
|
||||
# Réappliquer l'historique n'est PAS nécessaire : on s'attend à ce que
|
||||
# les feedbacks aient été appliqués dans l'ordre via /api/v1/shadow/feedback
|
||||
# et que le validator ait accumulé son état. Mais puisqu'on vient de
|
||||
# recharger les étapes, on perd les corrections. Stratégie : conserver
|
||||
# l'historique et le rejouer.
|
||||
history = validator.history
|
||||
validator.set_steps(observer.get_steps_internal(request.session_id))
|
||||
for entry in history:
|
||||
# Rejouer en reconstruisant un feedback depuis le résultat
|
||||
data = entry.data or {}
|
||||
fb: Dict[str, Any] = {"action": entry.action, "step_index": entry.step_index}
|
||||
if "new_intent" in data:
|
||||
fb["new_intent"] = data["new_intent"]
|
||||
validator.apply_feedback(fb)
|
||||
|
||||
try:
|
||||
ir = validator.build_workflow_ir(
|
||||
session_id=request.session_id,
|
||||
name=request.name,
|
||||
domain=request.domain,
|
||||
require_all_validated=request.require_all_validated,
|
||||
)
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
|
||||
if ir is None:
|
||||
return {
|
||||
"status": "cancelled",
|
||||
"session_id": request.session_id,
|
||||
"message": "Workflow annulé par l'utilisateur",
|
||||
}
|
||||
|
||||
return {
|
||||
"status": "workflow_built",
|
||||
"session_id": request.session_id,
|
||||
"workflow_ir": ir.to_dict(),
|
||||
}
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Monitoring
|
||||
# =========================================================================
|
||||
@@ -3737,6 +3985,160 @@ def _extract_session_description(events_file) -> Dict[str, Any]:
|
||||
return {"name": "?", "description": "", "event_count": 0}
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Chat conversationnel (Léa conversationnelle)
|
||||
# =========================================================================
|
||||
|
||||
from .chat_interface import ChatManager # noqa: E402
|
||||
|
||||
|
||||
def _chat_replay_callback(session_id="", machine_id="default", params=None, **kwargs):
    """Launch a replay on behalf of a ChatSession; return the replay_id.

    Goes through the /replay-session HTTP endpoint on purpose, to reuse the
    existing auth / rate-limit / enqueue logic.

    Raises:
        ValueError: empty session_id.
        RuntimeError: the replay endpoint answered with a non-2xx status.
    """
    import requests as _req
    if not session_id:
        raise ValueError("session_id requis pour replay chat")
    # Use `params=` so session_id / machine_id are properly URL-encoded
    # (the previous hand-built query string broke on special characters).
    resp = _req.post(
        "http://localhost:5005/api/v1/traces/stream/replay-session",
        params={"session_id": session_id, "machine_id": machine_id},
        headers={"Authorization": f"Bearer {API_TOKEN}"},
        timeout=600,  # replays can be long-running
    )
    if not resp.ok:
        raise RuntimeError(f"Replay échoué: {resp.text[:200]}")
    return resp.json().get("replay_id", "")
|
||||
|
||||
|
||||
def _chat_status_provider(replay_id: str) -> Dict[str, Any]:
    """Return the in-memory state of a replay for ChatSession polling.

    Reads _replay_states directly (no HTTP round-trip). Internal keys
    (prefixed with "_") are stripped; unknown/empty replay ids yield {}.
    """
    if not replay_id:
        return {}
    with _replay_lock:
        state = _replay_states.get(replay_id)
        if not state:
            return {}
        public: Dict[str, Any] = {}
        for key, value in state.items():
            if not key.startswith("_"):
                public[key] = value
        return public
|
||||
|
||||
|
||||
# Singleton ChatManager wired to the server's planner, workflow listing and
# replay machinery — all injected so ChatSession stays easily testable.
_chat_manager = ChatManager(
    task_planner=_task_planner,
    workflows_provider=_list_available_workflows,
    replay_callback=_chat_replay_callback,
    status_provider=_chat_status_provider,
)
|
||||
|
||||
|
||||
class ChatMessageRequest(BaseModel):
    """Body for POST /api/v1/chat/{id}/message."""
    message: str  # free-form user text
|
||||
|
||||
|
||||
class ChatConfirmRequest(BaseModel):
    """Body for POST /api/v1/chat/{id}/confirm: accept or refuse the plan."""
    confirmed: bool = True  # False refuses the pending plan
|
||||
|
||||
|
||||
class ChatSessionCreateRequest(BaseModel):
    """Body for POST /api/v1/chat/session."""
    machine_id: str = "default"  # machine the replays will target
|
||||
|
||||
|
||||
@app.post("/api/v1/chat/session")
|
||||
async def create_chat_session(request: ChatSessionCreateRequest = None):
|
||||
"""Créer une nouvelle session de chat avec Léa."""
|
||||
machine_id = request.machine_id if request else "default"
|
||||
session = _chat_manager.create_session(machine_id=machine_id)
|
||||
return {
|
||||
"ok": True,
|
||||
"session_id": session.session_id,
|
||||
"state": session.state,
|
||||
"history": session.get_history(),
|
||||
}
|
||||
|
||||
|
||||
@app.post("/api/v1/chat/{session_id}/message")
|
||||
async def post_chat_message(session_id: str, request: ChatMessageRequest):
|
||||
"""Envoyer un message dans une session de chat."""
|
||||
import asyncio
|
||||
|
||||
session = _chat_manager.get_session(session_id)
|
||||
if session is None:
|
||||
raise HTTPException(status_code=404, detail=f"Session chat '{session_id}' non trouvée")
|
||||
|
||||
loop = asyncio.get_event_loop()
|
||||
result = await loop.run_in_executor(
|
||||
None,
|
||||
lambda: session.send_message(request.message),
|
||||
)
|
||||
# Toujours retourner l'historique + l'état courant pour que le client se mette à jour
|
||||
return {
|
||||
**result,
|
||||
"session_id": session_id,
|
||||
"state": session.state,
|
||||
"history": session.get_history(),
|
||||
}
|
||||
|
||||
|
||||
@app.get("/api/v1/chat/{session_id}/history")
|
||||
async def get_chat_history(session_id: str):
|
||||
"""Récupérer l'historique d'une session de chat."""
|
||||
session = _chat_manager.get_session(session_id)
|
||||
if session is None:
|
||||
raise HTTPException(status_code=404, detail=f"Session chat '{session_id}' non trouvée")
|
||||
|
||||
# Rafraîchir la progression si en cours d'exécution
|
||||
if session.state == "executing":
|
||||
try:
|
||||
session.refresh_progress()
|
||||
except Exception as e:
|
||||
logger.debug(f"chat refresh_progress erreur: {e}")
|
||||
|
||||
return {
|
||||
"ok": True,
|
||||
"session_id": session_id,
|
||||
"snapshot": session.get_snapshot(),
|
||||
}
|
||||
|
||||
|
||||
@app.post("/api/v1/chat/{session_id}/confirm")
|
||||
async def confirm_chat_plan(session_id: str, request: ChatConfirmRequest = None):
|
||||
"""Confirmer (ou refuser) l'exécution du plan en attente."""
|
||||
import asyncio
|
||||
|
||||
session = _chat_manager.get_session(session_id)
|
||||
if session is None:
|
||||
raise HTTPException(status_code=404, detail=f"Session chat '{session_id}' non trouvée")
|
||||
|
||||
confirmed = request.confirmed if request else True
|
||||
loop = asyncio.get_event_loop()
|
||||
result = await loop.run_in_executor(
|
||||
None,
|
||||
lambda: session.confirm(confirmed=confirmed),
|
||||
)
|
||||
return {
|
||||
**result,
|
||||
"session_id": session_id,
|
||||
"state": session.state,
|
||||
"history": session.get_history(),
|
||||
}
|
||||
|
||||
|
||||
@app.get("/api/v1/chat/sessions")
|
||||
async def list_chat_sessions():
|
||||
"""Lister toutes les sessions de chat actives."""
|
||||
return {
|
||||
"ok": True,
|
||||
"sessions": _chat_manager.list_sessions(),
|
||||
}
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import uvicorn
|
||||
|
||||
|
||||
622
agent_v0/server_v1/chat_interface.py
Normal file
622
agent_v0/server_v1/chat_interface.py
Normal file
@@ -0,0 +1,622 @@
|
||||
"""
|
||||
ChatInterface — Interface de chat conversationnelle pour Léa.
|
||||
|
||||
Permet au TIM (Technicien Information Médicale) de parler à Léa en langage
|
||||
naturel :
|
||||
- "Ouvre le Bloc-notes et écris bonjour"
|
||||
- Léa comprend (TaskPlanner) et propose un plan
|
||||
- Le TIM confirme (ou refuse)
|
||||
- Léa exécute (replay) et envoie des updates de progression
|
||||
- Historique conversationnel conservé par session
|
||||
|
||||
C'est une couche LÉGÈRE au-dessus du TaskPlanner. Toute la logique de
|
||||
compréhension reste dans TaskPlanner — ChatInterface gère uniquement
|
||||
l'état conversationnel, la confirmation et le suivi d'exécution.
|
||||
|
||||
États de la session :
|
||||
idle → en attente d'un message
|
||||
planning → TaskPlanner.understand() en cours
|
||||
awaiting_confirmation → plan prêt, attend la confirmation du TIM
|
||||
executing → replay en cours
|
||||
done → dernier tour terminé (retour à idle au prochain message)
|
||||
error → erreur interne (instruction non comprise, exception…)
|
||||
|
||||
Langue : 100% français (c'est l'interface utilisateur).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import threading
|
||||
import time
|
||||
import uuid
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any, Callable, Dict, List, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# =============================================================================
|
||||
# États
|
||||
# =============================================================================
|
||||
|
||||
STATE_IDLE = "idle"
|
||||
STATE_PLANNING = "planning"
|
||||
STATE_AWAITING_CONFIRMATION = "awaiting_confirmation"
|
||||
STATE_EXECUTING = "executing"
|
||||
STATE_DONE = "done"
|
||||
STATE_ERROR = "error"
|
||||
|
||||
VALID_STATES = {
|
||||
STATE_IDLE,
|
||||
STATE_PLANNING,
|
||||
STATE_AWAITING_CONFIRMATION,
|
||||
STATE_EXECUTING,
|
||||
STATE_DONE,
|
||||
STATE_ERROR,
|
||||
}
|
||||
|
||||
# Rôles de messages
|
||||
ROLE_USER = "user"
|
||||
ROLE_LEA = "lea"
|
||||
ROLE_SYSTEM = "system"
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Message
|
||||
# =============================================================================
|
||||
|
||||
@dataclass
class ChatMessage:
    """One entry in a conversation transcript."""
    role: str  # "user", "lea" or "system"
    content: str  # message text
    timestamp: float = field(default_factory=time.time)
    # Optional contextual payload (plan, result, progress, ...)
    meta: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict (JSON-friendly)."""
        return dict(
            role=self.role,
            content=self.content,
            timestamp=self.timestamp,
            meta=self.meta,
        )
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# ChatSession
|
||||
# =============================================================================
|
||||
|
||||
class ChatSession:
|
||||
"""Une conversation entre un utilisateur et Léa.
|
||||
|
||||
Maintient l'historique, l'état courant, et le dernier plan en attente
|
||||
de confirmation. Thread-safe (un lock par session).
|
||||
|
||||
Dépendances injectées (pour tester facilement) :
|
||||
- task_planner : instance de TaskPlanner (ou mock)
|
||||
- workflows_provider : callable () -> List[Dict] (liste des workflows)
|
||||
- replay_callback : callable (session_id, machine_id, params) -> replay_id
|
||||
- status_provider : callable (replay_id) -> Dict (pour suivre l'exécution)
|
||||
|
||||
Toutes ces dépendances sont optionnelles : ChatSession dégrade
|
||||
gracieusement (fallback) si gemma4 / replay indisponibles.
|
||||
"""
|
||||
|
||||
    def __init__(
        self,
        session_id: str = "",
        task_planner: Any = None,
        workflows_provider: Optional[Callable[[], List[Dict[str, Any]]]] = None,
        replay_callback: Optional[Callable[..., str]] = None,
        status_provider: Optional[Callable[[str], Dict[str, Any]]] = None,
        machine_id: str = "default",
    ):
        """Create a chat session.

        Args:
            session_id: Explicit id; auto-generated ("chat_<12 hex>") when empty.
            task_planner: TaskPlanner-like object (or mock) used to understand
                instructions; may be None (degraded mode).
            workflows_provider: Callable returning the list of known workflows.
            replay_callback: Callable (session_id, machine_id, params) -> replay_id.
            status_provider: Callable replay_id -> status dict, used for polling.
            machine_id: Machine targeted by replays.
        """
        self.session_id = session_id or f"chat_{uuid.uuid4().hex[:12]}"
        self.machine_id = machine_id
        self.created_at = time.time()
        self.updated_at = self.created_at

        # Injected dependencies — all optional so the session degrades
        # gracefully when gemma4 / replay are unavailable.
        self._task_planner = task_planner
        self._workflows_provider = workflows_provider
        self._replay_callback = replay_callback
        self._status_provider = status_provider

        # Conversational state.
        self._state: str = STATE_IDLE
        self._messages: List[ChatMessage] = []
        self._pending_plan: Any = None  # TaskPlan awaiting confirmation
        self._active_replay_id: str = ""  # current replay (while executing)
        self._last_progress: Dict[str, Any] = {}

        # Re-entrant: internal helpers may re-acquire the lock.
        self._lock = threading.RLock()

        # Welcome message from Léa.
        self._append(
            ROLE_LEA,
            "Bonjour ! Je suis Léa. Dites-moi ce que vous voulez que je fasse.",
            meta={"welcome": True},
        )
|
||||
|
||||
# ---------------------------------------------------------------------
|
||||
# Accesseurs
|
||||
# ---------------------------------------------------------------------
|
||||
|
||||
    @property
    def state(self) -> str:
        """Current lifecycle state (one of VALID_STATES)."""
        # Read under the lock so concurrent transitions are never half-seen.
        with self._lock:
            return self._state
|
||||
|
||||
def get_history(self) -> List[Dict[str, Any]]:
|
||||
"""Retourne l'historique complet des messages (sérialisé)."""
|
||||
with self._lock:
|
||||
return [m.to_dict() for m in self._messages]
|
||||
|
||||
def get_snapshot(self) -> Dict[str, Any]:
|
||||
"""État complet pour l'UI (historique + état + progression)."""
|
||||
with self._lock:
|
||||
return {
|
||||
"session_id": self.session_id,
|
||||
"state": self._state,
|
||||
"machine_id": self.machine_id,
|
||||
"created_at": self.created_at,
|
||||
"updated_at": self.updated_at,
|
||||
"messages": [m.to_dict() for m in self._messages],
|
||||
"pending_plan": (
|
||||
self._pending_plan.to_dict()
|
||||
if self._pending_plan is not None
|
||||
else None
|
||||
),
|
||||
"active_replay_id": self._active_replay_id,
|
||||
"progress": dict(self._last_progress),
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------
|
||||
# API publique
|
||||
# ---------------------------------------------------------------------
|
||||
|
||||
def send_message(self, text: str) -> Dict[str, Any]:
|
||||
"""Envoyer un message utilisateur.
|
||||
|
||||
Trois cas possibles selon l'état courant :
|
||||
1. awaiting_confirmation → c'est une réponse OUI/NON
|
||||
2. executing → on rafraîchit la progression
|
||||
3. idle/done/error → nouvelle instruction, on appelle TaskPlanner
|
||||
"""
|
||||
text = (text or "").strip()
|
||||
if not text:
|
||||
return {
|
||||
"ok": False,
|
||||
"error": "Message vide",
|
||||
"state": self._state,
|
||||
}
|
||||
|
||||
with self._lock:
|
||||
# Cas 1 : on attend une confirmation
|
||||
if self._state == STATE_AWAITING_CONFIRMATION:
|
||||
return self._handle_confirmation_reply(text)
|
||||
|
||||
# Cas 2 : en pleine exécution → message ajouté mais pas d'action
|
||||
if self._state == STATE_EXECUTING:
|
||||
self._append(ROLE_USER, text)
|
||||
self._append(
|
||||
ROLE_LEA,
|
||||
"Je suis en train d'exécuter le workflow. Un instant…",
|
||||
)
|
||||
return {"ok": True, "state": self._state}
|
||||
|
||||
# Cas 3 : nouvelle instruction
|
||||
self._append(ROLE_USER, text)
|
||||
self._set_state(STATE_PLANNING)
|
||||
|
||||
# Appel TaskPlanner hors du lock (peut être lent : gemma4)
|
||||
return self._plan_and_reply(text)
|
||||
|
||||
def confirm(self, confirmed: bool = True) -> Dict[str, Any]:
|
||||
"""Confirmer (ou refuser) l'exécution du plan en attente."""
|
||||
with self._lock:
|
||||
if self._state != STATE_AWAITING_CONFIRMATION:
|
||||
return {
|
||||
"ok": False,
|
||||
"error": f"Pas de plan en attente (état={self._state})",
|
||||
"state": self._state,
|
||||
}
|
||||
|
||||
if not confirmed:
|
||||
self._append(
|
||||
ROLE_LEA,
|
||||
"D'accord, j'annule. Dites-moi autre chose quand vous voulez.",
|
||||
)
|
||||
self._pending_plan = None
|
||||
self._set_state(STATE_IDLE)
|
||||
return {"ok": True, "state": self._state, "confirmed": False}
|
||||
|
||||
plan = self._pending_plan
|
||||
if plan is None:
|
||||
self._set_state(STATE_IDLE)
|
||||
return {
|
||||
"ok": False,
|
||||
"error": "Aucun plan à confirmer",
|
||||
"state": self._state,
|
||||
}
|
||||
|
||||
self._set_state(STATE_EXECUTING)
|
||||
|
||||
# Exécution hors du lock
|
||||
return self._execute_plan(plan)
|
||||
|
||||
    def refresh_progress(self) -> Dict[str, Any]:
        """Refresh the progress of the running replay (client polling).

        When the replay reports a terminal status, the session transitions
        to done/error and a summary message is appended to the history.
        The status_provider call happens between two lock acquisitions so a
        slow provider never blocks other session operations.
        """
        with self._lock:
            # Nothing to poll unless a replay is actually running.
            if self._state != STATE_EXECUTING or not self._active_replay_id:
                return {"ok": True, "state": self._state, "progress": self._last_progress}

            replay_id = self._active_replay_id
            provider = self._status_provider

        if provider is None:
            return {"ok": True, "state": self._state, "progress": {}}

        # Provider call outside the lock (may do I/O); errors degrade to {}.
        try:
            status = provider(replay_id) or {}
        except Exception as e:
            logger.warning(f"ChatSession: status_provider erreur: {e}")
            status = {}

        with self._lock:
            self._last_progress = status
            self.updated_at = time.time()

            # Terminal-state detection from the replay status payload.
            replay_status = str(status.get("status", "")).lower()
            completed = status.get("completed_actions", 0)
            total = status.get("total_actions", 0)

            if replay_status in ("done", "completed", "finished", "success"):
                summary = (
                    f"Workflow terminé ! {completed}/{total} actions réussies."
                    if total
                    else "Workflow terminé."
                )
                self._append(ROLE_LEA, summary, meta={"progress": dict(status)})
                self._set_state(STATE_DONE)
                self._active_replay_id = ""
            elif replay_status in ("failed", "error", "aborted"):
                err = status.get("error") or status.get("message") or "Erreur inconnue"
                self._append(
                    ROLE_LEA,
                    f"Le workflow a échoué : {err}",
                    meta={"progress": dict(status)},
                )
                self._set_state(STATE_ERROR)
                self._active_replay_id = ""
            elif replay_status == "paused_need_help":
                self._append(
                    ROLE_LEA,
                    "Je suis bloquée sur une action, j'ai besoin d'aide…",
                    meta={"progress": dict(status)},
                )
                # Stay in "executing" so the user can resume.
            # else: still running — no message appended.

            return {
                "ok": True,
                "state": self._state,
                "progress": dict(self._last_progress),
            }
|
||||
|
||||
# ---------------------------------------------------------------------
|
||||
# Logique interne
|
||||
# ---------------------------------------------------------------------
|
||||
|
||||
    def _plan_and_reply(self, instruction: str) -> Dict[str, Any]:
        """Run TaskPlanner.understand() on *instruction* and craft Léa's reply.

        Designed to be called without holding the lock (the planner may be
        slow); the lock is re-acquired only to append messages and change
        state. Three outcomes: no plan (error), plan not understood (error),
        or plan understood (proposal + awaiting_confirmation).
        """
        plan = None
        error_msg = ""

        if self._task_planner is None:
            error_msg = "Planificateur indisponible"
        else:
            try:
                # Workflow listing is best-effort: a provider failure falls
                # back to an empty list rather than aborting planning.
                workflows = []
                if self._workflows_provider is not None:
                    try:
                        workflows = self._workflows_provider() or []
                    except Exception as e:
                        logger.warning(f"ChatSession: workflows_provider erreur: {e}")
                        workflows = []

                plan = self._task_planner.understand(
                    instruction=instruction,
                    available_workflows=workflows,
                )
            except Exception as e:
                logger.warning(f"ChatSession: TaskPlanner.understand erreur: {e}")
                error_msg = f"Erreur de compréhension : {e}"

        # Graceful fallback when no plan could be produced (gemma4 down, …).
        if plan is None:
            with self._lock:
                self._append(
                    ROLE_LEA,
                    f"Désolée, je n'arrive pas à comprendre pour l'instant. {error_msg}".strip(),
                    meta={"error": error_msg},
                )
                self._set_state(STATE_ERROR)
                return {
                    "ok": False,
                    "state": self._state,
                    "error": error_msg,
                }

        # Instruction not understood by the planner.
        if not plan.understood:
            reason = plan.error or "je n'ai pas compris votre demande"
            with self._lock:
                self._append(
                    ROLE_LEA,
                    (
                        f"Désolée, {reason}. "
                        "Pouvez-vous reformuler ? Je connais les workflows que vous m'avez appris."
                    ),
                    meta={"plan": plan.to_dict()},
                )
                self._set_state(STATE_ERROR)
                return {
                    "ok": False,
                    "state": self._state,
                    "plan": plan.to_dict(),
                    "error": reason,
                }

        # Plan understood — phrase the proposal and wait for confirmation.
        proposal = self._format_proposal(plan)

        with self._lock:
            self._pending_plan = plan
            self._append(ROLE_LEA, proposal, meta={"plan": plan.to_dict()})
            self._set_state(STATE_AWAITING_CONFIRMATION)
            return {
                "ok": True,
                "state": self._state,
                "plan": plan.to_dict(),
                "message": proposal,
            }
|
||||
|
||||
@staticmethod
|
||||
def _format_proposal(plan: Any) -> str:
|
||||
"""Formuler une proposition en français à partir d'un TaskPlan."""
|
||||
lines = []
|
||||
lines.append(f"J'ai compris : « {plan.instruction} ».")
|
||||
|
||||
if plan.workflow_name:
|
||||
conf_pct = int(round((plan.match_confidence or 0.0) * 100))
|
||||
lines.append(
|
||||
f"Je vais utiliser le workflow « {plan.workflow_name} »"
|
||||
f" (confiance {conf_pct}%)."
|
||||
)
|
||||
elif plan.mode == "free" and plan.steps:
|
||||
lines.append(
|
||||
f"Je n'ai pas de workflow enregistré pour ça, "
|
||||
f"mais j'ai planifié {len(plan.steps)} étape(s) :"
|
||||
)
|
||||
for i, step in enumerate(plan.steps[:5], 1):
|
||||
desc = step.get("description", "") if isinstance(step, dict) else str(step)
|
||||
lines.append(f" {i}. {desc}")
|
||||
if len(plan.steps) > 5:
|
||||
lines.append(f" … et {len(plan.steps) - 5} autre(s) étape(s).")
|
||||
else:
|
||||
lines.append("Je n'ai pas de plan d'action clair pour cette demande.")
|
||||
|
||||
if plan.parameters:
|
||||
params_str = ", ".join(f"{k}={v}" for k, v in plan.parameters.items())
|
||||
lines.append(f"Paramètres détectés : {params_str}.")
|
||||
|
||||
if plan.is_loop:
|
||||
src = plan.loop_source or "éléments à traiter"
|
||||
lines.append(f"Traitement en boucle sur : {src}.")
|
||||
|
||||
lines.append("")
|
||||
lines.append("Est-ce que je peux y aller ? (oui / non)")
|
||||
return "\n".join(lines)
|
||||
|
||||
def _handle_confirmation_reply(self, text: str) -> Dict[str, Any]:
    """Interpret a user message as a YES/NO answer to the pending plan.

    Expected to be called with self._lock already held (the send_message
    path takes the lock before dispatching here). On "yes" the pending plan
    is executed immediately; on "no" it is discarded and the session returns
    to idle; any other reply asks the user to clarify without changing state.
    """
    # Record the raw user message in the history first.
    self._append(ROLE_USER, text)
    # Accepted affirmative / negative tokens (French plus common English).
    yes_tokens = {"oui", "yes", "ok", "y", "go", "vas-y", "allez", "allez-y", "confirme", "confirmer", "continue"}
    no_tokens = {"non", "no", "annule", "annuler", "stop", "arrête", "arrete", "abandonne", "abandonner"}

    # Normalize: trim, lowercase, drop trailing punctuation ("Oui!" -> "oui").
    t = text.strip().lower().rstrip("!.?")

    # A reply matches if it IS a token or STARTS WITH "<token> " (e.g. "oui vas-y").
    if t in yes_tokens or any(t.startswith(tok + " ") for tok in yes_tokens):
        # Confirmed — fall through to the execution section below.
        pass
    elif t in no_tokens or any(t.startswith(tok + " ") for tok in no_tokens):
        # Refused: drop the pending plan and go back to idle.
        self._append(
            ROLE_LEA,
            "D'accord, j'annule. Dites-moi autre chose quand vous voulez.",
        )
        self._pending_plan = None
        self._set_state(STATE_IDLE)
        return {"ok": True, "state": self._state, "confirmed": False}
    else:
        # Ambiguous answer: stay in awaiting-confirmation and ask again.
        self._append(
            ROLE_LEA,
            "Je n'ai pas compris votre réponse. Répondez « oui » pour lancer ou « non » pour annuler.",
        )
        return {"ok": True, "state": self._state, "needs_clarification": True}

    # Confirmed: consume the pending plan and switch to the executing state.
    plan = self._pending_plan
    self._pending_plan = None
    self._set_state(STATE_EXECUTING)
    # NOTE(review): this call happens while the caller still holds self._lock.
    # It is deadlock-free only because the lock is re-entrant (RLock), but it
    # means _execute_plan — and the replay callback it invokes — effectively
    # run from a lock-holding context; confirm that blocking is acceptable.
    return self._execute_plan(plan)
def _execute_plan(self, plan: Any) -> Dict[str, Any]:
    """Launch the replay corresponding to *plan* via the configured callback.

    Parameters:
        plan: a TaskPlan-like object (needs .workflow_match, .parameters,
              .to_dict()); may be None, which is reported as "nothing to run".

    Returns:
        A status dict: {"ok": bool, "state": str, ...} with either
        "replay_id" on success or "error" on failure.

    Side effects:
        Appends progress/error messages to the history and transitions the
        session to STATE_IDLE (no plan), STATE_ERROR (failure), or leaves the
        caller-set state untouched on success. Each lock section is entered
        separately so the replay callback itself runs without holding
        self._lock here — though a re-entrant caller may still hold it.
    """
    if plan is None:
        with self._lock:
            self._append(ROLE_LEA, "Rien à exécuter.", meta={})
            self._set_state(STATE_IDLE)
            return {"ok": False, "state": self._state, "error": "Aucun plan"}

    if self._replay_callback is None:
        # No execution engine wired in: refuse explicitly rather than no-op.
        with self._lock:
            self._append(
                ROLE_LEA,
                "Je ne peux pas exécuter : aucun moteur d'exécution n'est configuré.",
            )
            self._set_state(STATE_ERROR)
            return {
                "ok": False,
                "state": self._state,
                "error": "replay_callback non configuré",
            }

    # Announce the start to the user before invoking the callback.
    with self._lock:
        self._append(
            ROLE_LEA,
            "C'est parti ! Je lance le workflow…",
            meta={"plan": plan.to_dict()},
        )

    # Invoke the replay callback outside the lock section above.
    try:
        if plan.workflow_match:
            # The matched workflow's session id drives the replay.
            replay_id = self._replay_callback(
                session_id=plan.workflow_match,
                machine_id=self.machine_id,
                params=plan.parameters,
            )
        else:
            # Free mode is not wired to the chat yet — refuse cleanly.
            # (The original dead `replay_id = ""` before this raise was removed.)
            raise RuntimeError(
                "Mode libre non supporté pour l'instant — "
                "entraînez un workflow pour cette tâche"
            )
    except Exception as e:
        with self._lock:
            self._append(
                ROLE_LEA,
                f"Je n'ai pas pu lancer le workflow : {e}",
                meta={"error": str(e)},
            )
            self._set_state(STATE_ERROR)
            return {"ok": False, "state": self._state, "error": str(e)}

    with self._lock:
        self._active_replay_id = replay_id or ""
        return {
            "ok": True,
            "state": self._state,
            "replay_id": self._active_replay_id,
        }
||||
# ---------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------
|
||||
|
||||
def _append(self, role: str, content: str, meta: Optional[Dict[str, Any]] = None) -> None:
    """Append one message to the in-memory history.

    Must be called while holding self._lock. Also bumps updated_at to the
    new message's timestamp so staleness checks see recent activity.
    """
    entry = ChatMessage(role=role, content=content, meta=meta or {})
    self._messages.append(entry)
    self.updated_at = entry.timestamp
||||
def _set_state(self, new_state: str) -> None:
    """Transition the session to *new_state*.

    Must be called while holding self._lock. Updates updated_at on every
    call, even when the state does not actually change.

    Raises:
        ValueError: if new_state is not one of VALID_STATES.
    """
    if new_state not in VALID_STATES:
        raise ValueError(f"État invalide : {new_state}")
    old = self._state
    self._state = new_state
    self.updated_at = time.time()
    if old != new_state:
        # Lazy %-style args: the message is only built when DEBUG is enabled.
        logger.debug("ChatSession %s: %s -> %s", self.session_id, old, new_state)
||||
# =============================================================================
# ChatManager — registre en mémoire des sessions
# =============================================================================


class ChatManager:
    """In-memory registry of chat sessions.

    Thread-safe; used by the FastAPI layer to handle several simultaneous
    conversations. Collaborators passed to the constructor are forwarded
    verbatim to every ChatSession this manager creates.
    """

    def __init__(
        self,
        task_planner: Any = None,
        workflows_provider: Optional[Callable[[], List[Dict[str, Any]]]] = None,
        replay_callback: Optional[Callable[..., str]] = None,
        status_provider: Optional[Callable[[str], Dict[str, Any]]] = None,
    ):
        self._task_planner = task_planner
        self._workflows_provider = workflows_provider
        self._replay_callback = replay_callback
        self._status_provider = status_provider
        self._sessions: Dict[str, ChatSession] = {}
        self._lock = threading.RLock()

    def create_session(self, machine_id: str = "default") -> ChatSession:
        """Create, register and return a fresh chat session."""
        session = ChatSession(
            task_planner=self._task_planner,
            workflows_provider=self._workflows_provider,
            replay_callback=self._replay_callback,
            status_provider=self._status_provider,
            machine_id=machine_id,
        )
        with self._lock:
            self._sessions[session.session_id] = session
            logger.info(f"ChatManager: session créée {session.session_id}")
        return session

    def get_session(self, session_id: str) -> Optional[ChatSession]:
        """Return the session for *session_id*, or None when unknown."""
        with self._lock:
            return self._sessions.get(session_id)

    def list_sessions(self) -> List[Dict[str, Any]]:
        """Return a lightweight summary dict for every registered session."""
        with self._lock:
            summaries = []
            for sess in self._sessions.values():
                summaries.append(
                    {
                        "session_id": sess.session_id,
                        "state": sess.state,
                        "machine_id": sess.machine_id,
                        "created_at": sess.created_at,
                        "updated_at": sess.updated_at,
                        "message_count": len(sess.get_history()),
                    }
                )
            return summaries

    def delete_session(self, session_id: str) -> bool:
        """Remove a session; return True when it actually existed."""
        with self._lock:
            return self._sessions.pop(session_id, None) is not None

    def cleanup_old(self, max_age_s: float = 3600 * 24) -> int:
        """Drop sessions idle for more than *max_age_s* seconds; return the count."""
        cutoff = time.time() - max_age_s
        with self._lock:
            # Collect first, then delete, to avoid mutating while iterating.
            stale = [
                sid for sid, sess in self._sessions.items()
                if sess.updated_at < cutoff
            ]
            for sid in stale:
                del self._sessions[sid]
            return len(stale)
Reference in New Issue
Block a user