feat: chat unifié, GestureCatalog, Copilot, Léa UI, extraction données, vérification replay
Refonte majeure du système Agent Chat et ajout de nombreux modules : - Chat unifié : suppression du dual Workflows/Agent Libre, tout passe par /api/chat avec résolution en 3 niveaux (workflow → geste → "montre-moi") - GestureCatalog : 38 raccourcis clavier universels Windows avec matching sémantique, substitution automatique dans les replays, et endpoint /api/gestures - Mode Copilot : exécution pas-à-pas des workflows avec validation humaine via WebSocket (approve/skip/abort) avant chaque action - Léa UI (agent_v0/lea_ui/) : interface PyQt5 pour Windows avec overlay transparent pour feedback visuel pendant le replay - Data Extraction (core/extraction/) : moteur d'extraction visuelle de données (OCR + VLM → SQLite), avec schémas YAML et export CSV/Excel - ReplayVerifier (agent_v0/server_v1/) : vérification post-action par comparaison de screenshots, avec logique de retry (max 3) - IntentParser durci : meilleur fallback regex, type GREETING, patterns améliorés - Dashboard : nouvelles pages gestures, streaming, extractions - Tests : 63 tests GestureCatalog, 47 tests extraction, corrections tests existants - Dépréciation : /api/agent/plan et /api/agent/execute retournent HTTP 410, suppression du code hardcodé _plan_to_replay_actions Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
23
tests/integration/conftest.py
Normal file
23
tests/integration/conftest.py
Normal file
@@ -0,0 +1,23 @@
|
||||
"""Conftest pour les tests d'intégration."""
|
||||
import importlib
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
ROOT = str(Path(__file__).resolve().parents[2])
|
||||
|
||||
# Forcer ROOT en tête de sys.path pour que le agent_v0 local (rpa_vision_v3)
|
||||
# soit trouvé AVANT le agent_v0 standalone de ~/ai/
|
||||
if ROOT in sys.path:
|
||||
sys.path.remove(ROOT)
|
||||
sys.path.insert(0, ROOT)
|
||||
|
||||
# Si agent_v0 est déjà chargé depuis le mauvais chemin, le remplacer
|
||||
_agent_mod = sys.modules.get("agent_v0")
|
||||
if _agent_mod and not getattr(_agent_mod, "__file__", "").startswith(ROOT):
|
||||
# Supprimer les entrées liées à l'ancien agent_v0
|
||||
to_remove = [k for k in sys.modules if k == "agent_v0" or k.startswith("agent_v0.")]
|
||||
for k in to_remove:
|
||||
del sys.modules[k]
|
||||
|
||||
# Pré-importer le bon agent_v0.server_v1
|
||||
import agent_v0.server_v1 # noqa: F401
|
||||
342
tests/integration/test_client_server_compat.py
Normal file
342
tests/integration/test_client_server_compat.py
Normal file
@@ -0,0 +1,342 @@
|
||||
"""
|
||||
Tests de compatibilité Client (Agent V1) ↔ Serveur (api_stream).
|
||||
|
||||
Vérifie que les payloads envoyés par le TraceStreamer correspondent
|
||||
exactement à ce que l'API serveur attend (formats, champs, endpoints).
|
||||
|
||||
Sans réseau réel : on mocke requests.post et on valide les appels.
|
||||
"""
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from unittest.mock import MagicMock, call, patch
|
||||
|
||||
import pytest
|
||||
|
||||
_ROOT = str(Path(__file__).resolve().parents[2])
|
||||
if _ROOT not in sys.path:
|
||||
sys.path.insert(0, _ROOT)
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# TraceStreamer ↔ API endpoints
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestStreamerEndpoints:
    """Checks that the client targets the expected server endpoints."""

    def test_register_endpoint(self):
        """start() must POST /register carrying the session_id param."""
        from agent_v0.agent_v1.network.streamer import TraceStreamer

        with patch("agent_v0.agent_v1.network.streamer.requests") as mock_req:
            mock_req.post.return_value = MagicMock(ok=True)
            streamer = TraceStreamer("sess_test_001")
            streamer.start()
            streamer.stop()

            # Keep only the POSTs aimed at the register endpoint.
            hits = [c for c in mock_req.post.call_args_list if "/register" in str(c)]
            assert len(hits) >= 1, "register endpoint jamais appelé"
            _, kwargs = hits[0]
            assert kwargs["params"]["session_id"] == "sess_test_001"

    def test_finalize_endpoint(self):
        """stop() must POST /finalize carrying the session_id param."""
        from agent_v0.agent_v1.network.streamer import TraceStreamer

        with patch("agent_v0.agent_v1.network.streamer.requests") as mock_req:
            mock_req.post.return_value = MagicMock(ok=True, json=lambda: {"status": "ok"})
            streamer = TraceStreamer("sess_test_002")
            streamer._server_available = True
            streamer.running = False
            streamer._finalize_session()

            hits = [c for c in mock_req.post.call_args_list if "/finalize" in str(c)]
            assert len(hits) >= 1, "finalize endpoint jamais appelé"
            _, kwargs = hits[0]
            assert kwargs["params"]["session_id"] == "sess_test_002"
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Payload formats
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestEventPayloadFormat:
    """Checks that the events sent by the client have the right shape."""

    def test_event_payload_matches_server_model(self):
        """The event payload must contain session_id, timestamp, event."""
        from agent_v0.agent_v1.network.streamer import TraceStreamer

        # (fix) removed dead local `captured_payload = {}` — it was assigned
        # and never used; the payload is read back from the mock instead.
        with patch("agent_v0.agent_v1.network.streamer.requests") as mock_req:
            mock_req.post.return_value = MagicMock(ok=True)

            streamer = TraceStreamer("sess_test_003")
            streamer._server_available = True

            # Send directly (no background thread involved).
            test_event = {
                "type": "mouse_click",
                "button": "left",
                "pos": (500, 300),
                "timestamp": 1234567890.0,
                "window": {"title": "Firefox", "app_name": "firefox"},
            }
            streamer._send_event(test_event)

            # Inspect the payload that actually went out.
            event_calls = [
                c for c in mock_req.post.call_args_list
                if "/event" in str(c)
            ]
            assert len(event_calls) == 1
            _, kwargs = event_calls[0]
            payload = kwargs["json"]

            # Fields required by the server-side Pydantic StreamEvent model.
            assert "session_id" in payload
            assert "timestamp" in payload
            assert "event" in payload
            assert payload["session_id"] == "sess_test_003"
            assert isinstance(payload["timestamp"], float)
            assert payload["event"]["type"] == "mouse_click"

    def test_event_with_window_info(self):
        """The server reads event.window to maintain last_window_info."""
        from agent_v0.agent_v1.network.streamer import TraceStreamer

        with patch("agent_v0.agent_v1.network.streamer.requests") as mock_req:
            mock_req.post.return_value = MagicMock(ok=True)

            streamer = TraceStreamer("sess_test_004")
            streamer._server_available = True

            event_with_window = {
                "type": "mouse_click",
                "window": {"title": "Chrome", "app_name": "chrome"},
            }
            streamer._send_event(event_with_window)

            event_calls = [
                c for c in mock_req.post.call_args_list
                if "/event" in str(c)
            ]
            payload = event_calls[0][1]["json"]
            # The window field must be forwarded to the server untouched.
            assert "window" in payload["event"]
            assert payload["event"]["window"]["title"] == "Chrome"
            assert payload["event"]["window"]["app_name"] == "chrome"
|
||||
|
||||
|
||||
class TestImagePayloadFormat:
    """Checks the screenshot upload format."""

    def test_image_params_match_server(self, tmp_path):
        """Image uploads must use the expected query params (session_id, shot_id)."""
        from agent_v0.agent_v1.network.streamer import TraceStreamer

        # Fabricate a tiny PNG-looking file on disk.
        img_path = tmp_path / "test.png"
        img_path.write_bytes(b"\x89PNG\r\n\x1a\n" + b"\x00" * 100)

        with patch("agent_v0.agent_v1.network.streamer.requests") as mock_req:
            mock_req.post.return_value = MagicMock(ok=True)

            streamer = TraceStreamer("sess_test_005")
            streamer._server_available = True
            streamer._send_image(str(img_path), "shot_0001_full")

            uploads = [c for c in mock_req.post.call_args_list if "/image" in str(c)]
            assert len(uploads) == 1
            _, kwargs = uploads[0]

            # Query params carry the identifiers.
            assert kwargs["params"]["session_id"] == "sess_test_005"
            assert kwargs["params"]["shot_id"] == "shot_0001_full"

            # The file itself goes out as multipart.
            assert "files" in kwargs
            assert "file" in kwargs["files"]

    def test_empty_path_ignored(self):
        """push_image with an empty path must not enqueue anything."""
        from agent_v0.agent_v1.network.streamer import TraceStreamer

        streamer = TraceStreamer("sess_test_006")
        streamer.push_image("", "heartbeat_empty")
        assert streamer.queue.empty(), "Chemin vide ne doit pas être enfilé"

    def test_crop_naming_convention(self, tmp_path):
        """The server tells full/crop apart via '_crop' inside the shot_id."""
        from agent_v0.agent_v1.network.streamer import TraceStreamer

        img_path = tmp_path / "crop.png"
        img_path.write_bytes(b"\x89PNG\r\n\x1a\n" + b"\x00" * 50)

        with patch("agent_v0.agent_v1.network.streamer.requests") as mock_req:
            mock_req.post.return_value = MagicMock(ok=True)

            streamer = TraceStreamer("sess_test_007")
            streamer._server_available = True

            # One full screenshot, then one crop.
            streamer._send_image(str(img_path), "shot_0001_full")
            streamer._send_image(str(img_path), "shot_0001_crop")

            uploads = [c for c in mock_req.post.call_args_list if "/image" in str(c)]
            assert len(uploads) == 2

            shot_ids = [c[1]["params"]["shot_id"] for c in uploads]
            assert "shot_0001_full" in shot_ids
            assert "shot_0001_crop" in shot_ids

            # Sanity-check the convention the server relies on
            # (api_stream.py checks "_crop" in shot_id).
            assert "_crop" in "shot_0001_crop"
            assert "_crop" not in "shot_0001_full"
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Server-side validation (StreamEvent model)
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestServerModelValidation:
    """Checks that client payloads pass the server-side Pydantic validation."""

    def test_streamevent_model_accepts_client_payload(self):
        """A typical client payload is accepted by the server StreamEvent model."""
        import agent_v0.server_v1  # noqa: F401 — force the correct import
        from agent_v0.server_v1.api_stream import StreamEvent

        # Event body as the client would produce it.
        evt = {
            "type": "mouse_click",
            "button": "left",
            "pos": [500, 300],
            "timestamp": 1741689930.123,
            "window": {"title": "Firefox", "app_name": "firefox"},
            "screenshot_id": "shot_0001",
        }
        model = StreamEvent(
            session_id="sess_20260311T100530_abc123",
            timestamp=1741689930.123,
            event=evt,
        )
        assert model.session_id == "sess_20260311T100530_abc123"
        assert model.event["type"] == "mouse_click"
        assert model.event["window"]["title"] == "Firefox"

    def test_streamevent_heartbeat(self):
        """Heartbeat events pass validation too."""
        import agent_v0.server_v1  # noqa: F401
        from agent_v0.server_v1.api_stream import StreamEvent

        model = StreamEvent(
            session_id="sess_heartbeat",
            timestamp=1741689935.0,
            event={
                "type": "heartbeat",
                "image": "/tmp/shots/context_1741689935_heartbeat.png",
                "timestamp": 1741689935.0,
            },
        )
        assert model.event["type"] == "heartbeat"

    def test_streamevent_window_focus_change(self):
        """Window focus change events pass validation."""
        import agent_v0.server_v1  # noqa: F401
        from agent_v0.server_v1.api_stream import StreamEvent

        model = StreamEvent(
            session_id="sess_focus",
            timestamp=1741689940.0,
            event={
                "type": "window_focus_change",
                "from": {"title": "Terminal", "app_name": "gnome-terminal"},
                "to": {"title": "Firefox", "app_name": "firefox"},
                "timestamp": 1741689940.0,
            },
        )
        assert model.event["type"] == "window_focus_change"
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Server processes client data correctly
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestServerProcessesClientData:
    """Checks that the server consumes client data correctly."""

    def test_window_info_extracted_from_event(self):
        """LiveSessionManager extracts window info from incoming events."""
        import agent_v0.server_v1  # noqa: F401
        from agent_v0.server_v1.live_session_manager import LiveSessionManager

        mgr = LiveSessionManager()
        # Event shaped exactly like the Agent V1 client sends it.
        client_event = {
            "type": "mouse_click",
            "button": "left",
            "pos": [500, 300],
            "window": {"title": "Firefox", "app_name": "firefox"},
        }
        mgr.add_event("sess_client", client_event)

        session = mgr.get_session("sess_client")
        assert session.last_window_info["title"] == "Firefox"
        assert session.last_window_info["app_name"] == "firefox"

    def test_crop_filtered_in_raw_session(self):
        """Crops are dropped when converting to a RawSession."""
        import agent_v0.server_v1  # noqa: F401
        from agent_v0.server_v1.live_session_manager import LiveSessionManager

        mgr = LiveSessionManager()
        # The client uploads both a full shot and its crop.
        mgr.add_screenshot("sess_raw", "shot_0001_full", "/tmp/full.png")
        mgr.add_screenshot("sess_raw", "shot_0001_crop", "/tmp/crop.png")

        raw = mgr.to_raw_session("sess_raw")
        # Only the full shot survives into RawSession.
        assert len(raw["screenshots"]) == 1
        assert raw["screenshots"][0]["screenshot_id"] == "shot_0001_full"

    def test_server_failure_tracking(self):
        """After repeated failures the streamer's _send_event keeps returning False."""
        from agent_v0.agent_v1.network.streamer import TraceStreamer

        with patch("agent_v0.agent_v1.network.streamer.requests") as mock_req:
            mock_req.post.return_value = MagicMock(ok=False, status_code=500)

            streamer = TraceStreamer("sess_fail")
            streamer._server_available = True

            # Ten consecutive failures.
            for _ in range(10):
                streamer._send_event({"type": "test"})

            # _server_available stays True here: the availability tracking
            # lives in the _stream_loop, not in _send_event itself.
            # _send_event still reports the failure by returning False.
            assert not streamer._send_event({"type": "test"})
|
||||
254
tests/integration/test_graph_to_visual.py
Normal file
254
tests/integration/test_graph_to_visual.py
Normal file
@@ -0,0 +1,254 @@
|
||||
"""
|
||||
Tests du GraphToVisualConverter — conversion core Workflow → VWB VisualWorkflow.
|
||||
|
||||
Vérifie que le pont inverse (GraphBuilder → VWB) fonctionne correctement :
|
||||
- Chaque WorkflowNode produit un VisualNode avec position, type, ports
|
||||
- Chaque WorkflowEdge produit un VisualEdge avec source/target
|
||||
- L'ordre topologique est respecté (entry → end)
|
||||
- Les métadonnées visuelles (couleurs, labels) sont cohérentes
|
||||
"""
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
_ROOT = str(Path(__file__).resolve().parents[2])
|
||||
if _ROOT not in sys.path:
|
||||
sys.path.insert(0, _ROOT)
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Fixtures
|
||||
# =========================================================================
|
||||
|
||||
def _make_core_workflow(num_nodes=3):
    """Build a minimal core Workflow to feed the converter tests."""
    from datetime import datetime

    from core.models.workflow_graph import (
        Workflow,
        WorkflowNode,
        WorkflowEdge,
        Action,
        TargetSpec,
        ScreenTemplate,
        WindowConstraint,
        TextConstraint,
        UIConstraint,
        EmbeddingPrototype,
        EdgeConstraints,
        PostConditions,
        EdgeStats,
        SafetyRules,
        WorkflowStats,
        LearningConfig,
    )

    node_list = []
    for idx in range(num_nodes):
        # Role of the node in the visual graph: first → start,
        # last → end, everything in between → click.
        if idx == 0:
            role = "start"
        elif idx == num_nodes - 1:
            role = "end"
        else:
            role = "click"

        node_list.append(
            WorkflowNode(
                node_id=f"node_{idx}",
                name=f"Étape {idx}",
                description=f"Description nœud {idx}",
                template=ScreenTemplate(
                    window=WindowConstraint(title_pattern=f"App{idx}"),
                    text=TextConstraint(),
                    ui=UIConstraint(),
                    embedding=EmbeddingPrototype(
                        provider="test",
                        vector_id=f"vec_{idx}",
                        min_cosine_similarity=0.8,
                        sample_count=1,
                    ),
                ),
                is_entry=(idx == 0),
                is_end=(idx == num_nodes - 1),
                metadata={
                    "visual_type": role,
                    "parameters": {"target": f"button_{idx}"},
                },
            )
        )

    # One linear edge between each consecutive pair of nodes.
    edge_list = [
        WorkflowEdge(
            edge_id=f"edge_{idx}_to_{idx + 1}",
            from_node=f"node_{idx}",
            to_node=f"node_{idx + 1}",
            action=Action(
                type="mouse_click",
                target=TargetSpec(by_text=f"button_{idx}"),
            ),
            constraints=EdgeConstraints(),
            post_conditions=PostConditions(expected_node=f"node_{idx + 1}"),
            stats=EdgeStats(),
        )
        for idx in range(num_nodes - 1)
    ]

    now = datetime.now()

    return Workflow(
        workflow_id="test_wf_001",
        name="Test Workflow",
        description="Workflow de test pour conversion",
        version=1,
        learning_state="OBSERVATION",
        created_at=now,
        updated_at=now,
        entry_nodes=["node_0"],
        end_nodes=[f"node_{num_nodes - 1}"],
        nodes=node_list,
        edges=edge_list,
        safety_rules=SafetyRules(),
        stats=WorkflowStats(),
        learning=LearningConfig(),
        metadata={"tags": ["test"], "source": "test"},
    )
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Tests
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestGraphToVisualConverter:
    """Conversion tests: core Workflow → VisualWorkflow."""

    @staticmethod
    def _converter():
        """Import and return GraphToVisualConverter from the VWB backend."""
        sys.path.insert(0, str(Path(_ROOT) / "visual_workflow_builder" / "backend"))
        from services.graph_to_visual_converter import GraphToVisualConverter
        return GraphToVisualConverter

    def test_basic_conversion(self):
        """A 3-node workflow converts without error."""
        wf = _make_core_workflow(3)
        visual = self._converter()().convert(wf)

        assert visual.id == "test_wf_001"
        assert visual.name == "Test Workflow"
        assert len(visual.nodes) == 3
        assert len(visual.edges) == 2

    def test_node_ids_preserved(self):
        """Node IDs survive the conversion."""
        wf = _make_core_workflow(4)
        visual = self._converter()().convert(wf)

        assert {n.id for n in visual.nodes} == {"node_0", "node_1", "node_2", "node_3"}

    def test_edge_source_target_preserved(self):
        """Edges connect the right nodes."""
        wf = _make_core_workflow(3)
        visual = self._converter()().convert(wf)

        edge_pairs = [(e.source, e.target) for e in visual.edges]
        assert ("node_0", "node_1") in edge_pairs
        assert ("node_1", "node_2") in edge_pairs

    def test_visual_types_inferred(self):
        """Visual node types are inferred from the metadata."""
        wf = _make_core_workflow(3)
        visual = self._converter()().convert(wf)

        types = {n.id: n.type for n in visual.nodes}
        assert types["node_0"] == "start"
        assert types["node_1"] == "click"
        assert types["node_2"] == "end"

    def test_positions_ordered_vertically(self):
        """Nodes are laid out top to bottom."""
        wf = _make_core_workflow(5)
        visual = self._converter()().convert(wf)

        y_positions = [n.position.y for n in visual.nodes]
        assert y_positions == sorted(y_positions), "Les nodes doivent être ordonnés verticalement"

    def test_start_node_has_no_input_port(self):
        """The 'start' node exposes no input port."""
        wf = _make_core_workflow(3)
        visual = self._converter()().convert(wf)

        start_node = [n for n in visual.nodes if n.type == "start"][0]
        assert len(start_node.input_ports) == 0
        assert len(start_node.output_ports) == 1

    def test_end_node_has_no_output_port(self):
        """The 'end' node exposes no output port."""
        wf = _make_core_workflow(3)
        visual = self._converter()().convert(wf)

        end_node = [n for n in visual.nodes if n.type == "end"][0]
        assert len(end_node.input_ports) == 1
        assert len(end_node.output_ports) == 0

    def test_to_dict_roundtrip(self):
        """The VisualWorkflow serializes to a well-formed dict."""
        wf = _make_core_workflow(3)
        visual = self._converter()().convert(wf)

        d = visual.to_dict()
        assert d["id"] == "test_wf_001"
        assert len(d["nodes"]) == 3
        assert len(d["edges"]) == 2

        # Serialized nodes expose the fields the frontend needs.
        node0 = d["nodes"][0]
        assert "id" in node0
        assert "type" in node0
        assert "position" in node0

    def test_large_workflow(self):
        """A 20-node workflow converts correctly."""
        wf = _make_core_workflow(20)
        visual = self._converter()().convert(wf)

        assert len(visual.nodes) == 20
        assert len(visual.edges) == 19

    def test_colors_assigned(self):
        """Every node type gets a hex color."""
        wf = _make_core_workflow(3)
        visual = self._converter()().convert(wf)

        for node in visual.nodes:
            assert node.color is not None
            assert node.color.startswith("#")

    def test_utility_function(self):
        """The convert_graph_to_visual helper works end to end."""
        sys.path.insert(0, str(Path(_ROOT) / "visual_workflow_builder" / "backend"))
        from services.graph_to_visual_converter import convert_graph_to_visual

        wf = _make_core_workflow(3)
        visual = convert_graph_to_visual(wf)

        assert visual.name == "Test Workflow"
        assert len(visual.nodes) == 3
|
||||
524
tests/integration/test_stream_processor.py
Normal file
524
tests/integration/test_stream_processor.py
Normal file
@@ -0,0 +1,524 @@
|
||||
"""
|
||||
Tests d'intégration pour StreamProcessor + LiveSessionManager + StreamWorker.
|
||||
|
||||
Vérifie le pipeline complet : session → événements → screenshots → workflow.
|
||||
Sans GPU/modèles lourds (mocks pour ScreenAnalyzer et CLIP).
|
||||
"""
|
||||
|
||||
import json
|
||||
import shutil
|
||||
import sys
|
||||
import tempfile
|
||||
import threading
|
||||
from pathlib import Path
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
# Garantir que la racine du projet est dans sys.path (nécessaire pour les
|
||||
# imports relatifs de agent_v0.server_v1)
|
||||
_ROOT = str(Path(__file__).resolve().parents[2])
|
||||
if _ROOT not in sys.path:
|
||||
sys.path.insert(0, _ROOT)
|
||||
|
||||
|
||||
@pytest.fixture
def temp_dir():
    """Yield a throwaway directory, removed after the test."""
    path = tempfile.mkdtemp(prefix="test_stream_")
    try:
        yield path
    finally:
        shutil.rmtree(path, ignore_errors=True)
|
||||
|
||||
|
||||
@pytest.fixture
def processor(temp_dir):
    """A StreamProcessor writing into the per-test temp directory."""
    from agent_v0.server_v1.stream_processor import StreamProcessor

    return StreamProcessor(data_dir=temp_dir)
|
||||
|
||||
|
||||
@pytest.fixture
def worker(temp_dir, processor):
    """A StreamWorker wired to the test processor and temp directory."""
    from agent_v0.server_v1.worker_stream import StreamWorker

    return StreamWorker(live_dir=temp_dir, processor=processor)
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# LiveSessionManager
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestLiveSessionManager:
    """Unit checks on the in-memory live session registry."""

    @staticmethod
    def _new_manager():
        """Return a fresh LiveSessionManager."""
        from agent_v0.server_v1.live_session_manager import LiveSessionManager
        return LiveSessionManager()

    def test_register_and_get(self):
        mgr = self._new_manager()
        session = mgr.register_session("sess_001")
        assert session.session_id == "sess_001"
        assert mgr.get_session("sess_001") is session

    def test_get_or_create(self):
        mgr = self._new_manager()
        first = mgr.get_or_create("sess_002")
        second = mgr.get_or_create("sess_002")
        assert first is second

    def test_add_event_updates_window_info(self):
        mgr = self._new_manager()
        mgr.add_event("sess_003", {
            "type": "mouse_click",
            "window": {"title": "Firefox", "app_name": "firefox"},
        })
        session = mgr.get_session("sess_003")
        assert session.last_window_info["title"] == "Firefox"
        assert len(session.events) == 1

    def test_add_screenshot(self):
        mgr = self._new_manager()
        mgr.add_screenshot("sess_004", "shot_001", "/tmp/shot_001.png")
        session = mgr.get_session("sess_004")
        assert session.shot_paths["shot_001"] == "/tmp/shot_001.png"

    def test_finalize(self):
        mgr = self._new_manager()
        mgr.register_session("sess_005")
        session = mgr.finalize("sess_005")
        assert session.finalized is True

    def test_active_session_count(self):
        mgr = self._new_manager()
        mgr.register_session("a")
        mgr.register_session("b")
        assert mgr.active_session_count == 2
        mgr.finalize("a")
        assert mgr.active_session_count == 1

    def test_to_raw_session(self):
        mgr = self._new_manager()
        mgr.add_event("sess_006", {"type": "click", "timestamp": 1000})
        mgr.add_screenshot("sess_006", "shot_full_001", "/tmp/full.png")
        mgr.add_screenshot("sess_006", "shot_001_crop", "/tmp/crop.png")

        raw = mgr.to_raw_session("sess_006")
        assert raw is not None
        assert raw["session_id"] == "sess_006"
        assert len(raw["events"]) == 1
        # Crops are filtered out of the raw session.
        assert len(raw["screenshots"]) == 1
        assert raw["screenshots"][0]["screenshot_id"] == "shot_full_001"
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# StreamProcessor
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestStreamProcessor:
    """Checks the event/crop/screenshot pipeline of StreamProcessor."""

    def test_process_event(self, processor):
        outcome = processor.process_event("sess_010", {
            "type": "mouse_click",
            "timestamp": 1234,
            "window": {"title": "Chrome", "app_name": "chrome"},
        })
        assert outcome["status"] == "event_recorded"
        session = processor.session_manager.get_session("sess_010")
        assert session.last_window_info["title"] == "Chrome"

    def test_process_crop(self, processor):
        outcome = processor.process_crop("sess_011", "shot_001_crop", "/tmp/crop.png")
        assert outcome["status"] == "crop_stored"

    def test_process_screenshot_no_analyzer(self, processor):
        """Without a ScreenAnalyzer, a minimal result comes back."""
        # Force initialization without loading any GPU models.
        processor._initialized = True
        processor._screen_analyzer = None
        processor._faiss_manager = None

        outcome = processor.process_screenshot("sess_012", "shot_001", "/tmp/full.png")
        assert outcome["shot_id"] == "shot_001"
        assert outcome["state_id"] is None  # no analysis ran
        assert outcome["ui_elements_count"] == 0

    @patch("agent_v0.server_v1.stream_processor.StreamProcessor._ensure_initialized")
    def test_process_screenshot_with_mock_analyzer(self, mock_init, processor):
        """With a mocked ScreenAnalyzer, exercise the full flow."""
        from core.models.screen_state import (
            ScreenState, WindowContext, RawLevel,
            PerceptionLevel, ContextLevel, EmbeddingRef,
        )

        fake_state = ScreenState(
            screen_state_id="state_001",
            timestamp="2026-01-01T00:00:00",
            session_id="sess_013",
            window=WindowContext(app_name="test", window_title="Test", screen_resolution=[1920, 1080]),
            raw=RawLevel(screenshot_path="/tmp/test.png", capture_method="mss", file_size_bytes=0),
            perception=PerceptionLevel(
                embedding=EmbeddingRef(provider="test", vector_id="v1", dimensions=512),
                detected_text=["Bonjour", "Valider"],
                text_detection_method="mock",
                confidence_avg=0.9,
            ),
            context=ContextLevel(),
            ui_elements=[MagicMock(), MagicMock(), MagicMock()],
        )

        processor._screen_analyzer = MagicMock()
        processor._screen_analyzer.analyze.return_value = fake_state
        processor._faiss_manager = None
        processor._initialized = True

        outcome = processor.process_screenshot("sess_013", "shot_full", "/tmp/full.png")
        assert outcome["state_id"] == "state_001"
        assert outcome["ui_elements_count"] == 3
        assert outcome["text_detected"] == 2

        # The ScreenState is retained for the final workflow build.
        assert len(processor._screen_states["sess_013"]) == 1

    def test_finalize_insufficient_data(self, processor):
        """Finalizing with too little data reports insufficient_data."""
        processor._initialized = True
        processor.session_manager.register_session("sess_014")
        outcome = processor.finalize_session("sess_014")
        assert outcome["status"] == "insufficient_data"

    def test_stats(self, processor):
        stats = processor.stats
        assert stats["active_sessions"] == 0
        assert stats["total_workflows"] == 0
        assert stats["initialized"] is False
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# StreamWorker
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestStreamWorker:
    """Tests for the StreamWorker direct-dispatch and disk-polling paths."""

    def test_process_event_direct(self, worker):
        """A directly submitted event is recorded."""
        result = worker.process_event_direct("sess_020", {"type": "click"})
        assert result["status"] == "event_recorded"

    def test_process_crop_direct(self, worker):
        """A directly submitted crop is stored."""
        result = worker.process_crop_direct("sess_021", "crop_001", "/tmp/crop.png")
        assert result["status"] == "crop_stored"

    def test_stats(self, worker):
        """Worker stats expose at least the active session count."""
        stats = worker.stats
        assert "active_sessions" in stats

    def test_poll_reads_events_from_disk(self, worker, temp_dir):
        """The worker reads JSONL events from disk during a poll cycle."""
        session_dir = Path(temp_dir) / "test_sess"
        session_dir.mkdir()
        event_file = session_dir / "live_events.jsonl"
        event_file.write_text(
            json.dumps({"type": "click", "timestamp": 100}) + "\n"
            + json.dumps({"type": "key_press", "timestamp": 200}) + "\n"
        )

        # Simulate a single polling round.
        worker._check_live_sessions()

        session = worker.processor.session_manager.get_session("test_sess")
        assert session is not None
        assert len(session.events) == 2
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# GraphBuilder precomputed_states
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestGraphBuilderPrecomputed:
    """Tests for GraphBuilder support of precomputed ScreenStates."""

    def test_accepts_precomputed_states(self):
        """GraphBuilder.build_from_session accepts a precomputed_states kwarg."""
        import inspect
        from core.graph.graph_builder import GraphBuilder
        sig = inspect.signature(GraphBuilder.build_from_session)
        assert "precomputed_states" in sig.parameters

    def test_raises_without_screenshots_or_states(self):
        """Building fails if neither screenshots nor precomputed_states exist."""
        from core.graph.graph_builder import GraphBuilder
        from core.models.raw_session import RawSession

        builder = GraphBuilder(min_pattern_repetitions=2)
        session = MagicMock(spec=RawSession)
        session.screenshots = []
        session.session_id = "empty"

        with pytest.raises(ValueError, match="no screenshots"):
            builder.build_from_session(session)

    def test_skips_screen_state_creation_with_precomputed(self):
        """With precomputed_states, _create_screen_states is never called."""
        from core.graph.graph_builder import GraphBuilder
        from core.models.raw_session import RawSession

        builder = GraphBuilder(min_pattern_repetitions=2)
        builder._create_screen_states = MagicMock()

        # Mock out the rest of the pipeline; embeddings must be unit-norm
        # float32 vectors to satisfy downstream FAISS-style consumers.
        fake_embedding = np.random.randn(512).astype(np.float32)
        fake_embedding /= np.linalg.norm(fake_embedding)
        builder._compute_embeddings = MagicMock(return_value=[fake_embedding, fake_embedding])
        builder._detect_patterns = MagicMock(return_value={})
        builder._build_nodes = MagicMock(return_value=[])
        builder._build_edges = MagicMock(return_value=[])

        session = MagicMock(spec=RawSession)
        session.session_id = "precomp"
        session.screenshots = []

        fake_states = [MagicMock(), MagicMock()]
        builder.build_from_session(session, precomputed_states=fake_states)

        # _create_screen_states must NOT be called ...
        builder._create_screen_states.assert_not_called()
        # ... and _compute_embeddings must receive the precomputed states.
        builder._compute_embeddings.assert_called_once_with(fake_states)
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Thread-safety de StreamProcessor
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestStreamProcessorThreadSafety:
    """Verifies that concurrent access to the internal dicts is protected."""

    @staticmethod
    def _run_concurrently(target, arg_prefix, count):
        """Start `count` threads calling target(f"{arg_prefix}{i}") and join them."""
        threads = [
            threading.Thread(target=target, args=(f"{arg_prefix}{t}",))
            for t in range(count)
        ]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

    def test_has_data_lock(self, processor):
        """StreamProcessor owns a dedicated _data_lock."""
        assert hasattr(processor, "_data_lock")
        assert isinstance(processor._data_lock, type(threading.Lock()))

    def test_concurrent_screen_states_access(self, processor):
        """Concurrent access to _screen_states raises no error."""
        processor._initialized = True
        processor._screen_analyzer = None

        errors = []

        def add_states(session_id):
            try:
                for i in range(50):
                    with processor._data_lock:
                        if session_id not in processor._screen_states:
                            processor._screen_states[session_id] = []
                        processor._screen_states[session_id].append(f"state_{i}")
            except Exception as e:
                errors.append(e)

        self._run_concurrently(add_states, "sess_", 5)

        assert len(errors) == 0
        assert len(processor._screen_states) == 5

    def test_concurrent_embeddings_access(self, processor):
        """Concurrent access to _embeddings raises no error."""
        errors = []

        def add_embeddings(session_id):
            try:
                for i in range(50):
                    with processor._data_lock:
                        if session_id not in processor._embeddings:
                            processor._embeddings[session_id] = []
                        processor._embeddings[session_id].append(
                            np.random.randn(512).astype(np.float32)
                        )
            except Exception as e:
                errors.append(e)

        self._run_concurrently(add_embeddings, "sess_", 5)

        assert len(errors) == 0
        assert len(processor._embeddings) == 5

    def test_concurrent_workflows_access(self, processor):
        """Concurrent access to _workflows raises no error."""
        errors = []

        def add_workflow(wf_id):
            try:
                mock_wf = MagicMock()
                mock_wf.nodes = [1, 2]
                mock_wf.edges = [1]
                with processor._data_lock:
                    processor._workflows[wf_id] = mock_wf
            except Exception as e:
                errors.append(e)

        self._run_concurrently(add_workflow, "wf_", 10)

        assert len(errors) == 0
        assert len(processor._workflows) == 10
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# list_sessions / list_workflows
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestStreamProcessorListMethods:
    """Tests for list_sessions() and list_workflows()."""

    def test_list_sessions_empty(self, processor):
        """No registered sessions yields an empty list."""
        result = processor.list_sessions()
        assert result == []

    def test_list_sessions_with_data(self, processor):
        """A populated session is summarized with all of its counters."""
        processor.session_manager.register_session("sess_ls_1")
        processor.session_manager.add_event("sess_ls_1", {
            "type": "click",
            "window": {"title": "App", "app_name": "app"},
        })
        processor.session_manager.add_screenshot("sess_ls_1", "shot_1", "/tmp/s.png")

        # Seed internal per-session state under the lock, mirroring how the
        # processor itself mutates these dicts.
        with processor._data_lock:
            processor._screen_states["sess_ls_1"] = ["state_a", "state_b"]
            processor._embeddings["sess_ls_1"] = [np.zeros(512)]

        sessions = processor.list_sessions()
        assert len(sessions) == 1
        s = sessions[0]
        assert s["session_id"] == "sess_ls_1"
        assert s["events_count"] == 1
        assert s["screenshots_count"] == 1
        assert s["states_count"] == 2
        assert s["embeddings_count"] == 1
        assert s["finalized"] is False

    def test_list_sessions_multiple(self, processor):
        """Multiple sessions are listed with their finalized flags."""
        processor.session_manager.register_session("a")
        processor.session_manager.register_session("b")
        processor.session_manager.finalize("b")

        sessions = processor.list_sessions()
        assert len(sessions) == 2
        by_id = {s["session_id"]: s for s in sessions}
        assert by_id["a"]["finalized"] is False
        assert by_id["b"]["finalized"] is True

    def test_list_workflows_empty(self, processor):
        """No stored workflows yields an empty list."""
        result = processor.list_workflows()
        assert result == []

    def test_list_workflows_with_data(self, processor):
        """A stored workflow is summarized with id, counts and name."""
        mock_wf = MagicMock()
        mock_wf.nodes = [1, 2, 3]
        mock_wf.edges = [1, 2]
        mock_wf.name = "test_workflow"
        with processor._data_lock:
            processor._workflows["wf_001"] = mock_wf

        workflows = processor.list_workflows()
        assert len(workflows) == 1
        wf = workflows[0]
        assert wf["workflow_id"] == "wf_001"
        assert wf["nodes"] == 3
        assert wf["edges"] == 2
        assert wf["name"] == "test_workflow"
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# API endpoints (sessions / workflows)
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class TestAPIEndpoints:
    """Tests for the GET sessions and workflows endpoints."""

    @pytest.fixture
    def client(self, temp_dir):
        """FastAPI test client backed by an isolated StreamProcessor.

        Swaps the module-level processor/worker for test instances and
        restores the originals on teardown so other tests are unaffected.
        """
        from fastapi.testclient import TestClient
        from agent_v0.server_v1 import api_stream
        from agent_v0.server_v1.stream_processor import StreamProcessor
        from agent_v0.server_v1.worker_stream import StreamWorker

        # Replace the global processor with a test processor.
        original_processor = api_stream.processor
        original_worker = api_stream.worker
        test_processor = StreamProcessor(data_dir=temp_dir)
        api_stream.processor = test_processor
        api_stream.worker = StreamWorker(
            live_dir=temp_dir, processor=test_processor
        )

        client = TestClient(api_stream.app, raise_server_exceptions=False)
        yield client, test_processor

        # Restore the originals.
        api_stream.processor = original_processor
        api_stream.worker = original_worker

    def test_get_sessions_empty(self, client):
        c, _ = client
        resp = c.get("/api/v1/traces/stream/sessions")
        assert resp.status_code == 200
        data = resp.json()
        assert data["sessions"] == []

    def test_get_sessions_with_data(self, client):
        c, proc = client
        proc.session_manager.register_session("api_sess_1")
        proc.session_manager.add_event("api_sess_1", {"type": "click"})

        resp = c.get("/api/v1/traces/stream/sessions")
        assert resp.status_code == 200
        sessions = resp.json()["sessions"]
        assert len(sessions) == 1
        assert sessions[0]["session_id"] == "api_sess_1"
        assert sessions[0]["events_count"] == 1

    def test_get_workflows_empty(self, client):
        c, _ = client
        resp = c.get("/api/v1/traces/stream/workflows")
        assert resp.status_code == 200
        data = resp.json()
        assert data["workflows"] == []

    def test_get_workflows_with_data(self, client):
        c, proc = client
        mock_wf = MagicMock()
        mock_wf.nodes = [1, 2]
        mock_wf.edges = [1]
        mock_wf.name = "api_test_wf"
        with proc._data_lock:
            proc._workflows["wf_api_001"] = mock_wf

        resp = c.get("/api/v1/traces/stream/workflows")
        assert resp.status_code == 200
        workflows = resp.json()["workflows"]
        assert len(workflows) == 1
        assert workflows[0]["workflow_id"] == "wf_api_001"
        assert workflows[0]["nodes"] == 2
|
||||
Reference in New Issue
Block a user