feat(analytics): normalise API + contrat explicite get_next_action (Lot A)
Contrat get_next_action() — suppression du None ambigu :
{"status": "selected", "edge": ..., ...}
{"status": "terminal"}
{"status": "blocked", "reason": "no_valid_edge" | ...}
ExecutionLoop dispatche proprement : blocked -> PAUSED + _pause_requested,
terminal -> succès légitime. Rétrocompat défensive (None legacy -> blocked).
Analytics API normalisée (kwargs-only) :
on_execution_complete(duration_ms, status, steps_total|completed|failed)
on_step_complete(duration_ms, ...)
on_recovery_attempt(duration_ms, ...)
Découverte critique : les anciens appels utilisaient des méthodes et champs
inexistants (ExecutionMetrics.duration, metrics_collector.record_execution).
Le code n'avait jamais tourné au runtime — zéro analytics remontée.
L'exception était avalée par le try/except englobant.
58 tests (18 analytics + 11 contrat + 20 ExecutionLoop + 12 edge_scorer
non-régression). Migration complète, pas de pont legacy.
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -354,66 +354,306 @@ class WorkflowPipeline:
|
||||
# =========================================================================
|
||||
# Mode MATCHING : Reconnaissance de l'état actuel
|
||||
# =========================================================================
|
||||
|
||||
|
||||
def match_current_state_from_state(
    self,
    screen_state: ScreenState,
    workflow_id: Optional[str] = None,
    *,
    min_similarity: float = 0.5,
) -> Optional[Dict[str, Any]]:
    """
    Match an enriched ``ScreenState`` against the nodes of a workflow.

    First real context-aware matching (Lot E). Consumes the
    ``ScreenState`` already built by ``ExecutionLoop`` (with
    ``window_title``, ``detected_text`` and ``ui_elements`` filled in
    by the ``ScreenAnalyzer``) instead of rebuilding an empty stub with
    ``window_title="Unknown"``.

    Strategy:
        1. If the ``HierarchicalMatcher`` is available AND the target
           workflow can be loaded, prefer multi-level matching
           (window -> region -> element), which fully exploits the
           ``ui_elements`` and the ``window_title``.
        2. Otherwise fall back to embedding matching via FAISS (same
           logic as the legacy ``match_current_state``, but with the
           provided ``ScreenState``, not a stub).

    Args:
        screen_state: Complete ``ScreenState`` (ui_elements +
            detected_text + window_info) built upstream by the
            ``ExecutionLoop``.
        workflow_id: Target workflow ID (all workflows if None).
        min_similarity: Minimum confidence threshold for a match to be
            considered valid. Keeps the historical semantics (0.5 for
            hierarchical, 0.85 for the FAISS fallback).

    Returns:
        Dict with ``node_id``, ``workflow_id``, ``confidence`` (plus
        hierarchical-matching details when applicable), or ``None`` if
        no match clears the threshold.
    """
    logger.debug(
        "Matching ScreenState (app=%s, title=%s, ui_elements=%d, "
        "detected_text=%d)",
        screen_state.window.app_name,
        screen_state.window.window_title,
        len(screen_state.ui_elements),
        len(screen_state.perception.detected_text),
    )

    # --- Strategy 1: hierarchical matching when a workflow is available ---
    if workflow_id:
        workflow = self.load_workflow(workflow_id)
        if workflow is not None and getattr(workflow, "nodes", None):
            try:
                hier_result = self._match_hierarchical_from_state(
                    screen_state=screen_state,
                    workflow=workflow,
                    workflow_id=workflow_id,
                    min_similarity=min_similarity,
                )
                if hier_result is not None:
                    return hier_result
            except Exception as exc:
                # Never break matching on a hierarchical-matcher error:
                # fall back to FAISS instead. Lazy %-style args keep this
                # consistent with the debug call above and avoid eager
                # string formatting when DEBUG is disabled.
                logger.debug(
                    "Hierarchical matching failed, fallback FAISS: %s", exc
                )

    # --- Strategy 2: embedding + FAISS fallback ---
    return self._match_via_faiss(
        screen_state=screen_state,
        workflow_id=workflow_id,
        min_similarity=min_similarity,
    )
|
||||
|
||||
def _match_hierarchical_from_state(
    self,
    screen_state: ScreenState,
    workflow: Workflow,
    workflow_id: str,
    min_similarity: float,
) -> Optional[Dict[str, Any]]:
    """
    Delegate matching to the ``HierarchicalMatcher``, extracting
    ``window_info``, ``detected_elements`` and the screenshot from the
    provided ``ScreenState``. Factors out the logic of
    ``match_hierarchical`` without re-opening the image when that is
    not necessary.

    Args:
        screen_state: Enriched state whose window context, UI elements
            and screenshot path feed the matcher.
        workflow: Loaded workflow whose nodes are matched against.
        workflow_id: ID used to key the per-workflow temporal context.
        min_similarity: Confidence floor below which the result is
            discarded (the caller then falls back to FAISS).

    Returns:
        Dict describing the match (per-level confidences, temporal
        boost, alternatives, timing), or ``None`` when the matcher's
        confidence is below ``min_similarity``.
    """
    # Rebuild window_info from the ScreenState (no "Unknown" stub).
    # Both "title" and "window_title" carry the same value — presumably
    # different consumers of window_info expect different keys; verify
    # against HierarchicalMatcher before removing either.
    window_info = {
        "title": screen_state.window.window_title,
        "app_name": screen_state.window.app_name,
        "window_title": screen_state.window.window_title,
    }
    detected_elements = list(screen_state.ui_elements)

    # Open the screenshot if needed (the matcher may require it for
    # region-level matching). If the path is unset or unreadable, pass
    # None and let the matcher work with window + elements only.
    # NOTE(review): the opened Image is never explicitly closed here;
    # confirm the matcher's lifetime expectations (PIL loads lazily).
    screenshot = None
    path = screen_state.raw.screenshot_path
    if path:
        try:
            from PIL import Image
            screenshot = Image.open(path)
        except Exception as exc:
            logger.debug(f"Screenshot unavailable for hierarchical match: {exc}")

    # Per-workflow temporal context, created lazily on first use.
    if workflow_id not in self._temporal_context:
        self._temporal_context[workflow_id] = TemporalContext()
    temporal_context = self._temporal_context[workflow_id]

    result: MatchResult = self.hierarchical_matcher.match(
        screenshot=screenshot,
        workflow=workflow,
        window_info=window_info,
        detected_elements=detected_elements,
        temporal_context=temporal_context,
    )

    # Reject matches below the caller-supplied confidence floor.
    if result.confidence < min_similarity:
        logger.debug(
            f"Hierarchical match below threshold: {result.confidence:.3f} "
            f"(min={min_similarity})"
        )
        return None

    # Remember this match so the next call benefits from a temporal boost.
    temporal_context.add_match(result.node_id, result.confidence)

    return {
        "node_id": result.node_id,
        "workflow_id": workflow_id,
        "confidence": result.confidence,
        "window_confidence": result.window_confidence,
        "region_confidence": result.region_confidence,
        "element_confidence": result.element_confidence,
        "temporal_boost": result.temporal_boost,
        "matched_variant": result.matched_variant,
        "alternatives": [
            {"node_id": alt.node_id, "confidence": alt.confidence}
            for alt in result.alternatives
        ],
        "match_time_ms": result.match_time_ms,
        "match_type": "hierarchical",
    }
|
||||
|
||||
def _match_via_faiss(
    self,
    screen_state: ScreenState,
    workflow_id: Optional[str],
    min_similarity: float,
) -> Optional[Dict[str, Any]]:
    """
    Fallback matching: embed the provided ``ScreenState`` (with its
    real ``ui_elements`` and ``window_title``, not a stub) and search
    the FAISS index for the closest known node.

    Args:
        screen_state: State to embed and look up.
        workflow_id: When set, hits belonging to other workflows are
            skipped.
        min_similarity: Caller-supplied confidence floor; the
            historical FAISS floor of 0.85 still applies as a minimum.

    Returns:
        Dict with ``node_id``, ``workflow_id``, ``confidence``,
        ``state_embedding_id`` and ``match_type="faiss"`` for the first
        hit that clears the floor, or ``None``.
    """
    # The historical FAISS threshold was 0.85. Honour it as a hard
    # lower bound while still respecting a stricter caller-supplied
    # min_similarity (the hierarchical pass may already have failed).
    score_floor = max(min_similarity, 0.85)

    embedding = self.embedding_builder.build(screen_state)
    hits = self.faiss_manager.search(embedding.get_vector(), k=5)

    if not hits:
        logger.debug("No match found in FAISS")
        return None

    for hit in hits:
        meta = hit.get("metadata", {})
        hit_workflow = meta.get("workflow_id")

        # Guard clauses: skip hits from other workflows, then skip
        # anything under the score floor.
        if workflow_id and hit_workflow != workflow_id:
            continue
        score = hit.get("similarity", 0)
        if score < score_floor:
            continue

        return {
            "node_id": meta.get("node_id"),
            "workflow_id": hit_workflow,
            "confidence": score,
            "state_embedding_id": embedding.embedding_id,
            "match_type": "faiss",
        }

    logger.debug(
        f"Best FAISS match below threshold: "
        f"{hits[0].get('similarity', 0):.3f} (min={score_floor})"
    )
    return None
|
||||
|
||||
def match_current_state(
|
||||
self,
|
||||
screenshot_path: str,
|
||||
workflow_id: Optional[str] = None,
|
||||
window_title: Optional[str] = None
|
||||
window_title: Optional[str] = None,
|
||||
) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Identifier dans quel node se trouve l'écran actuel.
|
||||
|
||||
Identifier dans quel node se trouve l'écran actuel (API legacy).
|
||||
|
||||
Lot E — cette méthode est désormais un **wrapper** de rétrocompat :
|
||||
elle construit un ``ScreenState`` enrichi via ``ScreenAnalyzer``
|
||||
(au lieu d'un stub avec ``window_title="Unknown"``) puis délègue
|
||||
à ``match_current_state_from_state``. Garantit la compat pour les
|
||||
callers externes qui ne manipulent que le chemin du screenshot.
|
||||
|
||||
Args:
|
||||
screenshot_path: Chemin vers le screenshot actuel
|
||||
workflow_id: ID du workflow à matcher (tous si None)
|
||||
window_title: Titre de fenêtre pour contexte
|
||||
|
||||
screenshot_path: Chemin vers le screenshot actuel.
|
||||
workflow_id: ID du workflow à matcher (tous si None).
|
||||
window_title: Titre de fenêtre pour contexte (utilisé comme
|
||||
hint si le ScreenAnalyzer n'est pas disponible).
|
||||
|
||||
Returns:
|
||||
Dict avec node_id, workflow_id, confidence, ou None si pas de match
|
||||
Dict avec ``node_id``, ``workflow_id``, ``confidence``, ou
|
||||
``None`` si pas de match.
|
||||
"""
|
||||
logger.debug(f"Matching screenshot: {screenshot_path}")
|
||||
|
||||
# Créer un ScreenState temporaire
|
||||
|
||||
# Construire un ScreenState enrichi via le ScreenAnalyzer partagé.
|
||||
screen_state = self._build_screen_state_for_matching(
|
||||
screenshot_path=screenshot_path,
|
||||
workflow_id=workflow_id,
|
||||
window_title=window_title,
|
||||
)
|
||||
|
||||
return self.match_current_state_from_state(
|
||||
screen_state=screen_state,
|
||||
workflow_id=workflow_id,
|
||||
)
|
||||
|
||||
def _build_screen_state_for_matching(
|
||||
self,
|
||||
screenshot_path: str,
|
||||
workflow_id: Optional[str],
|
||||
window_title: Optional[str],
|
||||
) -> ScreenState:
|
||||
"""
|
||||
Construire un ``ScreenState`` pour l'API legacy ``match_current_state``.
|
||||
|
||||
Tente d'utiliser le ``ScreenAnalyzer`` partagé ; en cas d'échec,
|
||||
retombe sur un stub minimaliste (équivalent fonctionnel de l'ancien
|
||||
comportement, mais clairement isolé ici).
|
||||
"""
|
||||
from core.models.screen_state import (
|
||||
WindowContext, RawLevel, PerceptionLevel, ContextLevel, EmbeddingRef
|
||||
)
|
||||
|
||||
screenshot_path = Path(screenshot_path)
|
||||
|
||||
|
||||
path = Path(screenshot_path)
|
||||
|
||||
# Tentative 1 : ScreenAnalyzer partagé (résultat enrichi)
|
||||
try:
|
||||
from core.pipeline import get_screen_analyzer
|
||||
analyzer = get_screen_analyzer()
|
||||
if analyzer is not None:
|
||||
window_info = None
|
||||
if window_title:
|
||||
window_info = {"title": window_title, "app_name": "unknown"}
|
||||
return analyzer.analyze(
|
||||
str(path),
|
||||
window_info=window_info,
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.debug(
|
||||
f"ScreenAnalyzer unavailable in match_current_state wrapper: {exc}"
|
||||
)
|
||||
|
||||
# Tentative 2 : stub minimal (comportement legacy d'urgence)
|
||||
window = WindowContext(
|
||||
app_name="unknown",
|
||||
window_title=window_title or "Unknown",
|
||||
screen_resolution=[1920, 1080],
|
||||
workspace="main"
|
||||
workspace="main",
|
||||
)
|
||||
|
||||
raw = RawLevel(
|
||||
screenshot_path=str(screenshot_path),
|
||||
screenshot_path=str(path),
|
||||
capture_method="manual",
|
||||
file_size_bytes=screenshot_path.stat().st_size if screenshot_path.exists() else 0
|
||||
file_size_bytes=path.stat().st_size if path.exists() else 0,
|
||||
)
|
||||
|
||||
perception = PerceptionLevel(
|
||||
embedding=EmbeddingRef(
|
||||
provider="openclip_ViT-B-32",
|
||||
vector_id="temp",
|
||||
dimensions=512
|
||||
dimensions=512,
|
||||
),
|
||||
detected_text=[],
|
||||
text_detection_method="pending",
|
||||
confidence_avg=0.0
|
||||
confidence_avg=0.0,
|
||||
)
|
||||
|
||||
context = ContextLevel(
|
||||
current_workflow_candidate=workflow_id,
|
||||
workflow_step=None,
|
||||
user_id="matcher",
|
||||
tags=[],
|
||||
business_variables={}
|
||||
business_variables={},
|
||||
)
|
||||
|
||||
current_state = ScreenState(
|
||||
return ScreenState(
|
||||
screen_state_id=f"match_{datetime.now().strftime('%Y%m%d_%H%M%S')}",
|
||||
timestamp=datetime.now(),
|
||||
session_id="matching",
|
||||
@@ -421,39 +661,8 @@ class WorkflowPipeline:
|
||||
raw=raw,
|
||||
perception=perception,
|
||||
context=context,
|
||||
ui_elements=[]
|
||||
ui_elements=[],
|
||||
)
|
||||
|
||||
# Calculer embedding
|
||||
state_embedding = self.embedding_builder.build(current_state)
|
||||
query_vector = state_embedding.get_vector()
|
||||
|
||||
# Rechercher dans FAISS
|
||||
results = self.faiss_manager.search(query_vector, k=5)
|
||||
|
||||
if not results:
|
||||
logger.debug("No match found in FAISS")
|
||||
return None
|
||||
|
||||
# Filtrer par workflow si spécifié
|
||||
for result in results:
|
||||
metadata = result.get("metadata", {})
|
||||
result_workflow_id = metadata.get("workflow_id")
|
||||
|
||||
if workflow_id and result_workflow_id != workflow_id:
|
||||
continue
|
||||
|
||||
similarity = result.get("similarity", 0)
|
||||
if similarity >= 0.85: # Seuil de matching
|
||||
return {
|
||||
"node_id": metadata.get("node_id"),
|
||||
"workflow_id": result_workflow_id,
|
||||
"confidence": similarity,
|
||||
"state_embedding_id": state_embedding.embedding_id
|
||||
}
|
||||
|
||||
logger.debug(f"Best match below threshold: {results[0].get('similarity', 0):.3f}")
|
||||
return None
|
||||
|
||||
def match_hierarchical(
|
||||
self,
|
||||
@@ -548,17 +757,56 @@ class WorkflowPipeline:
|
||||
def get_next_action(
|
||||
self,
|
||||
workflow_id: str,
|
||||
current_node_id: str
|
||||
) -> Optional[Dict[str, Any]]:
|
||||
current_node_id: str,
|
||||
screen_state: Optional[ScreenState] = None,
|
||||
strategy: str = "best",
|
||||
source_similarity: float = 1.0,
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Obtenir la prochaine action à exécuter.
|
||||
|
||||
|
||||
Contrat normalisé (Lot A — avril 2026) : retourne **toujours** un
|
||||
dict avec une clé ``status`` non-ambiguë. Le ``None`` ambigu qui
|
||||
confondait "workflow terminé" et "aucun edge valide" a été
|
||||
supprimé : l'appelant (ExecutionLoop) peut désormais distinguer
|
||||
ces cas pour déclencher une pause supervisée plutôt qu'une fin
|
||||
de workflow faux-positive.
|
||||
|
||||
Sélection d'edge (C3) :
|
||||
- Filtre dur sur ``pre_conditions`` (EdgeConstraints)
|
||||
- Ranking par score composite (success_rate, target_match, recency)
|
||||
- Tiebreak : success_rate le plus haut
|
||||
|
||||
Args:
|
||||
workflow_id: ID du workflow
|
||||
current_node_id: ID du node actuel
|
||||
|
||||
screen_state: État courant, requis pour évaluer les
|
||||
``pre_conditions`` et le match ``target_spec``. Si None,
|
||||
fallback sur la logique sans filtre de contraintes.
|
||||
strategy: ``"best"`` (défaut, scoring complet) ou ``"first"``
|
||||
(mode legacy, premier edge sans tri)
|
||||
source_similarity: confiance du matching (``match_current_state``)
|
||||
qui a identifié ``current_node_id``. Propagée à l'EdgeScorer
|
||||
pour activer la précondition ``min_source_similarity`` des
|
||||
edges. Défaut ``1.0`` pour compat avec les appelants qui
|
||||
ne la fournissent pas encore (Lot B — avril 2026).
|
||||
|
||||
Returns:
|
||||
Dict avec action, target_node, confidence, ou None
|
||||
Dict avec l'une des formes suivantes :
|
||||
|
||||
- ``{"status": "selected", "edge_id": str, "action": dict,
|
||||
"target_node": str, "confidence": float, "score": float}``
|
||||
→ edge sélectionné, l'ExecutionLoop doit l'exécuter.
|
||||
|
||||
- ``{"status": "terminal"}`` → le node courant n'a pas
|
||||
d'outgoing_edge (fin légitime de workflow).
|
||||
|
||||
- ``{"status": "blocked", "reason": str}`` → il existe des
|
||||
outgoing_edges mais aucun ne satisfait les conditions
|
||||
(``reason="no_valid_edge"``), ou le workflow est introuvable
|
||||
(``reason="workflow_not_found"``). L'ExecutionLoop doit
|
||||
déclencher une pause supervisée et ne **jamais** traiter
|
||||
ce cas comme un succès.
|
||||
"""
|
||||
workflow = self._workflows.get(workflow_id)
|
||||
if not workflow:
|
||||
@@ -569,23 +817,44 @@ class WorkflowPipeline:
|
||||
self._workflows[workflow_id] = workflow
|
||||
else:
|
||||
logger.error(f"Workflow not found: {workflow_id}")
|
||||
return None
|
||||
|
||||
return {"status": "blocked", "reason": "workflow_not_found"}
|
||||
|
||||
# Trouver les edges sortants du node actuel
|
||||
outgoing_edges = workflow.get_outgoing_edges(current_node_id)
|
||||
|
||||
|
||||
if not outgoing_edges:
|
||||
# Aucun outgoing_edge = fin légitime du workflow
|
||||
logger.info(f"No outgoing edges from node {current_node_id}")
|
||||
return None
|
||||
|
||||
# Pour l'instant, prendre le premier edge (TODO: logique de sélection)
|
||||
edge = outgoing_edges[0]
|
||||
|
||||
return {"status": "terminal"}
|
||||
|
||||
# Sélection robuste via EdgeScorer (C3)
|
||||
from core.pipeline.edge_scorer import EdgeScorer
|
||||
|
||||
scorer = EdgeScorer()
|
||||
edge = scorer.select_best(
|
||||
outgoing_edges,
|
||||
screen_state=screen_state,
|
||||
strategy=strategy,
|
||||
source_similarity=source_similarity,
|
||||
)
|
||||
|
||||
if edge is None:
|
||||
# Il y avait des candidats mais aucun n'a passé les filtres.
|
||||
# On NE retourne PAS "terminal" : l'ExecutionLoop doit traiter
|
||||
# ce cas comme un blocage et demander de l'aide.
|
||||
logger.warning(
|
||||
f"No valid edge from {current_node_id} "
|
||||
f"({len(outgoing_edges)} candidates rejected)"
|
||||
)
|
||||
return {"status": "blocked", "reason": "no_valid_edge"}
|
||||
|
||||
return {
|
||||
"status": "selected",
|
||||
"edge_id": edge.edge_id,
|
||||
"action": edge.action.to_dict(),
|
||||
"target_node": edge.to_node,
|
||||
"confidence": edge.stats.success_rate if edge.stats else 1.0
|
||||
"confidence": edge.stats.success_rate if edge.stats else 1.0,
|
||||
"score": edge.stats.success_rate if edge.stats else 1.0,
|
||||
}
|
||||
|
||||
def should_execute_automatically(self, workflow_id: str) -> bool:
|
||||
@@ -759,10 +1028,11 @@ class WorkflowPipeline:
|
||||
current_node_id = match_result["node_id"]
|
||||
logger.info(f"Matched current state to node: {current_node_id} (confidence: {match_result['confidence']:.3f})")
|
||||
|
||||
# 2. Obtenir la prochaine action
|
||||
# 2. Obtenir la prochaine action (contrat dict avec status explicite)
|
||||
action_info = self.get_next_action(workflow_id, current_node_id)
|
||||
|
||||
if not action_info:
|
||||
action_status = action_info.get("status")
|
||||
|
||||
if action_status == "terminal":
|
||||
return {
|
||||
"execution_id": execution_id,
|
||||
"workflow_id": workflow_id,
|
||||
@@ -771,9 +1041,21 @@ class WorkflowPipeline:
|
||||
"message": "Workflow completed - no more actions",
|
||||
"current_node": current_node_id,
|
||||
"execution_time_ms": (datetime.now() - start_time).total_seconds() * 1000,
|
||||
"correlation_id": execution_id
|
||||
"correlation_id": execution_id,
|
||||
}
|
||||
|
||||
|
||||
if action_status == "blocked":
|
||||
return {
|
||||
"execution_id": execution_id,
|
||||
"workflow_id": workflow_id,
|
||||
"success": False,
|
||||
"step_type": "action_selection",
|
||||
"error": f"No valid edge: {action_info.get('reason', 'unknown')}",
|
||||
"current_node": current_node_id,
|
||||
"execution_time_ms": (datetime.now() - start_time).total_seconds() * 1000,
|
||||
"correlation_id": execution_id,
|
||||
}
|
||||
|
||||
logger.info(f"Next action: {action_info['action']['type']} -> {action_info['target_node']}")
|
||||
|
||||
# 3. Charger le workflow pour obtenir l'edge complet
|
||||
|
||||
@@ -125,25 +125,47 @@ class WorkflowPipelineEnhanced:
|
||||
current_node_id = match_result["node_id"]
|
||||
logger.info(f"Matched current state to node: {current_node_id} (confidence: {match_result['confidence']:.3f})")
|
||||
|
||||
# 2. Obtenir la prochaine action
|
||||
# 2. Obtenir la prochaine action (contrat dict avec status explicite)
|
||||
action_info = self.get_next_action(workflow_id, current_node_id)
|
||||
|
||||
if not action_info:
|
||||
# Workflow terminé
|
||||
action_status = action_info.get("status")
|
||||
|
||||
if action_status == "terminal":
|
||||
# Workflow terminé (aucun outgoing_edge = fin légitime)
|
||||
performance_metrics.total_execution_time_ms = (datetime.now() - start_time).total_seconds() * 1000
|
||||
|
||||
|
||||
result = WorkflowExecutionResult.workflow_complete(
|
||||
execution_id=execution_id,
|
||||
workflow_id=workflow_id,
|
||||
current_node=current_node_id,
|
||||
performance_metrics=performance_metrics
|
||||
performance_metrics=performance_metrics,
|
||||
)
|
||||
result.correlation_id = correlation_id
|
||||
result.match_result = match_result
|
||||
|
||||
|
||||
logger.info(f"Workflow {workflow_id} completed at node {current_node_id}")
|
||||
return result
|
||||
|
||||
|
||||
if action_status == "blocked":
|
||||
# Des edges existent mais aucun ne passe les filtres :
|
||||
# c'est un blocage, pas une fin de workflow.
|
||||
performance_metrics.total_execution_time_ms = (datetime.now() - start_time).total_seconds() * 1000
|
||||
|
||||
result = WorkflowExecutionResult.error(
|
||||
execution_id=execution_id,
|
||||
workflow_id=workflow_id,
|
||||
error_message=f"No valid edge: {action_info.get('reason', 'unknown')}",
|
||||
step_type="action_selection",
|
||||
current_node=current_node_id,
|
||||
performance_metrics=performance_metrics,
|
||||
)
|
||||
result.correlation_id = correlation_id
|
||||
|
||||
logger.warning(
|
||||
f"Workflow {workflow_id} blocked at node {current_node_id}: "
|
||||
f"{action_info.get('reason')}"
|
||||
)
|
||||
return result
|
||||
|
||||
logger.info(f"Next action: {action_info['action']['type']} -> {action_info['target_node']}")
|
||||
|
||||
# 3. Charger le workflow pour obtenir l'edge complet
|
||||
|
||||
Reference in New Issue
Block a user