v1.0 - Version stable: multi-PC, détection UI-DETR-1, 3 modes exécution
- Frontend v4 accessible sur réseau local (192.168.1.40) - Ports ouverts: 3002 (frontend), 5001 (backend), 5004 (dashboard) - Ollama GPU fonctionnel - Self-healing interactif - Dashboard confiance Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
184
core/workflow/__init__.py
Normal file
184
core/workflow/__init__.py
Normal file
@@ -0,0 +1,184 @@
|
||||
"""
Workflow Module - workflow and variable management.

This package groups the tools used to manage RPA workflows:
- VariableManager : variable and parameter management
- SemanticMatcher : semantic matching of commands
- Composition Models : data models for workflow composition
"""

from .variable_manager import (
    VariableManager,
    VariableDefinition,
    create_variable_manager_from_workflow
)

from .semantic_matcher import (
    SemanticMatcher,
    WorkflowMatch,
    WorkflowMetadata,
    create_semantic_matcher
)

from .composition_models import (
    # Enums
    FailureAction,
    LoopType,
    ConditionType,
    TriggerType,
    ConcurrencyMode,
    SequencePriority,
    # Visual conditions
    VisualCondition,
    # Chaining
    ChainConfig,
    # Loops
    LoopConfig,
    LoopState,
    # Conditional branching
    BranchConfig,
    ConditionalNode,
    # Triggers
    ScheduleTrigger,
    FileTrigger,
    VisualTrigger,
    TriggerContext,
    TriggerConfig,
    trigger_from_dict,
    # Sub-workflows
    ParameterDef,
    SubWorkflowDefinition,
    ReferenceNode,
    # Execution context
    LogEntry,
    VariableOverride,
    ExecutionContext,
    # Validation
    ValidationResult,
    # Merging
    NodeConflict,
    MergeCandidate,
    # Sequences
    SequenceOccurrence,
    CommonSequence,
    # Full composition configuration
    WorkflowCompositionConfig,
    # Results
    ChainResult,
    IterationResult,
    ExecutionResult,
)

from .dependency_graph import (
    DependencyGraph,
    CircularDependencyError,
    UsageStats,
)

from .global_variable_manager import (
    GlobalVariableManager,
)

from .loop_executor import (
    LoopExecutor,
    LoopSafetyLimitError,
)

from .conditional_evaluator import (
    ConditionalEvaluator,
    NoMatchingBranchError,
    ConditionEvaluationError,
)

from .subworkflow_registry import (
    SubWorkflowRegistry,
    SubWorkflowNotFoundError,
    SubWorkflowExecutionError,
)

from .workflow_chainer import (
    WorkflowChainer,
    ChainValidationError,
    ChainExecutionError,
)

from .workflow_merger import WorkflowMerger
from .sequence_extractor import SequenceExtractor
from .trigger_manager import TriggerManager
from .execution_logger import ExecutionLogger

# Public API of the package, kept in sync with the imports above.
__all__ = [
    # Variable Manager
    'VariableManager',
    'VariableDefinition',
    'create_variable_manager_from_workflow',
    # Semantic Matcher
    'SemanticMatcher',
    'WorkflowMatch',
    'WorkflowMetadata',
    'create_semantic_matcher',
    # Composition Enums
    'FailureAction',
    'LoopType',
    'ConditionType',
    'TriggerType',
    'ConcurrencyMode',
    'SequencePriority',
    # Composition Models
    'VisualCondition',
    'ChainConfig',
    'LoopConfig',
    'LoopState',
    'BranchConfig',
    'ConditionalNode',
    'ScheduleTrigger',
    'FileTrigger',
    'VisualTrigger',
    'TriggerContext',
    'TriggerConfig',
    'trigger_from_dict',
    'ParameterDef',
    'SubWorkflowDefinition',
    'ReferenceNode',
    'LogEntry',
    'VariableOverride',
    'ExecutionContext',
    'ValidationResult',
    'NodeConflict',
    'MergeCandidate',
    'SequenceOccurrence',
    'CommonSequence',
    'WorkflowCompositionConfig',
    'ChainResult',
    'IterationResult',
    'ExecutionResult',
    # Dependency Graph
    'DependencyGraph',
    'CircularDependencyError',
    'UsageStats',
    # Global Variable Manager
    'GlobalVariableManager',
    # Loop Executor
    'LoopExecutor',
    'LoopSafetyLimitError',
    # Conditional Evaluator
    'ConditionalEvaluator',
    'NoMatchingBranchError',
    'ConditionEvaluationError',
    # SubWorkflow Registry
    'SubWorkflowRegistry',
    'SubWorkflowNotFoundError',
    'SubWorkflowExecutionError',
    # Workflow Chainer
    'WorkflowChainer',
    'ChainValidationError',
    'ChainExecutionError',
    # Workflow Merger
    'WorkflowMerger',
    # Sequence Extractor
    'SequenceExtractor',
    # Trigger Manager
    'TriggerManager',
    # Execution Logger
    'ExecutionLogger',
]
|
||||
986
core/workflow/composition_models.py
Normal file
986
core/workflow/composition_models.py
Normal file
@@ -0,0 +1,986 @@
|
||||
"""
|
||||
Composition Models - Modèles de données pour la composition de workflows
|
||||
|
||||
Ce module contient les dataclasses de configuration pour:
|
||||
- Chaînage de workflows (ChainConfig)
|
||||
- Boucles (LoopConfig, LoopState)
|
||||
- Conditions (ConditionalNode, BranchConfig, VisualCondition)
|
||||
- Déclencheurs (ScheduleTrigger, FileTrigger, VisualTrigger, TriggerContext)
|
||||
- Sous-workflows (SubWorkflowDefinition, ReferenceNode, ParameterDef)
|
||||
- Contexte d'exécution (ExecutionContext, LogEntry)
|
||||
- Validation (ValidationResult)
|
||||
- Fusion (MergeCandidate, NodeConflict)
|
||||
- Séquences (CommonSequence, SequenceOccurrence)
|
||||
|
||||
Chaque classe inclut les méthodes to_dict() et from_dict() pour la sérialisation.
|
||||
"""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from typing import Dict, List, Optional, Any, Literal, Union
|
||||
from enum import Enum
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Enums
|
||||
# ============================================================================
|
||||
|
||||
class FailureAction(str, Enum):
    """What to do when a workflow of a chain fails."""
    RETRY = "retry"  # run the failed workflow again
    SKIP = "skip"    # skip it and continue with the next one
    ABORT = "abort"  # stop the whole chain
|
||||
|
||||
|
||||
class LoopType(str, Enum):
    """Kinds of loops supported by a workflow."""
    COUNT = "count"          # fixed number of iterations
    CONDITION = "condition"  # repeat until a visual condition is met
|
||||
|
||||
|
||||
class ConditionType(str, Enum):
    """Kinds of visual conditions that can be evaluated on screen."""
    ELEMENT_PRESENT = "element_present"  # a target element is visible
    ELEMENT_ABSENT = "element_absent"    # a target element is not visible
    TEXT_EQUALS = "text_equals"          # detected text equals the expected text
    TEXT_CONTAINS = "text_contains"      # detected text contains the expected text
|
||||
|
||||
|
||||
class TriggerType(str, Enum):
    """Kinds of workflow triggers."""
    SCHEDULE = "schedule"  # time-based (cron or interval)
    FILE = "file"          # file appearing in a watched directory
    VISUAL = "visual"      # element detected on screen
|
||||
|
||||
|
||||
class ConcurrencyMode(str, Enum):
    """How concurrent trigger firings are handled."""
    CONCURRENT = "concurrent"  # run overlapping executions in parallel
    QUEUE = "queue"            # serialize executions one after the other
|
||||
|
||||
|
||||
class SequencePriority(str, Enum):
    """Priority of a common sequence when ranking extraction candidates."""
    HIGH = "high"
    MEDIUM = "medium"
    LOW = "low"
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Conditions Visuelles
|
||||
# ============================================================================
|
||||
|
||||
@dataclass
class VisualCondition:
    """
    Condition based on the visual state of the screen.

    Used by conditional loops and by branch evaluation.
    """
    condition_type: str  # element_present, element_absent, text_equals, text_contains
    target_element: Optional[str] = None  # id or template of the target element
    expected_text: Optional[str] = None   # expected text for textual conditions

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this condition to a plain dictionary."""
        return dict(
            condition_type=self.condition_type,
            target_element=self.target_element,
            expected_text=self.expected_text,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'VisualCondition':
        """Rebuild a VisualCondition from its dictionary form."""
        return cls(
            data["condition_type"],
            data.get("target_element"),
            data.get("expected_text"),
        )
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Configuration de Chaînage
|
||||
# ============================================================================
|
||||
|
||||
@dataclass
class ChainConfig:
    """
    Chaining configuration between two workflows.

    Describes how one workflow hands over to the next one in a chain.
    """
    source_workflow_id: str
    target_workflow_id: str
    variable_mapping: Dict[str, str] = field(default_factory=dict)  # source_var -> target_var
    on_failure: str = "abort"  # retry, skip, abort
    max_retries: int = 3

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this chain link to a plain dictionary."""
        return dict(
            source_workflow_id=self.source_workflow_id,
            target_workflow_id=self.target_workflow_id,
            variable_mapping=self.variable_mapping,
            on_failure=self.on_failure,
            max_retries=self.max_retries,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'ChainConfig':
        """Rebuild a ChainConfig from its dictionary form."""
        return cls(
            data["source_workflow_id"],
            data["target_workflow_id"],
            variable_mapping=data.get("variable_mapping", {}),
            on_failure=data.get("on_failure", "abort"),
            max_retries=data.get("max_retries", 3),
        )
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Configuration de Boucles
|
||||
# ============================================================================
|
||||
|
||||
@dataclass
class LoopConfig:
    """
    Configuration of a loop inside a workflow.

    Supports counted loops ("count") and loops driven by a visual
    exit condition ("condition").
    """
    loop_id: str
    loop_type: str  # count, condition
    max_iterations: Optional[int] = None  # for type "count"
    exit_condition: Optional[VisualCondition] = None  # for type "condition"
    body_nodes: List[str] = field(default_factory=list)
    safety_limit: int = 1000  # hard cap against runaway loops

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this loop configuration to a plain dictionary."""
        cond = self.exit_condition
        return dict(
            loop_id=self.loop_id,
            loop_type=self.loop_type,
            max_iterations=self.max_iterations,
            exit_condition=cond.to_dict() if cond else None,
            body_nodes=self.body_nodes,
            safety_limit=self.safety_limit,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'LoopConfig':
        """Rebuild a LoopConfig from its dictionary form."""
        raw_cond = data.get("exit_condition")
        cond = VisualCondition.from_dict(raw_cond) if raw_cond else None
        return cls(
            data["loop_id"],
            data["loop_type"],
            max_iterations=data.get("max_iterations"),
            exit_condition=cond,
            body_nodes=data.get("body_nodes", []),
            safety_limit=data.get("safety_limit", 1000),
        )
|
||||
|
||||
|
||||
@dataclass
class LoopState:
    """
    Runtime state of a loop.

    Tracks the iteration counter and the last condition outcome while
    the loop is executing.
    """
    loop_id: str
    current_iteration: int = 0
    started_at: datetime = field(default_factory=datetime.now)
    last_condition_result: Optional[bool] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this loop state to a plain dictionary."""
        return dict(
            loop_id=self.loop_id,
            current_iteration=self.current_iteration,
            started_at=self.started_at.isoformat(),
            last_condition_result=self.last_condition_result,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'LoopState':
        """Rebuild a LoopState from its dictionary form."""
        raw = data.get("started_at")
        begun = datetime.fromisoformat(raw) if raw else datetime.now()
        return cls(
            data["loop_id"],
            current_iteration=data.get("current_iteration", 0),
            started_at=begun,
            last_condition_result=data.get("last_condition_result"),
        )
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Configuration de Branchements Conditionnels
|
||||
# ============================================================================
|
||||
|
||||
@dataclass
class BranchConfig:
    """
    One branch of a conditional node.

    Pairs a visual condition with the node to jump to when it holds.
    """
    branch_id: str
    condition: VisualCondition
    target_node: str
    priority: int = 0  # evaluation order (lower = evaluated first)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this branch to a plain dictionary."""
        return dict(
            branch_id=self.branch_id,
            condition=self.condition.to_dict(),
            target_node=self.target_node,
            priority=self.priority,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'BranchConfig':
        """Rebuild a BranchConfig from its dictionary form."""
        return cls(
            data["branch_id"],
            VisualCondition.from_dict(data["condition"]),
            data["target_node"],
            priority=data.get("priority", 0),
        )
|
||||
|
||||
|
||||
@dataclass
class ConditionalNode:
    """
    Conditional node holding several branches.

    Branches are evaluated by priority; the first one whose condition
    matches wins, otherwise the default branch is taken.
    """
    node_id: str
    branches: List[BranchConfig] = field(default_factory=list)
    default_branch: Optional[str] = None  # fallback node when no branch matches

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this node to a plain dictionary."""
        return dict(
            node_id=self.node_id,
            branches=[branch.to_dict() for branch in self.branches],
            default_branch=self.default_branch,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'ConditionalNode':
        """Rebuild a ConditionalNode from its dictionary form."""
        parsed = [BranchConfig.from_dict(b) for b in data.get("branches", [])]
        return cls(
            data["node_id"],
            branches=parsed,
            default_branch=data.get("default_branch"),
        )
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Configuration des Déclencheurs
|
||||
# ============================================================================
|
||||
|
||||
@dataclass
class ScheduleTrigger:
    """
    Time-based trigger.

    Fires on a cron expression or at a fixed interval in seconds.
    """
    trigger_id: str
    workflow_id: str
    cron_expression: Optional[str] = None
    interval_seconds: Optional[int] = None
    enabled: bool = True

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this trigger, tagging it with its type."""
        return dict(
            trigger_id=self.trigger_id,
            trigger_type="schedule",
            workflow_id=self.workflow_id,
            cron_expression=self.cron_expression,
            interval_seconds=self.interval_seconds,
            enabled=self.enabled,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'ScheduleTrigger':
        """Rebuild a ScheduleTrigger from its dictionary form."""
        return cls(
            data["trigger_id"],
            data["workflow_id"],
            cron_expression=data.get("cron_expression"),
            interval_seconds=data.get("interval_seconds"),
            enabled=data.get("enabled", True),
        )
|
||||
|
||||
|
||||
@dataclass
class FileTrigger:
    """
    File-based trigger.

    Watches a directory and fires the workflow when a file matching the
    glob pattern appears.
    """
    trigger_id: str
    workflow_id: str
    watch_directory: str
    file_pattern: str  # glob pattern (e.g. "*.csv", "report_*.xlsx")
    enabled: bool = True

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this trigger, tagging it with its type."""
        return dict(
            trigger_id=self.trigger_id,
            trigger_type="file",
            workflow_id=self.workflow_id,
            watch_directory=self.watch_directory,
            file_pattern=self.file_pattern,
            enabled=self.enabled,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'FileTrigger':
        """Rebuild a FileTrigger from its dictionary form."""
        return cls(
            data["trigger_id"],
            data["workflow_id"],
            data["watch_directory"],
            data["file_pattern"],
            enabled=data.get("enabled", True),
        )
|
||||
|
||||
|
||||
@dataclass
class VisualTrigger:
    """
    Screen-detection trigger.

    Periodically captures the screen and fires the workflow when the
    target element is detected.
    """
    trigger_id: str
    workflow_id: str
    target_element: str  # id or template of the element to detect
    check_interval_seconds: int = 5
    enabled: bool = True

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this trigger, tagging it with its type."""
        return dict(
            trigger_id=self.trigger_id,
            trigger_type="visual",
            workflow_id=self.workflow_id,
            target_element=self.target_element,
            check_interval_seconds=self.check_interval_seconds,
            enabled=self.enabled,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'VisualTrigger':
        """Rebuild a VisualTrigger from its dictionary form."""
        return cls(
            data["trigger_id"],
            data["workflow_id"],
            data["target_element"],
            check_interval_seconds=data.get("check_interval_seconds", 5),
            enabled=data.get("enabled", True),
        )
|
||||
|
||||
|
||||
@dataclass
class TriggerContext:
    """
    Context handed to a workflow when it is triggered.

    Carries information about the event that fired the workflow.
    """
    trigger_id: str
    trigger_type: str  # schedule, file, visual
    fired_at: datetime = field(default_factory=datetime.now)
    file_path: Optional[str] = None  # set by FileTrigger
    detected_element: Optional[Dict[str, Any]] = None  # set by VisualTrigger

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this context to a plain dictionary."""
        return dict(
            trigger_id=self.trigger_id,
            trigger_type=self.trigger_type,
            fired_at=self.fired_at.isoformat(),
            file_path=self.file_path,
            detected_element=self.detected_element,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'TriggerContext':
        """Rebuild a TriggerContext from its dictionary form."""
        raw = data.get("fired_at")
        fired = datetime.fromisoformat(raw) if raw else datetime.now()
        return cls(
            data["trigger_id"],
            data["trigger_type"],
            fired_at=fired,
            file_path=data.get("file_path"),
            detected_element=data.get("detected_element"),
        )
|
||||
|
||||
|
||||
# Type alias covering every concrete trigger kind.
TriggerConfig = Union[ScheduleTrigger, FileTrigger, VisualTrigger]
|
||||
|
||||
|
||||
def trigger_from_dict(data: Dict[str, Any]) -> TriggerConfig:
    """Factory building the right trigger class from a dictionary.

    Dispatches on the "trigger_type" key (defaulting to "schedule")
    and raises ValueError for an unknown type.
    """
    kind = data.get("trigger_type", "schedule")
    if kind == "schedule":
        return ScheduleTrigger.from_dict(data)
    if kind == "file":
        return FileTrigger.from_dict(data)
    if kind == "visual":
        return VisualTrigger.from_dict(data)
    raise ValueError(f"Unknown trigger type: {kind}")
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Configuration des Sous-Workflows
|
||||
# ============================================================================
|
||||
|
||||
@dataclass
class ParameterDef:
    """
    Definition of an input or output parameter of a sub-workflow.
    """
    name: str
    param_type: str = "string"  # string, number, boolean, list, dict
    required: bool = True
    default_value: Optional[Any] = None
    description: str = ""

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this parameter definition to a plain dictionary."""
        return dict(
            name=self.name,
            param_type=self.param_type,
            required=self.required,
            default_value=self.default_value,
            description=self.description,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'ParameterDef':
        """Rebuild a ParameterDef from its dictionary form."""
        return cls(
            data["name"],
            param_type=data.get("param_type", "string"),
            required=data.get("required", True),
            default_value=data.get("default_value"),
            description=data.get("description", ""),
        )
|
||||
|
||||
|
||||
@dataclass
class SubWorkflowDefinition:
    """
    Definition of a reusable sub-workflow.

    Declares the input parameters it expects and the output values it
    produces.
    """
    workflow_id: str
    name: str
    input_parameters: List[ParameterDef] = field(default_factory=list)
    output_values: List[ParameterDef] = field(default_factory=list)
    description: str = ""

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this definition to a plain dictionary."""
        return dict(
            workflow_id=self.workflow_id,
            name=self.name,
            input_parameters=[param.to_dict() for param in self.input_parameters],
            output_values=[param.to_dict() for param in self.output_values],
            description=self.description,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'SubWorkflowDefinition':
        """Rebuild a SubWorkflowDefinition from its dictionary form."""
        inputs = [ParameterDef.from_dict(p) for p in data.get("input_parameters", [])]
        outputs = [ParameterDef.from_dict(p) for p in data.get("output_values", [])]
        return cls(
            data["workflow_id"],
            data["name"],
            input_parameters=inputs,
            output_values=outputs,
            description=data.get("description", ""),
        )
|
||||
|
||||
|
||||
@dataclass
class ReferenceNode:
    """
    Node that references a sub-workflow.

    Lets a parent workflow invoke a sub-workflow, wiring its variables
    to the sub-workflow's parameters.
    """
    node_id: str
    sub_workflow_id: str
    input_bindings: Dict[str, str] = field(default_factory=dict)   # param_name -> variable_name
    output_bindings: Dict[str, str] = field(default_factory=dict)  # param_name -> variable_name

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this reference node to a plain dictionary."""
        return dict(
            node_id=self.node_id,
            sub_workflow_id=self.sub_workflow_id,
            input_bindings=self.input_bindings,
            output_bindings=self.output_bindings,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'ReferenceNode':
        """Rebuild a ReferenceNode from its dictionary form."""
        return cls(
            data["node_id"],
            data["sub_workflow_id"],
            input_bindings=data.get("input_bindings", {}),
            output_bindings=data.get("output_bindings", {}),
        )
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Contexte d'Exécution et Logging
|
||||
# ============================================================================
|
||||
|
||||
@dataclass
class LogEntry:
    """
    Entry of the unified execution log.

    Records one event raised during the execution of a workflow chain.
    """
    timestamp: datetime
    workflow_id: str
    node_id: str
    event_type: str  # start, end, action, error, warning, info
    details: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this log entry to a plain dictionary."""
        return dict(
            timestamp=self.timestamp.isoformat(),
            workflow_id=self.workflow_id,
            node_id=self.node_id,
            event_type=self.event_type,
            details=self.details,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'LogEntry':
        """Rebuild a LogEntry from its dictionary form."""
        raw = data.get("timestamp")
        when = datetime.fromisoformat(raw) if raw else datetime.now()
        return cls(
            when,
            data["workflow_id"],
            data["node_id"],
            data["event_type"],
            details=data.get("details", {}),
        )
|
||||
|
||||
|
||||
@dataclass
class VariableOverride:
    """
    Record of a variable override.

    Used to trace variable-name collisions across chained workflows.
    """
    variable_name: str
    old_value: Any
    new_value: Any
    source_workflow: str
    timestamp: datetime = field(default_factory=datetime.now)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this override record to a plain dictionary."""
        return dict(
            variable_name=self.variable_name,
            old_value=self.old_value,
            new_value=self.new_value,
            source_workflow=self.source_workflow,
            timestamp=self.timestamp.isoformat(),
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'VariableOverride':
        """Rebuild a VariableOverride from its dictionary form."""
        raw = data.get("timestamp")
        when = datetime.fromisoformat(raw) if raw else datetime.now()
        return cls(
            data["variable_name"],
            data.get("old_value"),
            data.get("new_value"),
            data["source_workflow"],
            timestamp=when,
        )
|
||||
|
||||
|
||||
@dataclass
class ExecutionContext:
    """
    Execution context shared by the workflows of a chain.

    Holds the global state (variables, log, trigger info) while the
    chain is running.
    """
    chain_id: str
    current_workflow_id: str
    global_variables: Dict[str, Any] = field(default_factory=dict)
    execution_log: List[LogEntry] = field(default_factory=list)
    trigger_context: Optional[TriggerContext] = None
    started_at: datetime = field(default_factory=datetime.now)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this context to a plain dictionary."""
        trig = self.trigger_context
        return dict(
            chain_id=self.chain_id,
            current_workflow_id=self.current_workflow_id,
            global_variables=self.global_variables,
            execution_log=[entry.to_dict() for entry in self.execution_log],
            trigger_context=trig.to_dict() if trig else None,
            started_at=self.started_at.isoformat(),
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'ExecutionContext':
        """Rebuild an ExecutionContext from its dictionary form."""
        entries = [LogEntry.from_dict(e) for e in data.get("execution_log", [])]
        raw_trigger = data.get("trigger_context")
        trigger = TriggerContext.from_dict(raw_trigger) if raw_trigger else None
        raw_start = data.get("started_at")
        begun = datetime.fromisoformat(raw_start) if raw_start else datetime.now()
        return cls(
            data["chain_id"],
            data["current_workflow_id"],
            global_variables=data.get("global_variables", {}),
            execution_log=entries,
            trigger_context=trigger,
            started_at=begun,
        )
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Validation
|
||||
# ============================================================================
|
||||
|
||||
@dataclass
class ValidationResult:
    """
    Outcome of validating a configuration.

    Used for chains, dependency graphs, etc.
    """
    is_valid: bool
    errors: List[str] = field(default_factory=list)
    warnings: List[str] = field(default_factory=list)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this validation result to a plain dictionary."""
        return dict(
            is_valid=self.is_valid,
            errors=self.errors,
            warnings=self.warnings,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'ValidationResult':
        """Rebuild a ValidationResult from its dictionary form."""
        return cls(
            data["is_valid"],
            errors=data.get("errors", []),
            warnings=data.get("warnings", []),
        )
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Fusion de Workflows
|
||||
# ============================================================================
|
||||
|
||||
@dataclass
class NodeConflict:
    """
    Conflict between two nodes during a merge.

    Signals that two workflows attach different actions to the same node.
    """
    node_id: str
    action_a: str
    action_b: str

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this conflict to a plain dictionary."""
        return dict(
            node_id=self.node_id,
            action_a=self.action_a,
            action_b=self.action_b,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'NodeConflict':
        """Rebuild a NodeConflict from its dictionary form."""
        return cls(data["node_id"], data["action_a"], data["action_b"])
|
||||
|
||||
|
||||
@dataclass
class MergeCandidate:
    """
    Candidate pair of workflows for merging.

    Carries the similarity score, the shared nodes and any conflicts.
    """
    workflow_a_id: str
    workflow_b_id: str
    similarity_score: float
    shared_nodes: List[str] = field(default_factory=list)
    conflicts: List[NodeConflict] = field(default_factory=list)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this candidate to a plain dictionary."""
        return dict(
            workflow_a_id=self.workflow_a_id,
            workflow_b_id=self.workflow_b_id,
            similarity_score=self.similarity_score,
            shared_nodes=self.shared_nodes,
            conflicts=[conflict.to_dict() for conflict in self.conflicts],
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'MergeCandidate':
        """Rebuild a MergeCandidate from its dictionary form."""
        parsed = [NodeConflict.from_dict(c) for c in data.get("conflicts", [])]
        return cls(
            data["workflow_a_id"],
            data["workflow_b_id"],
            data["similarity_score"],
            shared_nodes=data.get("shared_nodes", []),
            conflicts=parsed,
        )
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Extraction de Séquences
|
||||
# ============================================================================
|
||||
|
||||
@dataclass
class SequenceOccurrence:
    """
    Occurrence of a sequence inside a workflow.

    Points to where a common sequence was found.
    """
    workflow_id: str
    start_index: int
    end_index: int

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this occurrence to a plain dictionary."""
        return dict(
            workflow_id=self.workflow_id,
            start_index=self.start_index,
            end_index=self.end_index,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'SequenceOccurrence':
        """Rebuild a SequenceOccurrence from its dictionary form."""
        return cls(data["workflow_id"], data["start_index"], data["end_index"])
|
||||
|
||||
|
||||
@dataclass
class CommonSequence:
    """
    Common sequence detected in several workflows.

    A candidate for extraction into a sub-workflow.
    """
    nodes: List[str]
    occurrences: List[SequenceOccurrence] = field(default_factory=list)
    priority: str = "medium"  # high, medium, low

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this sequence to a plain dictionary."""
        return dict(
            nodes=self.nodes,
            occurrences=[occ.to_dict() for occ in self.occurrences],
            priority=self.priority,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'CommonSequence':
        """Rebuild a CommonSequence from its dictionary form."""
        parsed = [SequenceOccurrence.from_dict(o) for o in data.get("occurrences", [])]
        return cls(
            data["nodes"],
            occurrences=parsed,
            priority=data.get("priority", "medium"),
        )
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Configuration Complète de Composition
|
||||
# ============================================================================
|
||||
|
||||
@dataclass
class WorkflowCompositionConfig:
    """
    Complete composition configuration for a set of workflows.

    Bundles every chaining, loop, conditional, trigger and
    sub-workflow configuration into one serializable object.
    """
    chains: List[ChainConfig] = field(default_factory=list)
    sub_workflows: Dict[str, SubWorkflowDefinition] = field(default_factory=dict)
    references: Dict[str, List[ReferenceNode]] = field(default_factory=dict)
    loops: Dict[str, LoopConfig] = field(default_factory=dict)
    conditionals: Dict[str, ConditionalNode] = field(default_factory=dict)
    triggers: List[TriggerConfig] = field(default_factory=list)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the whole configuration to plain dictionaries."""
        return {
            "chains": [chain.to_dict() for chain in self.chains],
            "sub_workflows": {key: sw.to_dict() for key, sw in self.sub_workflows.items()},
            "references": {
                key: [ref.to_dict() for ref in refs]
                for key, refs in self.references.items()
            },
            "loops": {key: loop.to_dict() for key, loop in self.loops.items()},
            "conditionals": {key: cond.to_dict() for key, cond in self.conditionals.items()},
            "triggers": [trig.to_dict() for trig in self.triggers],
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'WorkflowCompositionConfig':
        """Rebuild a configuration from its dictionary form."""
        return cls(
            chains=[ChainConfig.from_dict(raw) for raw in data.get("chains", [])],
            sub_workflows={
                key: SubWorkflowDefinition.from_dict(raw)
                for key, raw in data.get("sub_workflows", {}).items()
            },
            references={
                key: [ReferenceNode.from_dict(raw) for raw in raws]
                for key, raws in data.get("references", {}).items()
            },
            loops={
                key: LoopConfig.from_dict(raw)
                for key, raw in data.get("loops", {}).items()
            },
            conditionals={
                key: ConditionalNode.from_dict(raw)
                for key, raw in data.get("conditionals", {}).items()
            },
            # Triggers are polymorphic; the module-level factory picks the type.
            triggers=[trigger_from_dict(raw) for raw in data.get("triggers", [])],
        )
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Résultats d'Exécution
|
||||
# ============================================================================
|
||||
|
||||
@dataclass
class ChainResult:
    """
    Result of executing a chain of workflows.
    """
    chain_id: str
    success: bool
    workflows_executed: List[str] = field(default_factory=list)
    final_variables: Dict[str, Any] = field(default_factory=dict)
    execution_log: List[LogEntry] = field(default_factory=list)
    error_message: Optional[str] = None
    started_at: datetime = field(default_factory=datetime.now)
    completed_at: Optional[datetime] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the result; datetimes become ISO-8601 strings."""
        completed = self.completed_at.isoformat() if self.completed_at else None
        return {
            "chain_id": self.chain_id,
            "success": self.success,
            "workflows_executed": self.workflows_executed,
            "final_variables": self.final_variables,
            "execution_log": [entry.to_dict() for entry in self.execution_log],
            "error_message": self.error_message,
            "started_at": self.started_at.isoformat(),
            "completed_at": completed,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'ChainResult':
        """Rebuild a ChainResult; a missing started_at defaults to now."""
        raw_started = data.get("started_at")
        raw_completed = data.get("completed_at")
        return cls(
            chain_id=data["chain_id"],
            success=data["success"],
            workflows_executed=data.get("workflows_executed", []),
            final_variables=data.get("final_variables", {}),
            execution_log=[LogEntry.from_dict(raw) for raw in data.get("execution_log", [])],
            error_message=data.get("error_message"),
            started_at=datetime.fromisoformat(raw_started) if raw_started else datetime.now(),
            completed_at=datetime.fromisoformat(raw_completed) if raw_completed else None,
        )
|
||||
|
||||
|
||||
@dataclass
class IterationResult:
    """
    Result of a single loop iteration.
    """
    loop_id: str
    iteration: int
    success: bool
    should_continue: bool
    error_message: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dictionary."""
        return {
            "loop_id": self.loop_id,
            "iteration": self.iteration,
            "success": self.success,
            "should_continue": self.should_continue,
            "error_message": self.error_message,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'IterationResult':
        """Rebuild an IterationResult from its dictionary form."""
        return cls(
            loop_id=data["loop_id"],
            iteration=data["iteration"],
            success=data["success"],
            should_continue=data["should_continue"],
            error_message=data.get("error_message"),
        )
|
||||
|
||||
|
||||
@dataclass
class ExecutionResult:
    """
    Generic execution result (sub-workflow, action, ...).
    """
    success: bool
    output_values: Dict[str, Any] = field(default_factory=dict)
    error_message: Optional[str] = None
    error_context: Optional[Dict[str, Any]] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dictionary."""
        return {
            "success": self.success,
            "output_values": self.output_values,
            "error_message": self.error_message,
            "error_context": self.error_context,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'ExecutionResult':
        """Rebuild an ExecutionResult; optional keys fall back to defaults."""
        return cls(
            success=data["success"],
            output_values=data.get("output_values", {}),
            error_message=data.get("error_message"),
            error_context=data.get("error_context"),
        )
|
||||
259
core/workflow/conditional_evaluator.py
Normal file
259
core/workflow/conditional_evaluator.py
Normal file
@@ -0,0 +1,259 @@
|
||||
"""
|
||||
Conditional Evaluator - Évaluation des conditions et branchements
|
||||
|
||||
Ce module gère l'évaluation des conditions visuelles et détermine
|
||||
les branches à exécuter dans les workflows conditionnels.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Optional, Any, Dict
|
||||
|
||||
from .composition_models import (
|
||||
VisualCondition, ConditionalNode, BranchConfig
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class NoMatchingBranchError(Exception):
    """Raised when no branch matches and the node defines no default branch."""

    def __init__(self, node_id: str):
        self.node_id = node_id
        message = (
            f"Aucune branche ne correspond pour le node '{node_id}' "
            f"et aucune branche par défaut n'est définie"
        )
        super().__init__(message)
|
||||
|
||||
|
||||
class ConditionEvaluationError(Exception):
    """Raised when a visual condition cannot be evaluated."""

    def __init__(self, condition: VisualCondition, reason: str):
        # Keep the failing condition and the reason for callers to inspect.
        self.condition = condition
        self.reason = reason
        super().__init__(f"Erreur d'évaluation de condition: {reason}")
|
||||
|
||||
|
||||
class ConditionalEvaluator:
    """
    Evaluate visual conditions and decide which branch to execute.

    Supported condition types:
    - element_present: the target element is visible in the screen state
    - element_absent: the target element is not visible
    - text_equals: some detected text matches exactly
    - text_contains: some detected text contains a substring
    """

    def __init__(self, target_resolver: Optional[Any] = None):
        """
        Initialize the evaluator.

        Args:
            target_resolver: optional resolver used for visual target detection
        """
        self.target_resolver = target_resolver

    def evaluate_condition(
        self,
        condition: VisualCondition,
        screen_state: Optional[Dict[str, Any]] = None
    ) -> bool:
        """
        Evaluate one visual condition against the current screen state.

        Args:
            condition: condition to evaluate
            screen_state: detected elements/texts; treated as empty when None

        Returns:
            True when the condition holds, False otherwise. Unknown
            condition types are logged and evaluate to False.
        """
        state = {} if screen_state is None else screen_state

        # Dispatch table: condition type -> handler method.
        dispatch = {
            "element_present": self._evaluate_element_present,
            "element_absent": self._evaluate_element_absent,
            "text_equals": self._evaluate_text_equals,
            "text_contains": self._evaluate_text_contains,
        }

        condition_type = condition.condition_type
        handler = dispatch.get(condition_type)
        if handler is None:
            logger.warning(f"Type de condition inconnu: {condition_type}")
            return False
        return handler(condition, state)

    def _evaluate_element_present(
        self,
        condition: VisualCondition,
        screen_state: Dict[str, Any]
    ) -> bool:
        """Return True when the target element is found by id, name or type."""
        target = condition.target_element
        if not target:
            return False

        # Direct hit in the list of detected element ids.
        if target in screen_state.get("element_ids", []):
            return True

        # Otherwise match against each detected element's id/name/type.
        for element in screen_state.get("elements", []):
            if isinstance(element, dict) and target in (
                element.get("id"), element.get("name"), element.get("type")
            ):
                return True

        return False

    def _evaluate_element_absent(
        self,
        condition: VisualCondition,
        screen_state: Dict[str, Any]
    ) -> bool:
        """Return True when the target element is NOT present."""
        return not self._evaluate_element_present(condition, screen_state)

    def _evaluate_text_equals(
        self,
        condition: VisualCondition,
        screen_state: Dict[str, Any]
    ) -> bool:
        """Return True when a detected text equals the expected text exactly."""
        expected = condition.expected_text
        if not expected:
            return False
        if expected in screen_state.get("texts", []):
            return True
        # Fall back to the globally detected text.
        return screen_state.get("detected_text", "") == expected

    def _evaluate_text_contains(
        self,
        condition: VisualCondition,
        screen_state: Dict[str, Any]
    ) -> bool:
        """Return True when a detected text contains the expected substring."""
        expected = condition.expected_text
        if not expected:
            return False
        if any(expected in text for text in screen_state.get("texts", [])):
            return True
        # Fall back to the globally detected text.
        return expected in screen_state.get("detected_text", "")

    def evaluate_node(
        self,
        node: ConditionalNode,
        screen_state: Optional[Dict[str, Any]] = None
    ) -> str:
        """
        Select the branch to follow for a conditional node.

        Branches are tried in ascending priority order (smaller value
        first); the first branch whose condition holds wins. When none
        matches, the node's default branch is used.

        Args:
            node: conditional node to evaluate
            screen_state: current screen state

        Returns:
            Target node id of the selected branch.

        Raises:
            NoMatchingBranchError: no branch matched and no default exists.
        """
        state = {} if screen_state is None else screen_state

        for branch in sorted(node.branches, key=lambda b: b.priority):
            if self.evaluate_condition(branch.condition, state):
                logger.debug(
                    f"Node '{node.node_id}': branche '{branch.branch_id}' sélectionnée "
                    f"(priorité {branch.priority})"
                )
                return branch.target_node

        if node.default_branch:
            logger.debug(
                f"Node '{node.node_id}': branche par défaut '{node.default_branch}' sélectionnée"
            )
            return node.default_branch

        raise NoMatchingBranchError(node.node_id)

    def evaluate_branches(
        self,
        node: ConditionalNode,
        screen_state: Optional[Dict[str, Any]] = None
    ) -> Dict[str, bool]:
        """
        Evaluate every branch of a node (debugging/visualization aid).

        Args:
            node: conditional node
            screen_state: current screen state

        Returns:
            Mapping branch_id -> condition result.
        """
        state = {} if screen_state is None else screen_state
        return {
            branch.branch_id: self.evaluate_condition(branch.condition, state)
            for branch in node.branches
        }

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the evaluator configuration."""
        return {
            "has_target_resolver": self.target_resolver is not None
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any], target_resolver: Optional[Any] = None) -> 'ConditionalEvaluator':
        """Deserialize the evaluator configuration."""
        return cls(target_resolver=target_resolver)
|
||||
417
core/workflow/dependency_graph.py
Normal file
417
core/workflow/dependency_graph.py
Normal file
@@ -0,0 +1,417 @@
|
||||
"""
|
||||
Dependency Graph - Gestion des dépendances entre workflows
|
||||
|
||||
Ce module gère le graphe de dépendances entre workflows et sous-workflows.
|
||||
Il permet de:
|
||||
- Ajouter/supprimer des dépendances
|
||||
- Détecter les dépendances circulaires
|
||||
- Obtenir les dépendants et dépendances d'un workflow
|
||||
- Calculer les statistiques d'utilisation
|
||||
"""
|
||||
|
||||
import logging
from typing import Dict, Set, List, Any, Optional
from dataclasses import dataclass, field

logger = logging.getLogger(__name__)


@dataclass
class UsageStats:
    """Usage statistics for one workflow."""
    dependency_count: int = 0   # number of sub-workflows it uses
    dependent_count: int = 0    # number of workflows that use it
    total_references: int = 0   # total number of references to it

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dictionary."""
        return {
            "dependency_count": self.dependency_count,
            "dependent_count": self.dependent_count,
            "total_references": self.total_references,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'UsageStats':
        """Rebuild stats from a dictionary; missing keys default to 0."""
        return cls(
            dependency_count=data.get("dependency_count", 0),
            dependent_count=data.get("dependent_count", 0),
            total_references=data.get("total_references", 0),
        )


class CircularDependencyError(Exception):
    """Raised when adding a dependency would create a cycle."""

    def __init__(self, cycle: List[str]):
        self.cycle = cycle
        super().__init__(f"Dépendance circulaire détectée: {' -> '.join(cycle)}")


class DependencyGraph:
    """
    Dependency graph between workflows and sub-workflows.

    Two views are kept in sync:
    - dependencies: workflow -> sub-workflows it uses
    - dependents: sub-workflow -> workflows that use it

    Example:
        >>> graph = DependencyGraph()
        >>> graph.add_dependency("main_workflow", "login_subworkflow")
        >>> graph.add_dependency("main_workflow", "logout_subworkflow")
        >>> sorted(graph.get_dependencies("main_workflow"))
        ['login_subworkflow', 'logout_subworkflow']
        >>> graph.get_dependents("login_subworkflow")
        {'main_workflow'}
    """

    def __init__(self):
        # workflow_id -> sub-workflow ids it uses
        self._dependencies: Dict[str, Set[str]] = {}
        # sub_workflow_id -> workflow ids that use it
        self._dependents: Dict[str, Set[str]] = {}
        # how many times each sub-workflow has been referenced
        self._reference_counts: Dict[str, int] = {}

    def add_dependency(self, workflow_id: str, sub_workflow_id: str) -> None:
        """
        Register that `workflow_id` uses `sub_workflow_id`.

        Args:
            workflow_id: parent workflow id
            sub_workflow_id: id of the sub-workflow being used

        Raises:
            CircularDependencyError: if the new edge would close a cycle.
        """
        # Refuse edges that would create a cycle.
        if self.has_circular_dependency(workflow_id, sub_workflow_id):
            raise CircularDependencyError(self._find_cycle(workflow_id, sub_workflow_id))

        self._dependencies.setdefault(workflow_id, set()).add(sub_workflow_id)
        self._dependents.setdefault(sub_workflow_id, set()).add(workflow_id)
        self._reference_counts[sub_workflow_id] = \
            self._reference_counts.get(sub_workflow_id, 0) + 1

        logger.debug(f"Dépendance ajoutée: {workflow_id} -> {sub_workflow_id}")

    def remove_dependency(self, workflow_id: str, sub_workflow_id: str) -> bool:
        """
        Remove the dependency edge between the two workflows, if present.

        Args:
            workflow_id: parent workflow id
            sub_workflow_id: sub-workflow id

        Returns:
            True when the edge existed and was removed, False otherwise.
        """
        deps = self._dependencies.get(workflow_id)
        if deps is None or sub_workflow_id not in deps:
            return False

        deps.discard(sub_workflow_id)
        if not deps:
            del self._dependencies[workflow_id]

        users = self._dependents.get(sub_workflow_id)
        if users is not None:
            users.discard(workflow_id)
            if not users:
                del self._dependents[sub_workflow_id]

        count = self._reference_counts.get(sub_workflow_id)
        if count is not None:
            if count <= 1:
                del self._reference_counts[sub_workflow_id]
            else:
                self._reference_counts[sub_workflow_id] = count - 1

        logger.debug(f"Dépendance supprimée: {workflow_id} -> {sub_workflow_id}")
        return True

    def get_dependencies(self, workflow_id: str) -> Set[str]:
        """Return a copy of the sub-workflows used by `workflow_id`."""
        return set(self._dependencies.get(workflow_id, ()))

    def get_dependents(self, workflow_id: str) -> Set[str]:
        """Return a copy of the workflows that use `workflow_id`."""
        return set(self._dependents.get(workflow_id, ()))

    def has_circular_dependency(self, workflow_id: str, sub_workflow_id: str) -> bool:
        """
        Check whether adding workflow -> sub_workflow would create a cycle.

        DFS from `sub_workflow_id` through its own dependencies; a cycle
        would exist iff it can already reach `workflow_id`.

        Args:
            workflow_id: parent workflow id
            sub_workflow_id: sub-workflow id to be added

        Returns:
            True when the addition would create a cycle, False otherwise.
        """
        if workflow_id == sub_workflow_id:
            return True  # trivial self-reference

        seen: Set[str] = set()
        frontier = [sub_workflow_id]
        while frontier:
            node = frontier.pop()
            if node == workflow_id:
                return True
            if node in seen:
                continue
            seen.add(node)
            frontier.extend(
                nxt for nxt in self._dependencies.get(node, set()) if nxt not in seen
            )
        return False

    def _find_cycle(self, workflow_id: str, sub_workflow_id: str) -> List[str]:
        """
        Return the cycle path used in the error message.

        BFS gives the shortest path from `sub_workflow_id` back to
        `workflow_id`; the offending edge closes the loop.
        """
        if workflow_id == sub_workflow_id:
            return [workflow_id, workflow_id]

        from collections import deque

        seen: Set[str] = set()
        queue = deque([(sub_workflow_id, [sub_workflow_id])])
        while queue:
            node, path = queue.popleft()
            if node == workflow_id:
                return [workflow_id] + path
            if node in seen:
                continue
            seen.add(node)
            for nxt in self._dependencies.get(node, set()):
                if nxt not in seen:
                    queue.append((nxt, path + [nxt]))

        # Unreachable when has_circular_dependency() returned True.
        return [workflow_id, sub_workflow_id, workflow_id]

    def get_usage_stats(self, workflow_id: str) -> UsageStats:
        """Return dependency/dependent/reference counters for a workflow."""
        return UsageStats(
            dependency_count=len(self._dependencies.get(workflow_id, set())),
            dependent_count=len(self._dependents.get(workflow_id, set())),
            total_references=self._reference_counts.get(workflow_id, 0),
        )

    def get_all_workflows(self) -> Set[str]:
        """Return every workflow id known to the graph."""
        return set(self._dependencies) | set(self._dependents)

    def get_root_workflows(self) -> Set[str]:
        """Return workflows that no other workflow uses."""
        return {wf for wf in self.get_all_workflows() if not self._dependents.get(wf)}

    def get_leaf_workflows(self) -> Set[str]:
        """Return workflows that use no sub-workflow."""
        return {wf for wf in self.get_all_workflows() if not self._dependencies.get(wf)}

    def get_transitive_dependencies(self, workflow_id: str) -> Set[str]:
        """Return every sub-workflow used directly or indirectly."""
        return self._transitive_closure(workflow_id, self._dependencies)

    def get_transitive_dependents(self, workflow_id: str) -> Set[str]:
        """Return every workflow using this one, directly or indirectly."""
        return self._transitive_closure(workflow_id, self._dependents)

    @staticmethod
    def _transitive_closure(start: str, edges: Dict[str, Set[str]]) -> Set[str]:
        """Iterative reachability closure of `start` over an adjacency map."""
        closure: Set[str] = set()
        pending = list(edges.get(start, set()))
        while pending:
            node = pending.pop()
            if node not in closure:
                closure.add(node)
                pending.extend(edges.get(node, set()))
        return closure

    def get_deletion_warnings(self, workflow_id: str) -> List[str]:
        """
        Return human-readable warnings about deleting a workflow.

        Args:
            workflow_id: id of the workflow about to be deleted

        Returns:
            List of warning messages (empty when deletion is safe).
        """
        warnings: List[str] = []
        direct = self.get_dependents(workflow_id)

        if direct:
            warnings.append(
                f"Le workflow '{workflow_id}' est utilisé par {len(direct)} workflow(s): "
                f"{', '.join(sorted(direct))}"
            )

        indirect = self.get_transitive_dependents(workflow_id) - direct
        if indirect:
            warnings.append(
                f"La suppression affectera également {len(indirect)} workflow(s) indirectement: "
                f"{', '.join(sorted(indirect))}"
            )

        return warnings

    def clear(self) -> None:
        """Drop every dependency and counter."""
        self._dependencies.clear()
        self._dependents.clear()
        self._reference_counts.clear()

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the graph (sets become lists)."""
        return {
            "dependencies": {wf: list(deps) for wf, deps in self._dependencies.items()},
            "dependents": {wf: list(deps) for wf, deps in self._dependents.items()},
            "reference_counts": dict(self._reference_counts),
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'DependencyGraph':
        """Deserialize a graph produced by to_dict()."""
        graph = cls()
        graph._dependencies = {
            wf: set(deps) for wf, deps in data.get("dependencies", {}).items()
        }
        graph._dependents = {
            wf: set(deps) for wf, deps in data.get("dependents", {}).items()
        }
        graph._reference_counts = dict(data.get("reference_counts", {}))
        return graph

    def __len__(self) -> int:
        """Total number of dependency edges."""
        return sum(len(deps) for deps in self._dependencies.values())

    def __repr__(self) -> str:
        return f"DependencyGraph(workflows={len(self.get_all_workflows())}, dependencies={len(self)})"
|
||||
101
core/workflow/execution_logger.py
Normal file
101
core/workflow/execution_logger.py
Normal file
@@ -0,0 +1,101 @@
|
||||
"""
|
||||
Execution Logger - Log d'exécution unifié pour les chaînes de workflows
|
||||
|
||||
Ce module maintient un log unifié des événements d'exécution.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Dict, List, Any, Optional
|
||||
from datetime import datetime
|
||||
|
||||
from .composition_models import LogEntry
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ExecutionLogger:
|
||||
"""
|
||||
Maintient un log d'exécution unifié pour les chaînes de workflows.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self._entries: List[LogEntry] = []
|
||||
self._final_variables: Dict[str, Any] = {}
|
||||
|
||||
def log(
|
||||
self,
|
||||
workflow_id: str,
|
||||
node_id: str,
|
||||
event_type: str,
|
||||
details: Optional[Dict[str, Any]] = None
|
||||
) -> LogEntry:
|
||||
"""Ajoute une entrée au log."""
|
||||
entry = LogEntry(
|
||||
timestamp=datetime.now(),
|
||||
workflow_id=workflow_id,
|
||||
node_id=node_id,
|
||||
event_type=event_type,
|
||||
details=details or {}
|
||||
)
|
||||
self._entries.append(entry)
|
||||
return entry
|
||||
|
||||
def log_start(self, workflow_id: str, node_id: str = "") -> LogEntry:
|
||||
"""Log le démarrage d'un workflow/node."""
|
||||
return self.log(workflow_id, node_id, "start")
|
||||
|
||||
def log_end(self, workflow_id: str, node_id: str = "", success: bool = True) -> LogEntry:
|
||||
"""Log la fin d'un workflow/node."""
|
||||
return self.log(workflow_id, node_id, "end", {"success": success})
|
||||
|
||||
def log_action(self, workflow_id: str, node_id: str, action: str, **kwargs) -> LogEntry:
|
||||
"""Log une action."""
|
||||
return self.log(workflow_id, node_id, "action", {"action": action, **kwargs})
|
||||
|
||||
def log_error(self, workflow_id: str, node_id: str, error: str, **kwargs) -> LogEntry:
|
||||
"""Log une erreur."""
|
||||
return self.log(workflow_id, node_id, "error", {"error": error, **kwargs})
|
||||
|
||||
def log_warning(self, workflow_id: str, node_id: str, warning: str) -> LogEntry:
|
||||
"""Log un avertissement."""
|
||||
return self.log(workflow_id, node_id, "warning", {"warning": warning})
|
||||
|
||||
def get_entries(self) -> List[LogEntry]:
|
||||
"""Retourne toutes les entrées du log."""
|
||||
return self._entries.copy()
|
||||
|
||||
def get_entries_for_workflow(self, workflow_id: str) -> List[LogEntry]:
|
||||
"""Retourne les entrées pour un workflow spécifique."""
|
||||
return [e for e in self._entries if e.workflow_id == workflow_id]
|
||||
|
||||
def get_errors(self) -> List[LogEntry]:
|
||||
"""Retourne toutes les erreurs."""
|
||||
return [e for e in self._entries if e.event_type == "error"]
|
||||
|
||||
def set_final_variable_state(self, variables: Dict[str, Any]) -> None:
|
||||
"""Définit l'état final des variables."""
|
||||
self._final_variables = variables.copy()
|
||||
|
||||
def get_final_variable_state(self) -> Dict[str, Any]:
|
||||
"""Retourne l'état final des variables."""
|
||||
return self._final_variables.copy()
|
||||
|
||||
def clear(self) -> None:
|
||||
"""Efface le log."""
|
||||
self._entries.clear()
|
||||
self._final_variables.clear()
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
    """Serialize the logger (entries + final variables) to a plain dict."""
    serialized_entries = [entry.to_dict() for entry in self._entries]
    return {
        "entries": serialized_entries,
        "final_variables": self._final_variables,
    }
||||
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> 'ExecutionLogger':
    """Rebuild a logger from its to_dict() representation."""
    restored = cls()
    restored._entries = [LogEntry.from_dict(raw) for raw in data.get("entries", [])]
    restored._final_variables = data.get("final_variables", {})
    return restored
||||
192
core/workflow/global_variable_manager.py
Normal file
192
core/workflow/global_variable_manager.py
Normal file
@@ -0,0 +1,192 @@
|
||||
"""
|
||||
Global Variable Manager - Gestion des variables globales entre workflows
|
||||
|
||||
Ce module gère les variables partagées entre workflows d'une même chaîne d'exécution.
|
||||
Il permet de:
|
||||
- Définir et lire des variables globales
|
||||
- Transférer des variables entre workflows
|
||||
- Gérer les conflits de noms
|
||||
- Préserver l'état final des variables
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Dict, Any, List, Optional
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
|
||||
from .composition_models import VariableOverride
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class GlobalVariableManager:
    """Shared variable store for a chain of workflow executions.

    A single namespace of variables is visible to every workflow in the
    same chain run. The manager remembers which workflow last wrote each
    variable and records every overwrite in an override log.

    Example:
        >>> gvm = GlobalVariableManager()
        >>> gvm.set_global("user_id", "12345", "login_workflow")
        >>> gvm.get_global("user_id")
        '12345'
        >>> gvm.set_global("user_id", "67890", "update_workflow")  # override
        >>> gvm.get_global("user_id")
        '67890'
    """

    def __init__(self):
        self._global_vars: Dict[str, Any] = {}
        self._override_log: List[VariableOverride] = []
        # Per variable name, the id of the workflow that wrote it last.
        self._source_workflow: Dict[str, str] = {}

    def set_global(self, name: str, value: Any, source_workflow: str) -> None:
        """Set a global variable; overwrites are logged as overrides.

        Args:
            name: Variable name.
            value: Value to assign.
            source_workflow: Id of the workflow writing the variable.
        """
        if name in self._global_vars:
            previous = self._global_vars[name]
            # Keep an audit trail of every overwrite.
            self._override_log.append(VariableOverride(
                variable_name=name,
                old_value=previous,
                new_value=value,
                source_workflow=source_workflow,
                timestamp=datetime.now()
            ))
            logger.warning(
                f"Variable '{name}' overridden: {previous} -> {value} "
                f"(by workflow '{source_workflow}')"
            )

        self._global_vars[name] = value
        self._source_workflow[name] = source_workflow
        logger.debug(f"Global variable set: {name} = {value} (from {source_workflow})")

    def get_global(self, name: str, default: Any = None) -> Any:
        """Return the value of a global variable, or *default* if unset."""
        return self._global_vars.get(name, default)

    def has_global(self, name: str) -> bool:
        """Return True when a global variable with this name exists."""
        return name in self._global_vars

    def get_source_workflow(self, name: str) -> Optional[str]:
        """Return the id of the workflow that last wrote the variable."""
        return self._source_workflow.get(name)

    def transfer_to_workflow(
        self,
        target_workflow_id: str,
        variable_mapping: Optional[Dict[str, str]] = None
    ) -> Dict[str, Any]:
        """Build the variable dict handed to the next workflow.

        Args:
            target_workflow_id: Id of the receiving workflow (informational).
            variable_mapping: Optional source_var -> target_var renaming; when
                given, only mapped variables that exist are transferred.

        Returns:
            The transferred variables (all of them when no mapping is given).
        """
        if variable_mapping:
            # Transfer only the mapped variables, renamed to their targets.
            return {
                target_name: self._global_vars[source_name]
                for source_name, target_name in variable_mapping.items()
                if source_name in self._global_vars
            }
        # No mapping: hand over a copy of the whole namespace.
        return dict(self._global_vars)

    def get_final_state(self) -> Dict[str, Any]:
        """Return a copy of the final state of all global variables."""
        return dict(self._global_vars)

    def get_override_log(self) -> List[VariableOverride]:
        """Return the override log, in chronological order (copy)."""
        return list(self._override_log)

    def get_all_variables(self) -> Dict[str, Any]:
        """Alias for get_final_state()."""
        return self.get_final_state()

    def clear(self) -> None:
        """Drop all variables, the override log and source tracking."""
        for store in (self._global_vars, self._override_log, self._source_workflow):
            store.clear()

    def merge_from(self, other: 'GlobalVariableManager', source_workflow: str) -> None:
        """Merge every variable from another manager into this one.

        Args:
            other: Manager whose variables are merged in.
            source_workflow: Workflow id recorded for the merged writes.
        """
        for var_name, var_value in other._global_vars.items():
            self.set_global(var_name, var_value, source_workflow)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the manager to a plain dictionary."""
        return {
            "global_vars": dict(self._global_vars),
            "override_log": [entry.to_dict() for entry in self._override_log],
            "source_workflow": dict(self._source_workflow)
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'GlobalVariableManager':
        """Rebuild a manager from its to_dict() representation."""
        manager = cls()
        manager._global_vars = dict(data.get("global_vars", {}))
        manager._override_log = [
            VariableOverride.from_dict(raw)
            for raw in data.get("override_log", [])
        ]
        manager._source_workflow = dict(data.get("source_workflow", {}))
        return manager

    def __len__(self) -> int:
        """Return the number of global variables."""
        return len(self._global_vars)

    def __repr__(self) -> str:
        return f"GlobalVariableManager(variables={len(self)}, overrides={len(self._override_log)})"
||||
238
core/workflow/loop_executor.py
Normal file
238
core/workflow/loop_executor.py
Normal file
@@ -0,0 +1,238 @@
|
||||
"""
|
||||
Loop Executor - Gestion de l'exécution des boucles dans les workflows
|
||||
|
||||
Ce module gère l'exécution des boucles (count et condition) avec:
|
||||
- Compteur d'itérations
|
||||
- Évaluation des conditions de sortie
|
||||
- Garde de sécurité (limite max d'itérations)
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Dict, Any, Optional
|
||||
from datetime import datetime
|
||||
|
||||
from .composition_models import (
|
||||
LoopConfig, LoopState, IterationResult, VisualCondition
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LoopSafetyLimitError(Exception):
    """Raised when a loop exceeds its safety iteration limit."""

    def __init__(self, loop_id: str, iterations: int, limit: int):
        # Keep the raw values so callers can inspect them programmatically.
        self.loop_id = loop_id
        self.iterations = iterations
        self.limit = limit
        message = (
            f"Boucle '{loop_id}' a atteint la limite de sécurité: "
            f"{iterations} itérations (limite: {limit})"
        )
        super().__init__(message)
|
||||
|
||||
class LoopExecutor:
    """Execute workflow loops.

    Two loop kinds are supported:
      - ``count``: a fixed number of iterations (``max_iterations``)
      - ``condition``: iterate until a visual exit condition becomes True

    Every loop is additionally capped by a safety limit — the stricter of
    the per-loop ``config.safety_limit`` and the executor-wide
    ``safety_limit`` — so a misconfigured loop can never spin forever.

    Forward references in annotations are quoted so the class can be
    defined even when the composition models are imported lazily.
    """

    # Executor-wide default iteration cap, applied on top of each
    # loop's own configured limit.
    DEFAULT_SAFETY_LIMIT = 1000

    def __init__(self, safety_limit: int = DEFAULT_SAFETY_LIMIT):
        """Initialize the executor.

        Args:
            safety_limit: Global maximum number of iterations per loop.
        """
        self.safety_limit = safety_limit
        self._active_loops: Dict[str, "LoopState"] = {}
        self._configs: Dict[str, "LoopConfig"] = {}

    def start_loop(self, config: "LoopConfig") -> "LoopState":
        """Start a new loop and return its initial state.

        Args:
            config: Loop configuration.

        Returns:
            The freshly created, zero-iteration loop state.
        """
        state = LoopState(
            loop_id=config.loop_id,
            current_iteration=0,
            started_at=datetime.now(),
            last_condition_result=None
        )

        self._active_loops[config.loop_id] = state
        self._configs[config.loop_id] = config

        logger.info(f"Boucle '{config.loop_id}' démarrée (type: {config.loop_type})")
        return state

    def execute_iteration(self, loop_id: str) -> "IterationResult":
        """Run one iteration bookkeeping step for the loop.

        Increments the counter, enforces the safety limit, and reports
        whether the loop should keep going.

        Args:
            loop_id: Loop identifier.

        Returns:
            The iteration result (a failed result when the loop is unknown).

        Raises:
            LoopSafetyLimitError: If the safety limit is exceeded.
        """
        if loop_id not in self._active_loops:
            return IterationResult(
                loop_id=loop_id,
                iteration=0,
                success=False,
                should_continue=False,
                error_message=f"Boucle '{loop_id}' non trouvée"
            )

        config = self._configs[loop_id]

        # Bump the counter first so the limit check sees the iteration
        # about to run. (The previous version also fetched the loop state
        # here without using it — removed.)
        new_iteration = self.increment_counter(loop_id)

        # The effective cap is the stricter of per-loop and global limits.
        effective_limit = min(config.safety_limit, self.safety_limit)
        if new_iteration > effective_limit:
            logger.warning(
                f"Boucle '{loop_id}' a atteint la limite de sécurité ({effective_limit})"
            )
            raise LoopSafetyLimitError(loop_id, new_iteration, effective_limit)

        return IterationResult(
            loop_id=loop_id,
            iteration=new_iteration,
            success=True,
            should_continue=self.should_continue(loop_id)
        )

    def should_continue(self, loop_id: str, condition_result: Optional[bool] = None) -> bool:
        """Decide whether the loop should run another iteration.

        Args:
            loop_id: Loop identifier.
            condition_result: Latest evaluation of the exit condition
                (only meaningful for ``condition`` loops).

        Returns:
            True to keep looping; False to stop or if the loop is unknown.
        """
        if loop_id not in self._active_loops:
            return False

        state = self._active_loops[loop_id]
        config = self._configs[loop_id]

        # Remember the latest condition evaluation when one is supplied.
        if condition_result is not None:
            state.last_condition_result = condition_result

        if config.loop_type == "count":
            # Counter loop: run until max_iterations; a missing limit means
            # "unbounded" (the safety guard above still protects us).
            if config.max_iterations is None:
                return True
            return state.current_iteration < config.max_iterations

        if config.loop_type == "condition":
            # Condition loop: keep going until the exit condition is True;
            # an unevaluated condition means we have not checked yet.
            if state.last_condition_result is None:
                return True
            return not state.last_condition_result

        # Unknown loop type: refuse to continue.
        return False

    def increment_counter(self, loop_id: str) -> int:
        """Advance the loop's iteration counter.

        Args:
            loop_id: Loop identifier.

        Returns:
            The new counter value, or 0 when the loop is unknown.
        """
        state = self._active_loops.get(loop_id)
        if state is None:
            return 0

        state.current_iteration += 1
        logger.debug(f"Boucle '{loop_id}': itération {state.current_iteration}")
        return state.current_iteration

    def get_state(self, loop_id: str) -> Optional["LoopState"]:
        """Return the current state of a loop, or None when unknown."""
        return self._active_loops.get(loop_id)

    def get_iteration(self, loop_id: str) -> int:
        """Return the current iteration number (0 when the loop is unknown)."""
        state = self._active_loops.get(loop_id)
        return state.current_iteration if state else 0

    def end_loop(self, loop_id: str) -> Optional["LoopState"]:
        """Terminate a loop and return its final state.

        Args:
            loop_id: Loop identifier.

        Returns:
            The final state, or None when the loop was not active.
        """
        state = self._active_loops.pop(loop_id, None)
        self._configs.pop(loop_id, None)

        if state:
            logger.info(
                f"Boucle '{loop_id}' terminée après {state.current_iteration} itérations"
            )

        return state

    def is_active(self, loop_id: str) -> bool:
        """Return True when the loop is currently active."""
        return loop_id in self._active_loops

    def get_active_loops(self) -> Dict[str, "LoopState"]:
        """Return a copy of the mapping of all active loops."""
        return self._active_loops.copy()

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the executor's loop state to a plain dictionary."""
        return {
            "safety_limit": self.safety_limit,
            "active_loops": {k: v.to_dict() for k, v in self._active_loops.items()},
            "configs": {k: v.to_dict() for k, v in self._configs.items()}
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'LoopExecutor':
        """Rebuild an executor (and its loops) from to_dict() output."""
        executor = cls(safety_limit=data.get("safety_limit", cls.DEFAULT_SAFETY_LIMIT))

        for loop_id, state_data in data.get("active_loops", {}).items():
            executor._active_loops[loop_id] = LoopState.from_dict(state_data)

        for loop_id, config_data in data.get("configs", {}).items():
            executor._configs[loop_id] = LoopConfig.from_dict(config_data)

        return executor
||||
463
core/workflow/semantic_matcher.py
Normal file
463
core/workflow/semantic_matcher.py
Normal file
@@ -0,0 +1,463 @@
|
||||
"""
|
||||
Semantic Matcher - Matching sémantique des commandes en langage naturel
|
||||
|
||||
Permet de :
|
||||
- Trouver le workflow correspondant à une commande en langage naturel
|
||||
- Extraire les paramètres de la commande
|
||||
- Utiliser des embeddings pour le matching sémantique
|
||||
"""
|
||||
|
||||
import re
|
||||
import logging
|
||||
from typing import Dict, Any, List, Optional, Tuple
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
import json
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class WorkflowMatch:
    """Result of matching a natural-language command against one workflow."""
    workflow_id: str                  # unique id (JSON file stem)
    workflow_name: str                # human-readable workflow name
    workflow_path: str                # path to the workflow JSON file
    confidence: float                 # match score, clamped to [0.0, 1.0]
    extracted_params: Dict[str, str]  # parameters parsed out of the command
    match_reason: str                 # " | "-joined list of matched strategies
||||
|
||||
@dataclass
class WorkflowMetadata:
    """Metadata describing one workflow, used for command matching."""
    workflow_id: str          # unique id (JSON file stem)
    name: str                 # workflow display name
    description: str          # free-text description
    tags: List[str]           # tags used for exact tag matching
    keywords: List[str]       # tokens extracted from name/description/tags/actions
    param_patterns: List[str] # regex patterns used to extract parameters
    path: str                 # path to the workflow JSON file
|
||||
|
||||
class SemanticMatcher:
    """Match natural-language commands to registered workflows.

    Several strategies are combined into a single confidence score:
      1. Exact matching on the workflow name
      2. Tag matching
      3. Keyword overlap (Jaccard similarity)
      4. Description-token overlap
      5. Parameter extraction (per-workflow regex patterns + heuristics)

    NOTE(review): ``use_embeddings`` is stored and ``_embedder`` /
    ``_workflow_embeddings`` are initialized, but no embedding-based
    matching is implemented anywhere in this class — confirm whether this
    is planned work or dead configuration.

    Example:
        >>> matcher = SemanticMatcher("data/workflows")
        >>> result = matcher.find_workflow("facturer le client Acme")
        >>> print(result.workflow_name)  # "Facturation Client"
        >>> print(result.extracted_params)  # {"client": "Acme"}
    """

    def __init__(
        self,
        workflows_dir: str = "data/workflows",
        use_embeddings: bool = True
    ):
        """Initialize the matcher.

        Args:
            workflows_dir: Directory containing workflow JSON files.
            use_embeddings: Whether to use embeddings for matching
                (currently unused — see class NOTE above).
        """
        self.workflows_dir = Path(workflows_dir)
        self.use_embeddings = use_embeddings

        # Metadata cache: workflow_id -> WorkflowMetadata
        self._workflows: Dict[str, WorkflowMetadata] = {}

        # Embedder (meant to be loaded on demand; currently never populated)
        self._embedder = None
        self._workflow_embeddings: Dict[str, Any] = {}

        # Eagerly index every workflow found on disk
        self._load_workflows()

    def _load_workflows(self) -> None:
        """Load metadata for every ``*.json`` workflow in ``workflows_dir``."""
        if not self.workflows_dir.exists():
            logger.warning(f"Workflows directory not found: {self.workflows_dir}")
            return

        for workflow_path in self.workflows_dir.glob("*.json"):
            try:
                with open(workflow_path, 'r', encoding='utf-8') as f:
                    data = json.load(f)

                # The file stem doubles as the workflow's unique id
                workflow_id = workflow_path.stem

                # Build the matching metadata from the raw JSON
                metadata = WorkflowMetadata(
                    workflow_id=workflow_id,
                    name=data.get("name", workflow_id),
                    description=data.get("description", ""),
                    tags=data.get("tags", []),
                    keywords=self._extract_keywords(data),
                    param_patterns=data.get("param_patterns", []),
                    path=str(workflow_path)
                )

                self._workflows[workflow_id] = metadata
                logger.debug(f"Loaded workflow: {metadata.name}")

            except Exception as e:
                # One malformed file must not abort the whole scan
                logger.error(f"Error loading workflow {workflow_path}: {e}")

        logger.info(f"Loaded {len(self._workflows)} workflows")

    def _extract_keywords(self, workflow_data: Dict[str, Any]) -> List[str]:
        """Extract a deduplicated keyword list from a workflow's JSON data."""
        keywords = set()

        # Name tokens
        name = workflow_data.get("name", "")
        keywords.update(self._tokenize(name))

        # Description tokens
        description = workflow_data.get("description", "")
        keywords.update(self._tokenize(description))

        # Tags as-is
        keywords.update(workflow_data.get("tags", []))

        # Action type names from the workflow's edges
        for edge in workflow_data.get("edges", []):
            action = edge.get("action", {})
            if isinstance(action, dict):
                action_type = action.get("type", "")
                # NOTE(review): when an action has no "type", this adds the
                # empty string to the keyword set — probably unintended.
                keywords.add(action_type)

        return list(keywords)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize text into lowercase keywords, dropping stop words."""
        # Normalize case
        text = text.lower()

        # Strip punctuation (replaced by spaces so words stay separated)
        text = re.sub(r'[^\w\s]', ' ', text)

        # Split into words
        words = text.split()

        # Drop short words and French/English stop words
        stop_words = {
            'le', 'la', 'les', 'un', 'une', 'des', 'de', 'du', 'à', 'au', 'aux',
            'et', 'ou', 'mais', 'donc', 'or', 'ni', 'car', 'que', 'qui', 'quoi',
            'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for',
            'of', 'with', 'by', 'from', 'is', 'are', 'was', 'were', 'be', 'been'
        }

        return [w for w in words if len(w) > 2 and w not in stop_words]

    # =========================================================================
    # Matching
    # =========================================================================

    def find_workflow(
        self,
        command: str,
        min_confidence: float = 0.3
    ) -> Optional[WorkflowMatch]:
        """Find the single best workflow for a command.

        Args:
            command: Natural-language command.
            min_confidence: Minimum required confidence.

        Returns:
            The best WorkflowMatch, or None when nothing matches.
        """
        matches = self.find_workflows(command, limit=1, min_confidence=min_confidence)
        return matches[0] if matches else None

    def find_workflows(
        self,
        command: str,
        limit: int = 5,
        min_confidence: float = 0.3
    ) -> List[WorkflowMatch]:
        """Find all workflows matching a command.

        Args:
            command: Natural-language command.
            limit: Maximum number of results.
            min_confidence: Minimum required confidence.

        Returns:
            WorkflowMatch list sorted by decreasing confidence.
        """
        if not self._workflows:
            logger.warning("No workflows loaded")
            return []

        command_lower = command.lower()
        command_tokens = set(self._tokenize(command))

        matches = []

        for workflow_id, metadata in self._workflows.items():
            # Score this workflow against the command
            score, reason, params = self._calculate_match_score(
                command_lower, command_tokens, metadata
            )

            if score >= min_confidence:
                matches.append(WorkflowMatch(
                    workflow_id=workflow_id,
                    workflow_name=metadata.name,
                    workflow_path=metadata.path,
                    confidence=score,
                    extracted_params=params,
                    match_reason=reason
                ))

        # Sort by decreasing confidence
        matches.sort(key=lambda m: m.confidence, reverse=True)

        return matches[:limit]

    def _calculate_match_score(
        self,
        command: str,
        command_tokens: set,
        metadata: WorkflowMetadata
    ) -> Tuple[float, str, Dict[str, str]]:
        """Score how well a (lowercased) command matches one workflow.

        Returns:
            (score, reason, extracted_params) where score is clamped to 1.0
            and reason is a " | "-joined list of the strategies that fired.
        """
        score = 0.0
        reasons = []
        params = {}

        # 1. Exact name substring match (+0.5)
        if metadata.name.lower() in command:
            score += 0.5
            reasons.append("exact_name")

        # 2. Tag substring matches (+0.3 per tag)
        for tag in metadata.tags:
            if tag.lower() in command:
                score += 0.3
                reasons.append(f"tag:{tag}")

        # 3. Keyword overlap via Jaccard similarity (up to +0.4)
        workflow_tokens = set(metadata.keywords)
        if workflow_tokens and command_tokens:
            intersection = command_tokens & workflow_tokens
            union = command_tokens | workflow_tokens
            jaccard = len(intersection) / len(union) if union else 0
            score += jaccard * 0.4
            if intersection:
                reasons.append(f"keywords:{','.join(intersection)}")

        # 4. Description token overlap (flat +0.2)
        if metadata.description:
            desc_tokens = set(self._tokenize(metadata.description))
            if desc_tokens and command_tokens:
                intersection = command_tokens & desc_tokens
                if intersection:
                    score += 0.2
                    reasons.append("description_match")

        # 5. Parameter extraction (+0.1 when any parameter was found)
        params = self._extract_params(command, metadata)
        if params:
            score += 0.1
            reasons.append(f"params:{','.join(params.keys())}")

        # Clamp the cumulative score (max 1.0)
        score = min(score, 1.0)

        return score, " | ".join(reasons), params

    def _extract_params(
        self,
        command: str,
        metadata: WorkflowMetadata
    ) -> Dict[str, str]:
        """Extract parameters from a command.

        Uses the workflow's own regex patterns first, then a set of common
        French/English heuristics (ranges, clients, invoices, quoted values).
        """
        params = {}

        # 1. Workflow-defined patterns (named groups become parameters)
        for pattern in metadata.param_patterns:
            try:
                match = re.search(pattern, command, re.IGNORECASE)
                if match:
                    params.update(match.groupdict())
            except Exception as e:
                # An invalid user-supplied regex must not break matching
                logger.warning(f"Invalid pattern '{pattern}': {e}")

        # 2. Common heuristics
        # Pattern: "de X à Y" / "from X to Y" -> start/end range
        range_pattern = r'(?:de|from)\s+(\w+)\s+(?:à|to)\s+(\w+)'
        match = re.search(range_pattern, command, re.IGNORECASE)
        if match:
            params['start'] = match.group(1)
            params['end'] = match.group(2)

        # Pattern: "client X" / "customer X" / "compte X"
        client_pattern = r'(?:client|customer|compte)\s+([A-Za-z0-9_\-]+)'
        match = re.search(client_pattern, command, re.IGNORECASE)
        if match:
            params['client'] = match.group(1)

        # Pattern: "facture N" / "invoice N" / "commande N" / "order N"
        invoice_pattern = r'(?:facture|invoice|commande|order)\s+([A-Za-z0-9_\-]+)'
        match = re.search(invoice_pattern, command, re.IGNORECASE)
        if match:
            params['invoice'] = match.group(1)

        # Pattern: double-quoted values -> value0, value1, ...
        quoted_pattern = r'"([^"]+)"'
        quoted_values = re.findall(quoted_pattern, command)
        for i, value in enumerate(quoted_values):
            if f'value{i}' not in params:
                params[f'value{i}'] = value

        return params

    # =========================================================================
    # Workflow management
    # =========================================================================

    def reload_workflows(self) -> None:
        """Clear the caches and re-scan the workflows directory."""
        self._workflows.clear()
        self._workflow_embeddings.clear()
        self._load_workflows()

    def add_workflow(
        self,
        workflow_id: str,
        name: str,
        description: str = "",
        tags: Optional[List[str]] = None,
        param_patterns: Optional[List[str]] = None,
        path: str = ""
    ) -> None:
        """Register a workflow with the matcher programmatically.

        Args:
            workflow_id: Unique workflow id.
            name: Workflow name.
            description: Description.
            tags: Tags used for matching.
            param_patterns: Regex patterns for parameter extraction.
            path: Path to the workflow file.
        """
        # NOTE(review): unlike _extract_keywords(), the keyword list built
        # here is not deduplicated — confirm whether duplicates matter for
        # the Jaccard scoring (they are collapsed by set() at match time).
        metadata = WorkflowMetadata(
            workflow_id=workflow_id,
            name=name,
            description=description,
            tags=tags or [],
            keywords=self._tokenize(name) + self._tokenize(description) + (tags or []),
            param_patterns=param_patterns or [],
            path=path
        )

        self._workflows[workflow_id] = metadata
        logger.info(f"Added workflow: {name}")

    def get_all_workflows(self) -> List[WorkflowMetadata]:
        """Return metadata for all registered workflows."""
        return list(self._workflows.values())

    def get_workflow(self, workflow_id: str) -> Optional[WorkflowMetadata]:
        """Return one workflow's metadata by id, or None."""
        return self._workflows.get(workflow_id)

    # =========================================================================
    # Suggestions
    # =========================================================================

    def suggest_commands(self, partial_command: str, limit: int = 5) -> List[str]:
        """Suggest commands completing a partial input.

        Args:
            partial_command: Start of a command.
            limit: Maximum number of suggestions.

        Returns:
            List of suggestions (workflow names and matching tags).
        """
        suggestions = []
        partial_lower = partial_command.lower()

        for metadata in self._workflows.values():
            # Suggest by name prefix
            if metadata.name.lower().startswith(partial_lower):
                suggestions.append(metadata.name)

            # Suggest by tag prefix
            for tag in metadata.tags:
                if tag.lower().startswith(partial_lower):
                    suggestions.append(f"{tag} ({metadata.name})")

        return suggestions[:limit]

    def get_workflow_help(self, workflow_id: str) -> str:
        """Build a human-readable help text for one workflow.

        Args:
            workflow_id: Workflow id.

        Returns:
            Help text (or a not-found message).
        """
        metadata = self._workflows.get(workflow_id)
        if not metadata:
            return f"Workflow '{workflow_id}' not found"

        help_text = f"📋 {metadata.name}\n"

        if metadata.description:
            help_text += f"\n{metadata.description}\n"

        if metadata.tags:
            help_text += f"\n🏷️ Tags: {', '.join(metadata.tags)}\n"

        if metadata.param_patterns:
            help_text += f"\n📝 Paramètres supportés:\n"
            for pattern in metadata.param_patterns:
                help_text += f"  - {pattern}\n"

        return help_text
||||
|
||||
|
||||
# =============================================================================
|
||||
# Fonctions utilitaires
|
||||
# =============================================================================
|
||||
|
||||
def create_semantic_matcher(workflows_dir: str = "data/workflows") -> SemanticMatcher:
    """Build a SemanticMatcher indexing the given workflows directory.

    Args:
        workflows_dir: Directory containing workflow JSON files.

    Returns:
        A configured SemanticMatcher.
    """
    matcher = SemanticMatcher(workflows_dir=workflows_dir)
    return matcher
|
||||
86
core/workflow/sequence_extractor.py
Normal file
86
core/workflow/sequence_extractor.py
Normal file
@@ -0,0 +1,86 @@
|
||||
"""
|
||||
Sequence Extractor - Détection et extraction de séquences communes
|
||||
|
||||
Ce module détecte les séquences communes entre workflows et permet de les extraire.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Dict, List, Optional, Any
|
||||
|
||||
from .composition_models import CommonSequence, SequenceOccurrence, SubWorkflowDefinition
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SequenceExtractor:
    """Detects and extracts node sequences shared between workflows.

    A run of at least ``min_sequence_length`` consecutive node ids that
    appears in two or more workflows is reported as a CommonSequence and
    can be extracted as a reusable sub-workflow.
    """

    def __init__(self, min_sequence_length: int = 3):
        # Minimum number of consecutive nodes for a run to count.
        self.min_sequence_length = min_sequence_length

    def find_common_sequences(self, workflows: Dict[str, List[str]]) -> List["CommonSequence"]:
        """Find node sequences shared by at least two workflows.

        Args:
            workflows: Mapping workflow_id -> ordered list of node ids.

        Returns:
            One CommonSequence per distinct shared node run, with its
            occurrences merged across all workflow pairs and its priority
            derived from the total occurrence count.

        Fix: the previous implementation emitted a separate CommonSequence
        (always with exactly 2 occurrences) per workflow pair, so identical
        runs were never merged and the "high" priority branch
        (>= 3 occurrences) was unreachable. Occurrences are now grouped by
        node run and deduplicated by (workflow, start, end).
        """
        grouped: Dict[tuple, List[tuple]] = {}
        workflow_ids = list(workflows.keys())

        # Compare every unordered pair of workflows and bucket the spans of
        # each distinct node run.
        for i, wf_a_id in enumerate(workflow_ids):
            for wf_b_id in workflow_ids[i + 1:]:
                for nodes, spans in self._find_common_subsequences(
                    workflows[wf_a_id], workflows[wf_b_id],
                    wf_a_id, wf_b_id
                ):
                    bucket = grouped.setdefault(nodes, [])
                    for span in spans:
                        if span not in bucket:
                            bucket.append(span)

        sequences: List["CommonSequence"] = []
        for nodes, spans in grouped.items():
            occurrences = [
                SequenceOccurrence(wf_id, start, end)
                for wf_id, start, end in spans
            ]
            # Priority grows with how widely the sequence is reused.
            if len(occurrences) >= 3:
                priority = "high"
            elif len(occurrences) >= 2:
                priority = "medium"
            else:
                priority = "low"
            sequences.append(CommonSequence(
                nodes=list(nodes),
                occurrences=occurrences,
                priority=priority
            ))

        return sequences

    def _find_common_subsequences(
        self, nodes_a: List[str], nodes_b: List[str],
        wf_a_id: str, wf_b_id: str
    ) -> List[tuple]:
        """Find common runs between two node lists.

        Returns:
            A list of ``(nodes_tuple, [(wf_id, start, end), ...])`` pairs —
            raw spans rather than CommonSequence objects, so the caller can
            merge occurrences of the same run across workflow pairs.
        """
        results = []

        for i in range(len(nodes_a) - self.min_sequence_length + 1):
            for length in range(self.min_sequence_length, len(nodes_a) - i + 1):
                subseq = tuple(nodes_a[i:i + length])

                # Look for the same run anywhere in nodes_b
                for j in range(len(nodes_b) - length + 1):
                    if tuple(nodes_b[j:j + length]) == subseq:
                        results.append((
                            subseq,
                            [(wf_a_id, i, i + length), (wf_b_id, j, j + length)]
                        ))

        return results

    def extract_as_subworkflow(self, sequence: "CommonSequence", name: str) -> "SubWorkflowDefinition":
        """Wrap a common sequence as a reusable sub-workflow definition.

        Args:
            sequence: The common sequence to extract.
            name: Name of the new sub-workflow (id becomes ``sub_<name>``).
        """
        return SubWorkflowDefinition(
            workflow_id=f"sub_{name}",
            name=name,
            input_parameters=[],
            output_values=[],
            description=f"Extracted from {len(sequence.occurrences)} workflows"
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the extractor's configuration."""
        return {"min_sequence_length": self.min_sequence_length}

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'SequenceExtractor':
        """Rebuild an extractor from its to_dict() representation."""
        return cls(min_sequence_length=data.get("min_sequence_length", 3))
|
||||
161
core/workflow/subworkflow_registry.py
Normal file
161
core/workflow/subworkflow_registry.py
Normal file
@@ -0,0 +1,161 @@
|
||||
"""
|
||||
SubWorkflow Registry - Gestion des sous-workflows réutilisables
|
||||
|
||||
Ce module gère l'enregistrement et l'exécution des sous-workflows.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Dict, List, Optional, Any
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
from .composition_models import (
|
||||
SubWorkflowDefinition, ReferenceNode, ExecutionResult, ExecutionContext
|
||||
)
|
||||
from .dependency_graph import DependencyGraph
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SubWorkflowNotFoundError(Exception):
    """Raised when a sub-workflow lookup fails (unknown workflow_id)."""

    def __init__(self, workflow_id: str):
        # Keep the id on the exception so callers can inspect it.
        self.workflow_id = workflow_id
        message = f"Sous-workflow '{workflow_id}' non trouvé"
        super().__init__(message)
|
||||
|
||||
|
||||
class SubWorkflowExecutionError(Exception):
    """Raised when executing a sub-workflow fails; carries id, reason and context."""

    def __init__(self, workflow_id: str, reason: str, context: Optional[Dict] = None):
        self.workflow_id = workflow_id
        self.reason = reason
        # Normalize a missing/empty context to an empty dict.
        self.context = {} if not context else context
        super().__init__(f"Erreur dans sous-workflow '{workflow_id}': {reason}")
|
||||
|
||||
|
||||
class SubWorkflowRegistry:
    """Registry of reusable sub-workflows and of the references pointing at them.

    Keeps three structures:
      - ``_definitions``: workflow_id -> SubWorkflowDefinition
      - ``_references``: parent workflow id -> list of ReferenceNode
      - ``_execution_stack``: parent workflow ids saved so control can return
        to the parent after a nested sub-workflow run
    """

    def __init__(self, dependency_graph: Optional[DependencyGraph] = None):
        # A fresh graph is created when the caller does not share one.
        self.dependency_graph = dependency_graph or DependencyGraph()
        self._definitions: Dict[str, SubWorkflowDefinition] = {}
        self._references: Dict[str, List[ReferenceNode]] = {}  # parent_workflow_id -> refs
        self._execution_stack: List[str] = []  # saved parent ids (control return)

    def register(self, definition: SubWorkflowDefinition) -> None:
        """Register a sub-workflow (silently replaces an existing one with the same id)."""
        self._definitions[definition.workflow_id] = definition
        logger.info(f"Sous-workflow '{definition.name}' enregistré (ID: {definition.workflow_id})")

    def unregister(self, workflow_id: str) -> bool:
        """Remove a sub-workflow definition; returns True when one was removed.

        NOTE(review): references and dependency-graph edges pointing at the
        removed workflow are NOT cleaned up here — confirm whether dangling
        references are acceptable.
        """
        if workflow_id in self._definitions:
            del self._definitions[workflow_id]
            return True
        return False

    def get_definition(self, workflow_id: str) -> Optional[SubWorkflowDefinition]:
        """Return the definition for *workflow_id*, or None when unknown."""
        return self._definitions.get(workflow_id)

    def create_reference(self, parent_workflow_id: str, ref: ReferenceNode) -> None:
        """Link *parent_workflow_id* to a registered sub-workflow.

        Raises:
            SubWorkflowNotFoundError: when the referenced sub-workflow is not registered.
        """
        if ref.sub_workflow_id not in self._definitions:
            raise SubWorkflowNotFoundError(ref.sub_workflow_id)

        if parent_workflow_id not in self._references:
            self._references[parent_workflow_id] = []

        self._references[parent_workflow_id].append(ref)
        self.dependency_graph.add_dependency(parent_workflow_id, ref.sub_workflow_id)

        logger.debug(f"Référence créée: {parent_workflow_id} -> {ref.sub_workflow_id}")

    def execute_reference(
        self,
        ref: ReferenceNode,
        context: ExecutionContext,
        executor_func: Optional[callable] = None
    ) -> ExecutionResult:
        """Execute a sub-workflow through its reference.

        Args:
            ref: Reference to the sub-workflow (with input/output bindings).
            context: Execution context; its current_workflow_id is switched to
                the sub-workflow for the duration of the run and restored after.
            executor_func: Optional callable (workflow_id, inputs) -> ExecutionResult;
                without it the run is simulated as an empty success.

        Returns:
            The sub-workflow's ExecutionResult.

        Raises:
            SubWorkflowNotFoundError: when the sub-workflow is not registered.
            SubWorkflowExecutionError: wrapping any error raised during execution.
        """
        if ref.sub_workflow_id not in self._definitions:
            raise SubWorkflowNotFoundError(ref.sub_workflow_id)

        # Save the parent context BEFORE entering the try block so the
        # finally clause can always restore it.
        parent_workflow = context.current_workflow_id
        self._execution_stack.append(parent_workflow)

        try:
            # Resolve input bindings (parameter name -> global variable name).
            inputs = {}
            for param_name, var_name in ref.input_bindings.items():
                inputs[param_name] = context.global_variables.get(var_name)

            # Point the context at the sub-workflow for the duration of the run.
            context.current_workflow_id = ref.sub_workflow_id

            # Execute (simulated success when no executor is provided).
            if executor_func:
                result = executor_func(ref.sub_workflow_id, inputs)
            else:
                result = ExecutionResult(success=True, output_values={})

            # Map declared outputs back into the shared variables.
            for param_name, var_name in ref.output_bindings.items():
                if param_name in result.output_values:
                    context.global_variables[var_name] = result.output_values[param_name]

            return result

        except Exception as e:
            # Chain the original exception so the root cause stays visible.
            raise SubWorkflowExecutionError(
                ref.sub_workflow_id,
                str(e),
                {"parent": parent_workflow, "inputs": ref.input_bindings}
            ) from e
        finally:
            # Always hand control back to the parent workflow.
            context.current_workflow_id = self._execution_stack.pop()

    def get_dependents(self, workflow_id: str) -> List[str]:
        """Return the workflows that depend on this sub-workflow."""
        return list(self.dependency_graph.get_dependents(workflow_id))

    def get_references(self, parent_workflow_id: str) -> List[ReferenceNode]:
        """Return the references registered for a parent workflow (empty list if none)."""
        return self._references.get(parent_workflow_id, [])

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the registry (definitions, references, dependency graph)."""
        return {
            "definitions": {k: v.to_dict() for k, v in self._definitions.items()},
            "references": {k: [r.to_dict() for r in v] for k, v in self._references.items()},
            "dependency_graph": self.dependency_graph.to_dict()
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'SubWorkflowRegistry':
        """Rebuild a registry from a to_dict() payload."""
        graph = DependencyGraph.from_dict(data.get("dependency_graph", {}))
        registry = cls(dependency_graph=graph)

        for wf_id, defn_data in data.get("definitions", {}).items():
            registry._definitions[wf_id] = SubWorkflowDefinition.from_dict(defn_data)

        for parent_id, refs_data in data.get("references", {}).items():
            registry._references[parent_id] = [ReferenceNode.from_dict(r) for r in refs_data]

        return registry
|
||||
103
core/workflow/trigger_manager.py
Normal file
103
core/workflow/trigger_manager.py
Normal file
@@ -0,0 +1,103 @@
|
||||
"""
|
||||
Trigger Manager - Gestion des déclencheurs automatiques
|
||||
|
||||
Ce module gère les déclencheurs (schedule, file, visual) pour les workflows.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Dict, List, Optional, Any, Union
|
||||
from datetime import datetime
|
||||
from queue import Queue
|
||||
|
||||
from .composition_models import (
|
||||
ScheduleTrigger, FileTrigger, VisualTrigger, TriggerContext, trigger_from_dict
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
TriggerConfig = Union[ScheduleTrigger, FileTrigger, VisualTrigger]
|
||||
|
||||
|
||||
class TriggerManager:
    """Manages automatic triggers (schedule / file / visual) for workflows."""

    def __init__(self, concurrency_mode: str = "queue"):
        # "queue" serialises fired triggers through a FIFO;
        # "concurrent" tracks in-flight contexts per workflow.
        self.concurrency_mode = concurrency_mode  # "concurrent" or "queue"
        self._triggers: Dict[str, TriggerConfig] = {}
        self._execution_queue: Queue = Queue()
        self._active_executions: Dict[str, List[TriggerContext]] = {}

    def register_trigger(self, trigger: TriggerConfig) -> None:
        """Register a trigger under its trigger_id (replaces any previous one)."""
        self._triggers[trigger.trigger_id] = trigger
        logger.info(f"Trigger '{trigger.trigger_id}' enregistré pour workflow '{trigger.workflow_id}'")

    def unregister_trigger(self, trigger_id: str) -> bool:
        """Remove a trigger; returns True when something was actually removed."""
        if trigger_id not in self._triggers:
            return False
        del self._triggers[trigger_id]
        return True

    def get_trigger(self, trigger_id: str) -> Optional[TriggerConfig]:
        """Return the trigger registered under *trigger_id*, or None."""
        return self._triggers.get(trigger_id)

    def fire_trigger(self, trigger_id: str, **kwargs) -> TriggerContext:
        """Fire a trigger by hand and return the resulting context.

        Raises:
            ValueError: when no trigger is registered under *trigger_id*.
        """
        trigger = self._triggers.get(trigger_id)
        if not trigger:
            raise ValueError(f"Trigger '{trigger_id}' non trouvé")

        fired_context = TriggerContext(
            trigger_id=trigger_id,
            trigger_type=trigger.to_dict().get("trigger_type", "unknown"),
            fired_at=datetime.now(),
            file_path=kwargs.get("file_path"),
            detected_element=kwargs.get("detected_element")
        )

        self._handle_trigger_fire(trigger.workflow_id, fired_context)
        return fired_context

    def _handle_trigger_fire(self, workflow_id: str, context: TriggerContext) -> None:
        """Dispatch a fired trigger according to the concurrency mode."""
        if self.concurrency_mode == "queue":
            self._execution_queue.put((workflow_id, context))
            logger.debug(f"Trigger ajouté à la queue: {context.trigger_id}")
            return
        # Concurrent mode: remember every in-flight context per workflow.
        self._active_executions.setdefault(workflow_id, []).append(context)
        logger.debug(f"Trigger en exécution concurrente: {context.trigger_id}")

    def handle_concurrent_triggers(self, workflow_id: str) -> List[TriggerContext]:
        """Pop and return the contexts fired concurrently for *workflow_id*."""
        return self._active_executions.pop(workflow_id, [])

    def get_queued_triggers(self) -> List[tuple]:
        """Drain and return the pending (workflow_id, context) pairs.

        NOTE: this is a destructive read — the queue is emptied.
        """
        drained = []
        while not self._execution_queue.empty():
            drained.append(self._execution_queue.get())
        return drained

    def get_triggers_for_workflow(self, workflow_id: str) -> List[TriggerConfig]:
        """Return every registered trigger attached to *workflow_id*."""
        return [trig for trig in self._triggers.values() if trig.workflow_id == workflow_id]

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the configuration (pending queue contents are not serialized)."""
        return {
            "concurrency_mode": self.concurrency_mode,
            "triggers": {tid: trig.to_dict() for tid, trig in self._triggers.items()}
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'TriggerManager':
        """Rebuild a manager; each trigger is restored via trigger_from_dict."""
        manager = cls(concurrency_mode=data.get("concurrency_mode", "queue"))
        for trig_id, trig_data in data.get("triggers", {}).items():
            manager._triggers[trig_id] = trigger_from_dict(trig_data)
        return manager
|
||||
450
core/workflow/variable_manager.py
Normal file
450
core/workflow/variable_manager.py
Normal file
@@ -0,0 +1,450 @@
|
||||
"""
|
||||
Variable Manager - Gestion des variables et paramètres dans les workflows
|
||||
|
||||
Permet de :
|
||||
- Définir des variables dans un workflow (ex: {{client_name}})
|
||||
- Substituer les variables lors de l'exécution
|
||||
- Valider les paramètres requis
|
||||
- Gérer les valeurs par défaut
|
||||
"""
|
||||
|
||||
import re
|
||||
import logging
|
||||
from typing import Dict, Any, List, Optional, Set
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class VariableDefinition:
    """Declaration of a single workflow variable (name, type, default, validation)."""
    name: str
    description: str = ""
    required: bool = True
    default_value: Optional[str] = None
    var_type: str = "string"  # one of: string, number, boolean, list
    validation_pattern: Optional[str] = None  # optional regex the value must satisfy

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict; note var_type is stored under the 'type' key."""
        return {
            "name": self.name,
            "description": self.description,
            "required": self.required,
            "default_value": self.default_value,
            "type": self.var_type,
            "validation_pattern": self.validation_pattern,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'VariableDefinition':
        """Rebuild a definition from the dict shape produced by to_dict()."""
        return cls(
            name=data["name"],
            description=data.get("description", ""),
            required=data.get("required", True),
            default_value=data.get("default_value"),
            var_type=data.get("type", "string"),
            validation_pattern=data.get("validation_pattern"),
        )
|
||||
|
||||
|
||||
class VariableManager:
    """
    Variable manager for workflows.

    Handles substitution of variables inside actions and parameters.

    Variable syntax:
        {{variable_name}}          - simple variable
        {{variable_name|default}}  - variable with an inline default value

    Example:
        >>> vm = VariableManager()
        >>> vm.set_variable("client", "Acme Corp")
        >>> result = vm.substitute("Facturer {{client}}")
        >>> print(result)  # "Facturer Acme Corp"
    """

    # Matches {{name}} or {{name|default}}.
    # group(1) = variable name, group(2) = inline default
    # (may be empty; cannot contain '}' because of the [^}]* class).
    VARIABLE_PATTERN = re.compile(r'\{\{(\w+)(?:\|([^}]*))?\}\}')

    def __init__(self):
        self._variables: Dict[str, Any] = {}                    # current values
        self._definitions: Dict[str, VariableDefinition] = {}   # declared metadata

    # =========================================================================
    # Variable definitions
    # =========================================================================

    def define_variable(
        self,
        name: str,
        description: str = "",
        required: bool = True,
        default_value: Optional[str] = None,
        var_type: str = "string",
        validation_pattern: Optional[str] = None
    ) -> None:
        """
        Declare a workflow variable.

        Args:
            name: Variable name.
            description: Human-readable description.
            required: Whether the variable must be set before execution.
            default_value: Default value; also applied immediately when the
                variable has no value yet.
            var_type: Variable type (string, number, integer, boolean, list).
            validation_pattern: Regex the (string) value must match.
        """
        self._definitions[name] = VariableDefinition(
            name=name,
            description=description,
            required=required,
            default_value=default_value,
            var_type=var_type,
            validation_pattern=validation_pattern
        )

        # Apply the default right away so the variable is usable immediately.
        if default_value is not None and name not in self._variables:
            self._variables[name] = default_value

        logger.debug(f"Variable defined: {name} (required={required})")

    def get_definitions(self) -> Dict[str, VariableDefinition]:
        """Return a copy of all variable definitions."""
        return self._definitions.copy()

    def get_required_variables(self) -> List[str]:
        """Names of required variables that have no default value."""
        return [
            name for name, defn in self._definitions.items()
            if defn.required and defn.default_value is None
        ]

    # =========================================================================
    # Values
    # =========================================================================

    def set_variable(self, name: str, value: Any) -> None:
        """
        Set a variable's value.

        When a definition exists, the value is validated against its regex
        (re.match — anchored at the start only) and converted to the declared
        type.

        Args:
            name: Variable name.
            value: Value to assign.

        Raises:
            ValueError: when the value does not match the validation pattern.
        """
        if name in self._definitions:
            defn = self._definitions[name]

            # Validate the pattern when one is declared (string values only).
            if defn.validation_pattern and isinstance(value, str):
                if not re.match(defn.validation_pattern, value):
                    raise ValueError(
                        f"Variable '{name}' value '{value}' doesn't match pattern '{defn.validation_pattern}'"
                    )

            # Convert to the declared type when needed.
            value = self._convert_type(value, defn.var_type)

        self._variables[name] = value
        logger.debug(f"Variable set: {name} = {value}")

    def set_variables(self, variables: Dict[str, Any]) -> None:
        """Set several variables at once (each goes through set_variable)."""
        for name, value in variables.items():
            self.set_variable(name, value)

    def get_variable(self, name: str, default: Any = None) -> Any:
        """
        Get a variable's value.

        Lookup order: explicit value, then the definition's default value,
        then the *default* argument.
        """
        if name in self._variables:
            return self._variables[name]

        # Fall back to the declared default value, if any.
        if name in self._definitions and self._definitions[name].default_value is not None:
            return self._definitions[name].default_value

        return default

    def get_all_variables(self) -> Dict[str, Any]:
        """Return a copy of all variable values."""
        return self._variables.copy()

    def clear_variables(self) -> None:
        """Clear all variable values (definitions are kept)."""
        self._variables.clear()

    # =========================================================================
    # Substitution
    # =========================================================================

    def substitute(self, text: str) -> str:
        """
        Substitute variables in a text.

        Unknown variables without an inline default are left untouched
        (and a warning is logged).

        Args:
            text: Text containing {{name}} placeholders.

        Returns:
            Text with placeholders replaced.
        """
        def replace_var(match):
            var_name = match.group(1)
            default_value = match.group(2)  # None when no inline default

            value = self.get_variable(var_name)

            if value is None:
                if default_value is not None:
                    return default_value
                # Keep the placeholder as-is when no value is available.
                logger.warning(f"Variable '{var_name}' not defined")
                return match.group(0)

            return str(value)

        return self.VARIABLE_PATTERN.sub(replace_var, text)

    def substitute_dict(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Substitute variables in a dictionary (recursive).

        Args:
            data: Dictionary possibly containing {{name}} placeholders.

        Returns:
            A new dictionary with placeholders substituted.
        """
        result = {}

        for key, value in data.items():
            if isinstance(value, str):
                result[key] = self.substitute(value)
            elif isinstance(value, dict):
                result[key] = self.substitute_dict(value)
            elif isinstance(value, list):
                result[key] = self.substitute_list(value)
            else:
                result[key] = value

        return result

    def substitute_list(self, data: List[Any]) -> List[Any]:
        """
        Substitute variables in a list (recursive).

        Args:
            data: List possibly containing {{name}} placeholders.

        Returns:
            A new list with placeholders substituted.
        """
        result = []

        for item in data:
            if isinstance(item, str):
                result.append(self.substitute(item))
            elif isinstance(item, dict):
                result.append(self.substitute_dict(item))
            elif isinstance(item, list):
                result.append(self.substitute_list(item))
            else:
                result.append(item)

        return result

    # =========================================================================
    # Variable extraction
    # =========================================================================

    def extract_variables(self, text: str) -> Set[str]:
        """
        Extract the variable names referenced in a text.

        Args:
            text: Text containing {{name}} placeholders.

        Returns:
            Set of variable names found.
        """
        matches = self.VARIABLE_PATTERN.findall(text)
        return {match[0] for match in matches}

    def extract_variables_from_dict(self, data: Dict[str, Any]) -> Set[str]:
        """
        Extract variable names referenced anywhere in a dictionary (recursive).

        Args:
            data: Dictionary to scan.

        Returns:
            Set of variable names found.
        """
        variables = set()

        for value in data.values():
            if isinstance(value, str):
                variables.update(self.extract_variables(value))
            elif isinstance(value, dict):
                variables.update(self.extract_variables_from_dict(value))
            elif isinstance(value, list):
                variables.update(self.extract_variables_from_list(value))

        return variables

    def extract_variables_from_list(self, data: List[Any]) -> Set[str]:
        """
        Extract variable names referenced anywhere in a list (recursive).

        Args:
            data: List to scan.

        Returns:
            Set of variable names found.
        """
        variables = set()

        for item in data:
            if isinstance(item, str):
                variables.update(self.extract_variables(item))
            elif isinstance(item, dict):
                variables.update(self.extract_variables_from_dict(item))
            elif isinstance(item, list):
                variables.update(self.extract_variables_from_list(item))

        return variables

    # =========================================================================
    # Validation
    # =========================================================================

    def validate(self) -> List[str]:
        """
        Validate that every required variable has a value.

        Returns:
            List of validation errors (empty when everything is set).
        """
        errors = []

        for name, defn in self._definitions.items():
            if defn.required:
                value = self.get_variable(name)
                if value is None:
                    errors.append(f"Required variable '{name}' is not defined")

        return errors

    def is_valid(self) -> bool:
        """True when every required variable has a value."""
        return len(self.validate()) == 0

    # =========================================================================
    # Utilities
    # =========================================================================

    def _convert_type(self, value: Any, var_type: str) -> Any:
        """Convert *value* to the declared type; unknown types pass through unchanged."""
        if var_type == "string":
            return str(value)
        elif var_type == "number":
            return float(value)
        elif var_type == "integer":
            return int(value)
        elif var_type == "boolean":
            if isinstance(value, bool):
                return value
            return str(value).lower() in ("true", "1", "yes", "oui")
        elif var_type == "list":
            if isinstance(value, list):
                return value
            return [value]
        else:
            return value

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the manager (definitions and current values)."""
        return {
            "definitions": {
                name: defn.to_dict()
                for name, defn in self._definitions.items()
            },
            "values": self._variables.copy()
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'VariableManager':
        """Rebuild a manager from a to_dict() payload."""
        manager = cls()

        # Restore the declared definitions.
        for name, defn_data in data.get("definitions", {}).items():
            defn = VariableDefinition.from_dict(defn_data)
            manager._definitions[name] = defn

        # Restore the explicit values.
        for name, value in data.get("values", {}).items():
            manager._variables[name] = value

        return manager
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Fonctions utilitaires
|
||||
# =============================================================================
|
||||
|
||||
def create_variable_manager_from_workflow(workflow_data: Dict[str, Any]) -> VariableManager:
    """
    Build a VariableManager from a workflow's JSON data.

    Loads the explicit "variables" declarations first, then scans the whole
    workflow for {{...}} placeholders and registers any undeclared ones as
    optional, auto-detected variables.

    Args:
        workflow_data: Workflow data (JSON-like dict).

    Returns:
        A configured VariableManager.
    """
    manager = VariableManager()

    # Explicit declarations first, so auto-detection does not override them.
    for var_data in workflow_data.get("variables", []):
        manager.define_variable(
            name=var_data["name"],
            description=var_data.get("description", ""),
            required=var_data.get("required", True),
            default_value=var_data.get("default_value"),
            var_type=var_data.get("type", "string")
        )

    # Placeholders actually referenced anywhere in the workflow body.
    used_variables = manager.extract_variables_from_dict(workflow_data)

    # Anything referenced but never declared becomes an optional variable.
    for var_name in used_variables:
        if var_name in manager._definitions:
            continue
        manager.define_variable(
            name=var_name,
            description=f"Auto-detected variable: {var_name}",
            required=False
        )
        logger.info(f"Auto-detected variable: {var_name}")

    return manager
|
||||
188
core/workflow/workflow_chainer.py
Normal file
188
core/workflow/workflow_chainer.py
Normal file
@@ -0,0 +1,188 @@
|
||||
"""
|
||||
Workflow Chainer - Gestion du chaînage de workflows
|
||||
|
||||
Ce module gère l'exécution séquentielle de workflows chaînés.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Dict, List, Optional, Any
|
||||
from datetime import datetime
|
||||
|
||||
from .composition_models import (
|
||||
ChainConfig, ChainResult, ExecutionContext, LogEntry, ValidationResult
|
||||
)
|
||||
from .global_variable_manager import GlobalVariableManager
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ChainValidationError(Exception):
    """Raised when a source -> target chain link fails validation."""

    def __init__(self, source_id: str, target_id: str, reason: str):
        # Keep the identifiers so callers can report the broken link precisely.
        self.source_id = source_id
        self.target_id = target_id
        self.reason = reason
        super().__init__(f"Chaîne invalide {source_id} -> {target_id}: {reason}")
|
||||
|
||||
|
||||
class ChainExecutionError(Exception):
    """Raised when a workflow inside a chain fails to execute."""

    def __init__(self, workflow_id: str, reason: str):
        # Expose the failing workflow and the reason for callers.
        self.workflow_id = workflow_id
        self.reason = reason
        super().__init__(f"Erreur dans chaîne au workflow '{workflow_id}': {reason}")
|
||||
|
||||
|
||||
class WorkflowChainer:
|
||||
"""Gère l'exécution séquentielle de workflows chaînés."""
|
||||
|
||||
def __init__(self, variable_manager: Optional[GlobalVariableManager] = None):
|
||||
self.variable_manager = variable_manager or GlobalVariableManager()
|
||||
self._chains: Dict[str, List[ChainConfig]] = {} # source_id -> configs
|
||||
self._execution_log: List[LogEntry] = []
|
||||
|
||||
def add_chain(self, config: ChainConfig) -> None:
|
||||
"""Ajoute une configuration de chaînage."""
|
||||
if config.source_workflow_id not in self._chains:
|
||||
self._chains[config.source_workflow_id] = []
|
||||
self._chains[config.source_workflow_id].append(config)
|
||||
logger.info(f"Chaîne ajoutée: {config.source_workflow_id} -> {config.target_workflow_id}")
|
||||
|
||||
def get_next_workflow(self, current_workflow_id: str) -> Optional[str]:
|
||||
"""Retourne le prochain workflow dans la chaîne."""
|
||||
configs = self._chains.get(current_workflow_id, [])
|
||||
if configs:
|
||||
return configs[0].target_workflow_id
|
||||
return None
|
||||
|
||||
def validate_chain(self, source_id: str, target_id: str,
|
||||
source_outputs: Optional[Dict] = None,
|
||||
target_inputs: Optional[Dict] = None) -> ValidationResult:
|
||||
"""Valide la compatibilité entre deux workflows."""
|
||||
errors = []
|
||||
warnings = []
|
||||
|
||||
# Vérifier que la chaîne existe
|
||||
configs = self._chains.get(source_id, [])
|
||||
matching = [c for c in configs if c.target_workflow_id == target_id]
|
||||
|
||||
if not matching:
|
||||
errors.append(f"Aucune chaîne définie de {source_id} vers {target_id}")
|
||||
|
||||
# Vérifier le mapping de variables si fourni
|
||||
if matching and source_outputs and target_inputs:
|
||||
config = matching[0]
|
||||
for source_var, target_var in config.variable_mapping.items():
|
||||
if source_var not in source_outputs:
|
||||
warnings.append(f"Variable source '{source_var}' non trouvée")
|
||||
if target_var not in target_inputs:
|
||||
warnings.append(f"Variable cible '{target_var}' non attendue")
|
||||
|
||||
return ValidationResult(
|
||||
is_valid=len(errors) == 0,
|
||||
errors=errors,
|
||||
warnings=warnings
|
||||
)
|
||||
|
||||
def execute_chain(
|
||||
self,
|
||||
start_workflow_id: str,
|
||||
executor_func: Optional[callable] = None
|
||||
) -> ChainResult:
|
||||
"""Exécute une chaîne de workflows."""
|
||||
chain_id = f"chain_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
|
||||
workflows_executed = []
|
||||
current_id = start_workflow_id
|
||||
started_at = datetime.now()
|
||||
|
||||
self._log_event(chain_id, current_id, "chain_start", {"start": start_workflow_id})
|
||||
|
||||
try:
|
||||
while current_id:
|
||||
workflows_executed.append(current_id)
|
||||
self._log_event(chain_id, current_id, "workflow_start", {})
|
||||
|
||||
# Exécuter le workflow (simulation si pas de fonction)
|
||||
if executor_func:
|
||||
result = executor_func(current_id, self.variable_manager.get_all_variables())
|
||||
if not result.get("success", True):
|
||||
raise ChainExecutionError(current_id, result.get("error", "Unknown"))
|
||||
|
||||
self._log_event(chain_id, current_id, "workflow_end", {})
|
||||
|
||||
# Passer au suivant
|
||||
configs = self._chains.get(current_id, [])
|
||||
if configs:
|
||||
config = configs[0]
|
||||
# Transférer les variables
|
||||
transferred = self.variable_manager.transfer_to_workflow(
|
||||
config.target_workflow_id,
|
||||
config.variable_mapping if config.variable_mapping else None
|
||||
)
|
||||
current_id = config.target_workflow_id
|
||||
else:
|
||||
current_id = None
|
||||
|
||||
self._log_event(chain_id, workflows_executed[-1], "chain_end", {"success": True})
|
||||
|
||||
return ChainResult(
|
||||
chain_id=chain_id,
|
||||
success=True,
|
||||
workflows_executed=workflows_executed,
|
||||
final_variables=self.variable_manager.get_final_state(),
|
||||
execution_log=self._execution_log.copy(),
|
||||
started_at=started_at,
|
||||
completed_at=datetime.now()
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self._log_event(chain_id, current_id or "unknown", "chain_error", {"error": str(e)})
|
||||
return ChainResult(
|
||||
chain_id=chain_id,
|
||||
success=False,
|
||||
workflows_executed=workflows_executed,
|
||||
final_variables=self.variable_manager.get_final_state(),
|
||||
execution_log=self._execution_log.copy(),
|
||||
error_message=str(e),
|
||||
started_at=started_at,
|
||||
completed_at=datetime.now()
|
||||
)
|
||||
|
||||
def _log_event(self, chain_id: str, workflow_id: str, event_type: str, details: Dict) -> None:
    """Append one event entry to the unified execution log.

    Args:
        chain_id: Identifier of the chain; stored in the entry's ``node_id`` slot.
        workflow_id: Workflow the event relates to.
        event_type: Event label, e.g. ``"chain_start"`` or ``"workflow_end"``.
        details: Free-form payload describing the event.
    """
    self._execution_log.append(
        LogEntry(
            timestamp=datetime.now(),
            workflow_id=workflow_id,
            node_id=chain_id,
            event_type=event_type,
            details=details,
        )
    )
|
||||
|
||||
def get_execution_log(self) -> List[LogEntry]:
    """Return a shallow copy of the unified execution log.

    A copy is returned so callers cannot mutate the internal log.
    """
    return list(self._execution_log)
|
||||
|
||||
def clear_log(self) -> None:
    """Remove every entry from the execution log, in place."""
    del self._execution_log[:]
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
    """Serialize the chain configuration, variables and log to plain dicts.

    Returns:
        Dict with keys ``"chains"``, ``"variable_manager"`` and
        ``"execution_log"``, each fully converted via the items' own
        ``to_dict`` methods.
    """
    serialized_chains = {
        source: [cfg.to_dict() for cfg in cfg_list]
        for source, cfg_list in self._chains.items()
    }
    return {
        "chains": serialized_chains,
        "variable_manager": self.variable_manager.to_dict(),
        "execution_log": [entry.to_dict() for entry in self._execution_log],
    }
|
||||
|
||||
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> 'WorkflowChainer':
    """Rebuild a chainer from its serialized form (inverse of ``to_dict``).

    Args:
        data: Dict produced by ``to_dict``; missing keys fall back to
            empty defaults.
    """
    manager = GlobalVariableManager.from_dict(data.get("variable_manager", {}))
    instance = cls(variable_manager=manager)

    for source, raw_configs in data.get("chains", {}).items():
        instance._chains[source] = [
            ChainConfig.from_dict(raw) for raw in raw_configs
        ]

    instance._execution_log = [
        LogEntry.from_dict(raw) for raw in data.get("execution_log", [])
    ]
    return instance
|
||||
75
core/workflow/workflow_merger.py
Normal file
75
core/workflow/workflow_merger.py
Normal file
@@ -0,0 +1,75 @@
|
||||
"""
|
||||
Workflow Merger - Détection et fusion de workflows similaires
|
||||
|
||||
Ce module détecte les workflows similaires et permet de les fusionner.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Dict, List, Set, Any, Optional
|
||||
from dataclasses import dataclass
|
||||
|
||||
from .composition_models import MergeCandidate, NodeConflict
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class WorkflowMerger:
    """Detects and merges similar workflows.

    Similarity is the Jaccard overlap of two workflows' node-id sets; pairs
    whose overlap reaches ``similarity_threshold`` are reported as merge
    candidates.
    """

    def __init__(self, similarity_threshold: float = 0.9):
        # Minimum Jaccard score in [0, 1] for two workflows to be candidates.
        self.similarity_threshold = similarity_threshold

    def calculate_similarity(self, workflow_a_nodes: List[str], workflow_b_nodes: List[str]) -> float:
        """Return the Jaccard similarity of two workflows' node lists.

        Returns:
            ``|A ∩ B| / |A ∪ B|`` over the node-id sets; 0.0 when either
            list is empty.
        """
        if not workflow_a_nodes or not workflow_b_nodes:
            return 0.0

        set_a = set(workflow_a_nodes)
        set_b = set(workflow_b_nodes)

        union = len(set_a | set_b)
        # union > 0 is guaranteed here (both inputs non-empty); guard kept
        # as a cheap safety net.
        return len(set_a & set_b) / union if union > 0 else 0.0

    def find_merge_candidates(self, workflows: Dict[str, List[str]]) -> List[MergeCandidate]:
        """Return every workflow pair whose similarity meets the threshold.

        Args:
            workflows: Mapping of workflow id -> list of node ids.

        Returns:
            One ``MergeCandidate`` per qualifying unordered pair, with the
            shared node ids attached and an empty conflict list.
        """
        candidates: List[MergeCandidate] = []
        workflow_ids = list(workflows.keys())

        # Compare each unordered pair exactly once.
        for i, wf_a_id in enumerate(workflow_ids):
            for wf_b_id in workflow_ids[i + 1:]:
                similarity = self.calculate_similarity(workflows[wf_a_id], workflows[wf_b_id])

                if similarity >= self.similarity_threshold:
                    shared = list(set(workflows[wf_a_id]) & set(workflows[wf_b_id]))
                    candidates.append(MergeCandidate(
                        workflow_a_id=wf_a_id,
                        workflow_b_id=wf_b_id,
                        similarity_score=similarity,
                        shared_nodes=shared,
                        conflicts=[]
                    ))

        return candidates

    def get_unique_paths(self, workflow_nodes: List[str]) -> List[List[str]]:
        """Extract all unique paths of a workflow (simplified).

        Simplified implementation: the whole node list is treated as a
        single path; an empty workflow yields no paths.
        """
        if not workflow_nodes:
            return []
        return [workflow_nodes]

    def merge(self, nodes_a: List[str], nodes_b: List[str]) -> List[str]:
        """Merge two workflows, preserving order and every unique node.

        Nodes from ``nodes_a`` come first (kept verbatim, duplicates
        included, matching prior behavior); nodes from ``nodes_b`` are
        appended only when not already present. Membership is tracked in a
        set so the merge runs in O(len(a) + len(b)) instead of the previous
        quadratic list scan.
        """
        merged = list(nodes_a)
        seen = set(merged)
        for node in nodes_b:
            if node not in seen:
                seen.add(node)
                merged.append(node)
        return merged

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the merger configuration."""
        return {"similarity_threshold": self.similarity_threshold}

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'WorkflowMerger':
        """Deserialize; the threshold defaults to 0.9 when absent."""
        return cls(similarity_threshold=data.get("similarity_threshold", 0.9))
|
||||
Reference in New Issue
Block a user