feat: process mining BPMN, détection changement écran pHash, OCR docTR
Some checks failed
security-audit / Bandit (scan statique) (push) Successful in 12s
security-audit / pip-audit (CVE dépendances) (push) Successful in 10s
security-audit / Scan secrets (grep) (push) Successful in 8s
tests / Lint (ruff + black) (push) Successful in 15s
tests / Tests unitaires (sans GPU) (push) Failing after 13s
tests / Tests sécurité (critique) (push) Has been skipped

Process Mining (core/analytics/process_mining_bridge.py) :
- Bridge PM4Py : conversion sessions Shadow → event log → BPMN XML + PNG
- KPIs automatiques : durée, variantes, goulots, distribution par app
- Support sessions JSONL brutes et workflows core JSON
- 42 tests (dont 1 sur données réelles)

Détection changement d'écran (core/analytics/screen_change_detector.py) :
- pHash (imagehash) : ~16ms par screenshot, seuils SAME/MINOR/MAJOR
- 8 tests sur screenshots réels

OCR docTR dans execute_extract_text :
- docTR par défaut pour lecture simple (rapide, CPU)
- Ollama VLM en fallback ou sur demande explicite (mode "vlm"/"ai")
- Dual-mode adaptatif selon extraction_mode

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Dom
2026-04-18 13:07:56 +02:00
parent f5a672d7b9
commit 309dfd5287
6 changed files with 1684 additions and 36 deletions

View File

@@ -0,0 +1,621 @@
"""
Bridge entre les workflows Lea (core) et PM4Py pour le process mining.
Genere des diagrammes BPMN et KPIs depuis les traces Shadow.
Usage:
from core.analytics.process_mining_bridge import (
sessions_to_event_log,
workflow_to_event_log,
discover_bpmn,
compute_kpis,
)
# Depuis des sessions JSONL brutes
df = sessions_to_event_log(sessions_data)
result = discover_bpmn(df, output_dir="data/analytics/bpmn")
kpis = compute_kpis(df)
# Depuis un workflow core (dict JSON)
df = workflow_to_event_log(workflow_dict)
"""
import json
import logging
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional
import pandas as pd
logger = logging.getLogger(__name__)
# ---- Import conditionnel PM4Py -----------------------------------------
try:
import pm4py
PM4PY_AVAILABLE = True
except ImportError:
PM4PY_AVAILABLE = False
logger.warning("pm4py non installe -- le process mining est desactive")
def _sanitize_label(label: str) -> str:
"""
Supprime les caracteres de controle (0x00-0x1F sauf tab/newline)
qui sont invalides en XML et font planter PM4Py.
"""
return "".join(
c if c in ("\t", "\n", "\r") or ord(c) >= 0x20 else f"<0x{ord(c):02x}>"
for c in label
)
# ---- Types d'evenements a ignorer (bruit) --------------------------------
_NOISE_EVENT_TYPES = frozenset({
"heartbeat",
"action_result",
"screenshot",
})
# Types d'evenements significatifs pour le process mining
_RELEVANT_EVENT_TYPES = frozenset({
"mouse_click",
"text_input",
"key_press",
"key_combo",
"window_focus_change",
})
# ===========================================================================
# Conversion sessions JSONL -> event log PM4Py
# ===========================================================================
def _build_activity_label(event: dict) -> Optional[str]:
"""
Construit un label d'activite lisible depuis un event JSONL brut.
Regles :
- mouse_click -> "Clic - <app_name> (<window_title tronque>)"
- text_input -> "Saisie '<text>' - <app_name>"
- key_press -> "Touche <key> - <app_name>"
- key_combo -> "Raccourci <keys> - <app_name>"
- window_focus_change -> "Fenetre <to.title> (<to.app_name>)"
Tous les labels sont sanitises pour supprimer les caracteres de controle
(ex: \\x13 pour Ctrl+S) qui sont invalides en XML/BPMN.
"""
evt = event.get("event", event)
etype = evt.get("type", "")
if etype in _NOISE_EVENT_TYPES:
return None
# Extraction fenetre
window = evt.get("window", {})
app_name = window.get("app_name", "inconnu")
win_title = window.get("title", "")
# Tronquer le titre a 40 caracteres
short_title = (win_title[:40] + "...") if len(win_title) > 40 else win_title
label: Optional[str] = None
if etype == "mouse_click":
label = f"Clic - {app_name} ({short_title})"
elif etype == "text_input":
text = evt.get("text", "")
# Tronquer le texte a 20 caracteres pour rester lisible
short_text = (text[:20] + "...") if len(text) > 20 else text
label = f"Saisie '{short_text}' - {app_name}"
elif etype == "key_press":
key = evt.get("key", "?")
label = f"Touche {key} - {app_name}"
elif etype == "key_combo":
keys = evt.get("keys", [])
combo = "+".join(str(k) for k in keys)
label = f"Raccourci {combo} - {app_name}"
elif etype == "window_focus_change":
to_info = evt.get("to", {})
if not to_info:
return None
to_title = to_info.get("title", "?")
to_app = to_info.get("app_name", "?")
label = f"Fenetre {to_title} ({to_app})"
else:
# Types non reconnus : label generique
label = f"{etype} - {app_name}"
return _sanitize_label(label) if label else None
def _extract_timestamp(event: dict) -> Optional[float]:
"""Extrait le timestamp unix depuis un event JSONL."""
# Le timestamp peut etre au niveau racine ou dans event.timestamp
evt = event.get("event", event)
ts = evt.get("timestamp") or event.get("timestamp")
if ts is not None:
return float(ts)
# Fallback sur le champ 't' (format simplifie)
t = evt.get("t") or event.get("t")
if t is not None:
return float(t)
return None
def sessions_to_event_log(
    sessions_data: List[dict],
    deduplicate_windows: bool = True,
) -> pd.DataFrame:
    """
    Convert raw session traces (JSONL events) into a PM4Py event log.

    Each relevant event becomes one row:
        - case:concept:name = session_id
        - concept:name = activity label (e.g. "Clic - Notepad.exe (Bloc-notes)")
        - time:timestamp = UTC timestamp

    Args:
        sessions_data: list of dicts, each dict is one parsed JSONL line.
        deduplicate_windows: if True, drop consecutive window_focus_change
            events targeting the same window (typical Windows noise).

    Returns:
        DataFrame ready for PM4Py, sorted by (case, timestamp). When no
        relevant event is found, an empty DataFrame with the expected
        columns is returned.
    """
    rows: List[Dict[str, Any]] = []
    # Group by session_id so deduplication operates per session
    sessions: Dict[str, List[dict]] = {}
    for event in sessions_data:
        sid = event.get("session_id", "unknown")
        sessions.setdefault(sid, []).append(event)
    for sid, events in sessions.items():
        # Sort chronologically; events without a timestamp sort first (0.0)
        events.sort(key=lambda e: _extract_timestamp(e) or 0.0)
        last_window_label: Optional[str] = None
        for event in events:
            label = _build_activity_label(event)
            if label is None:
                continue  # noise or unusable event
            ts = _extract_timestamp(event)
            if ts is None:
                continue  # no timestamp -> cannot place in the log
            # Deduplicate consecutive focus changes toward the same window
            evt = event.get("event", event)
            if deduplicate_windows and evt.get("type") == "window_focus_change":
                if label == last_window_label:
                    continue
                last_window_label = label
            else:
                # Any other event type resets the dedup state
                last_window_label = None
            rows.append({
                "case:concept:name": sid,
                "concept:name": label,
                "time:timestamp": pd.Timestamp(
                    datetime.fromtimestamp(ts, tz=timezone.utc)
                ),
                "event_type": evt.get("type", ""),
                "app_name": evt.get("window", {}).get("app_name", ""),
            })
    if not rows:
        logger.warning("Aucun evenement pertinent trouve dans les sessions")
        return pd.DataFrame(columns=[
            "case:concept:name",
            "concept:name",
            "time:timestamp",
            "event_type",
            "app_name",
        ])
    df = pd.DataFrame(rows)
    df = df.sort_values(["case:concept:name", "time:timestamp"]).reset_index(drop=True)
    logger.info(
        "Event log cree : %d evenements, %d sessions, %d activites distinctes",
        len(df),
        df["case:concept:name"].nunique(),
        df["concept:name"].nunique(),
    )
    return df
# ===========================================================================
# Conversion workflow core (dict JSON) -> event log PM4Py
# ===========================================================================
def workflow_to_event_log(workflow_dict: dict) -> pd.DataFrame:
    """
    Convert a core workflow (JSON dict) into a PM4Py DataFrame.

    Uses nodes and edges to reconstruct traces: each path from an entry
    node to an end node becomes one case.

    Mapping:
        - case:concept:name = workflow_id + path suffix ("_path_<i>")
        - concept:name = node.name (falls back to node_id)
        - time:timestamp = synthetic: created_at + 1 second per step
          (falls back to now() when created_at cannot be parsed)
    """
    wf_id = workflow_dict.get("workflow_id", "wf_unknown")
    nodes = {n["node_id"]: n for n in workflow_dict.get("nodes", [])}
    edges = workflow_dict.get("edges", [])
    entry_nodes = workflow_dict.get("entry_nodes", [])
    created_at = workflow_dict.get("created_at", datetime.now(timezone.utc).isoformat())
    if not nodes or not edges:
        logger.warning("Workflow vide ou sans edges : %s", wf_id)
        return pd.DataFrame(columns=[
            "case:concept:name",
            "concept:name",
            "time:timestamp",
        ])
    # Adjacency list; supports both from_node/to_node and
    # source_node/target_node edge naming conventions.
    adjacency: Dict[str, List[dict]] = {}
    for edge in edges:
        from_node = edge.get("from_node") or edge.get("source_node", "")
        adjacency.setdefault(from_node, []).append(edge)
    # DFS path enumeration, capped to avoid combinatorial explosion
    MAX_PATHS = 100
    paths: List[List[str]] = []
    def _dfs(current: str, path: List[str], visited: set) -> None:
        # Stop once the global path budget is exhausted
        if len(paths) >= MAX_PATHS:
            return
        if current in visited:
            # Cycle detected: record the path as-is and backtrack
            paths.append(path[:])
            return
        visited.add(current)
        path.append(current)
        outgoing = adjacency.get(current, [])
        if not outgoing:
            # End node reached
            paths.append(path[:])
        else:
            for edge in outgoing:
                to_node = edge.get("to_node") or edge.get("target_node", "")
                if to_node:
                    _dfs(to_node, path, visited)
        path.pop()
        visited.discard(current)
    for entry in entry_nodes:
        if entry in nodes:
            _dfs(entry, [], set())
    # No entry nodes (or none matched): fall back to nodes without any
    # incoming edge (roots), limited to the first 3 to bound the work.
    if not paths:
        target_nodes = set()
        for edge in edges:
            to_node = edge.get("to_node") or edge.get("target_node", "")
            target_nodes.add(to_node)
        root_nodes = [nid for nid in nodes if nid not in target_nodes]
        for root in root_nodes[:3]:
            _dfs(root, [], set())
    # Build the DataFrame with synthetic timestamps (1 second per step)
    rows: List[Dict[str, Any]] = []
    try:
        base_time = pd.Timestamp(datetime.fromisoformat(created_at))
    except (ValueError, TypeError):
        # created_at missing/unparseable -> anchor the trace at now()
        base_time = pd.Timestamp(datetime.now(timezone.utc))
    for i, path in enumerate(paths):
        case_id = f"{wf_id}_path_{i}"
        for step_idx, node_id in enumerate(path):
            node = nodes.get(node_id, {})
            rows.append({
                "case:concept:name": case_id,
                "concept:name": node.get("name", node_id),
                "time:timestamp": base_time + pd.Timedelta(seconds=step_idx),
            })
    df = pd.DataFrame(rows)
    if not df.empty:
        df = df.sort_values(["case:concept:name", "time:timestamp"]).reset_index(drop=True)
    logger.info(
        "Event log depuis workflow : %d evenements, %d chemins",
        len(df), len(paths),
    )
    return df
# ===========================================================================
# Decouverte BPMN
# ===========================================================================
def discover_bpmn(
    event_log_df: pd.DataFrame,
    output_dir: str = "data/analytics/bpmn",
    name: str = "process",
) -> dict:
    """
    Discover a BPMN model from an event log via the Inductive Miner.

    Exports up to four artefacts into output_dir: the BPMN XML, a BPMN
    PNG, a DFG (directly-follows graph) PNG and a Petri-net PNG. Image
    generation is best-effort: a failure is logged and the corresponding
    path is set to None in the result.

    Args:
        event_log_df: DataFrame in PM4Py format.
        output_dir: output directory for the generated files.
        name: prefix for the generated file names.

    Returns:
        {
            'bpmn_xml_path': str,
            'bpmn_image_path': str | None,
            'petri_net_image_path': str | None,
            'dfg_image_path': str | None,
            'stats': {
                'activities': int,
                'variants': int,
                'cases': int,
            }
        }

    Raises:
        ImportError: if pm4py is not installed.
        ValueError: if the event log is empty.
    """
    if not PM4PY_AVAILABLE:
        raise ImportError("pm4py n'est pas installe. Installez-le : pip install pm4py")
    if event_log_df.empty:
        raise ValueError("Event log vide, impossible de decouvrir un BPMN")
    out = Path(output_dir)
    out.mkdir(parents=True, exist_ok=True)
    # BPMN discovery via Inductive Miner
    bpmn_model = pm4py.discover_bpmn_inductive(event_log_df)
    # BPMN XML export
    bpmn_xml_path = str(out / f"{name}.bpmn")
    try:
        pm4py.write_bpmn(bpmn_model, bpmn_xml_path)
    except Exception as e:
        # PM4Py's layout step can fail on labels containing special
        # characters (accents, quotes, etc.). Fallback: use the internal
        # exporter without computing a layout.
        logger.warning("Layout BPMN echoue (%s), export sans layout", e)
        from pm4py.objects.bpmn.exporter import exporter as bpmn_exporter
        bpmn_exporter.apply(bpmn_model, bpmn_xml_path)
    logger.info("BPMN XML exporte : %s", bpmn_xml_path)
    # BPMN image export (PNG) -- best effort
    bpmn_image_path = str(out / f"{name}_bpmn.png")
    try:
        pm4py.save_vis_bpmn(bpmn_model, bpmn_image_path)
        logger.info("BPMN PNG exporte : %s", bpmn_image_path)
    except Exception as e:
        logger.warning("Impossible de generer l'image BPMN : %s", e)
        bpmn_image_path = None
    # DFG (Directly-Follows Graph) with performance -- best effort
    dfg_image_path = str(out / f"{name}_dfg.png")
    try:
        pm4py.save_vis_dfg(
            *pm4py.discover_dfg(event_log_df),
            file_path=dfg_image_path,
        )
        logger.info("DFG PNG exporte : %s", dfg_image_path)
    except Exception as e:
        logger.warning("Impossible de generer le DFG : %s", e)
        dfg_image_path = None
    # Petri net via Inductive Miner (alternative visualisation) -- best effort
    petri_image_path = str(out / f"{name}_petri.png")
    try:
        net, im, fm = pm4py.discover_petri_net_inductive(event_log_df)
        pm4py.save_vis_petri_net(net, im, fm, file_path=petri_image_path)
        logger.info("Petri net PNG exporte : %s", petri_image_path)
    except Exception as e:
        logger.warning("Impossible de generer le Petri net : %s", e)
        petri_image_path = None
    # Basic stats on the log itself
    variants = pm4py.get_variants(event_log_df)
    n_cases = event_log_df["case:concept:name"].nunique()
    n_activities = event_log_df["concept:name"].nunique()
    result = {
        "bpmn_xml_path": bpmn_xml_path,
        "bpmn_image_path": bpmn_image_path,
        "petri_net_image_path": petri_image_path,
        "dfg_image_path": dfg_image_path,
        "stats": {
            "activities": n_activities,
            "variants": len(variants),
            "cases": n_cases,
        },
    }
    logger.info("Decouverte BPMN terminee : %s", result["stats"])
    return result
# ===========================================================================
# KPIs de process mining
# ===========================================================================
def compute_kpis(event_log_df: pd.DataFrame) -> dict:
    """
    Compute process-mining KPIs from a PM4Py event log.

    Returns:
        {
            'total_cases': int,
            'total_events': int,
            'unique_activities': int,
            'variants_count': int,
            'variants_top5': list,
            'avg_case_duration_seconds': float,
            'median_case_duration_seconds': float,
            'avg_events_per_case': float,
            'activity_stats': {
                '<activity_name>': {
                    'count': int,
                    'avg_duration_seconds': float,
                    'min_duration_seconds': float,
                    'max_duration_seconds': float,
                }
            },
            'bottlenecks': [...],  # top 3 slowest activities
            'app_distribution': { '<app_name>': int },
        }
    """
    if event_log_df.empty:
        return {
            "total_cases": 0,
            "total_events": 0,
            "unique_activities": 0,
            "variants_count": 0,
            "variants_top5": [],
            "avg_case_duration_seconds": 0.0,
            "median_case_duration_seconds": 0.0,
            "avg_events_per_case": 0.0,
            "activity_stats": {},
            "bottlenecks": [],
            "app_distribution": {},
        }

    log = event_log_df.copy()

    # ---- Global metrics ----
    total_cases = log["case:concept:name"].nunique()
    total_events = len(log)
    unique_activities = log["concept:name"].nunique()

    # ---- Variants (requires PM4Py) ----
    variants_count = 0
    variants_top5: List[dict] = []
    if PM4PY_AVAILABLE:
        variants = pm4py.get_variants(log)
        variants_count = len(variants)
        # Top 5 variants by frequency
        ranked = sorted(variants.items(), key=lambda item: item[1], reverse=True)
        variants_top5 = [
            {"variant": " -> ".join(seq), "count": freq}
            for seq, freq in ranked[:5]
        ]

    # ---- Per-case durations (only cases with at least 2 events) ----
    durations_by_case = [
        (grp["time:timestamp"].max() - grp["time:timestamp"].min()).total_seconds()
        for _cid, grp in log.groupby("case:concept:name")
        if len(grp) >= 2
    ]
    if durations_by_case:
        dur_series = pd.Series(durations_by_case)
        avg_case_duration = float(dur_series.mean())
        median_case_duration = float(dur_series.median())
    else:
        avg_case_duration = 0.0
        median_case_duration = 0.0
    events_per_case = total_events / total_cases if total_cases > 0 else 0.0

    # ---- Per-activity stats ----
    # Duration of an activity = gap until the next event in the same case.
    ordered = log.sort_values(["case:concept:name", "time:timestamp"])
    ordered["next_timestamp"] = ordered.groupby("case:concept:name")[
        "time:timestamp"
    ].shift(-1)
    ordered["duration_to_next"] = (
        ordered["next_timestamp"] - ordered["time:timestamp"]
    ).dt.total_seconds()

    def _rounded(samples: pd.Series, agg: str) -> float:
        # 0.0 when there is no usable sample, otherwise the rounded aggregate
        return round(float(getattr(samples, agg)()), 2) if len(samples) > 0 else 0.0

    activity_stats: Dict[str, Dict[str, Any]] = {}
    for activity_name, grp in ordered.groupby("concept:name"):
        gaps = grp["duration_to_next"].dropna()
        # Gaps over 5 minutes are treated as breaks, not activity time
        gaps = gaps[gaps <= 300]
        activity_stats[activity_name] = {
            "count": len(grp),
            "avg_duration_seconds": _rounded(gaps, "mean"),
            "min_duration_seconds": _rounded(gaps, "min"),
            "max_duration_seconds": _rounded(gaps, "max"),
        }

    # ---- Bottlenecks: top 3 slowest activities ----
    bottlenecks = sorted(
        (
            {"activity": name, "avg_duration_seconds": stats["avg_duration_seconds"]}
            for name, stats in activity_stats.items()
            if stats["avg_duration_seconds"] > 0
        ),
        key=lambda entry: entry["avg_duration_seconds"],
        reverse=True,
    )[:3]

    # ---- Distribution per application ----
    app_distribution: Dict[str, int] = {}
    if "app_name" in log.columns:
        app_distribution = log["app_name"].value_counts().to_dict()

    return {
        "total_cases": total_cases,
        "total_events": total_events,
        "unique_activities": unique_activities,
        "variants_count": variants_count,
        "variants_top5": variants_top5,
        "avg_case_duration_seconds": round(avg_case_duration, 2),
        "median_case_duration_seconds": round(median_case_duration, 2),
        "avg_events_per_case": round(events_per_case, 1),
        "activity_stats": activity_stats,
        "bottlenecks": bottlenecks,
        "app_distribution": app_distribution,
    }
# ===========================================================================
# Helpers : chargement sessions JSONL
# ===========================================================================
def load_jsonl_session(jsonl_path: str) -> List[dict]:
    """
    Load a live_events.jsonl file as a list of dicts.

    Blank lines are skipped; malformed lines are logged and ignored.

    Raises:
        FileNotFoundError: if jsonl_path does not exist.
    """
    source = Path(jsonl_path)
    if not source.exists():
        raise FileNotFoundError(f"Fichier JSONL introuvable : {jsonl_path}")
    events: List[dict] = []
    with source.open("r", encoding="utf-8") as handle:
        for lineno, raw_line in enumerate(handle, 1):
            stripped = raw_line.strip()
            if not stripped:
                continue
            try:
                events.append(json.loads(stripped))
            except json.JSONDecodeError as err:
                logger.warning("Ligne %d invalide dans %s : %s", lineno, jsonl_path, err)
    logger.info("Charge %d evenements depuis %s", len(events), jsonl_path)
    return events
def load_multiple_sessions(session_dirs: List[str]) -> List[dict]:
    """
    Load events from several session directories.

    Looks for a live_events.jsonl file in each directory; directories
    without one are logged and skipped.
    """
    collected: List[dict] = []
    for directory in session_dirs:
        candidate = Path(directory) / "live_events.jsonl"
        if not candidate.exists():
            logger.warning("Pas de live_events.jsonl dans %s", directory)
            continue
        collected.extend(load_jsonl_session(str(candidate)))
    return collected

View File

@@ -0,0 +1,60 @@
"""
Détection rapide de changement d'écran via perceptual hash (pHash).
Utilise imagehash pour calculer un hash perceptuel par screenshot.
La distance de Hamming entre deux hashes indique le degré de changement :
- < 5 : même écran (bruit, curseur déplacé)
- 5-15 : changement mineur (scroll, popup, champ rempli)
- > 15 : nouvel écran (nouvelle fenêtre, navigation)
Performance : ~15ms par hash sur CPU pour des screenshots 2560x1600.
"""
from PIL import Image
import imagehash
from typing import Tuple, Optional
from enum import Enum
class ScreenChangeLevel(Enum):
    """Qualitative amount of change between two screenshots."""
    SAME = "same"    # distance < 5 (noise, cursor moved)
    MINOR = "minor"  # 5 <= distance < 15 (scroll, popup, field filled)
    MAJOR = "major"  # distance >= 15 (new window, navigation)


# Hamming-distance thresholds separating the three levels. Named once so
# compare_screenshots and compare_hashes cannot drift apart (the original
# duplicated the literal thresholds in both functions).
_SAME_MAX_DISTANCE = 5
_MINOR_MAX_DISTANCE = 15


def compute_phash(image: Image.Image, hash_size: int = 8) -> imagehash.ImageHash:
    """Compute the perceptual hash (pHash) of a PIL image."""
    return imagehash.phash(image, hash_size=hash_size)


def compare_screenshots(img1: Image.Image, img2: Image.Image, hash_size: int = 8) -> Tuple[int, ScreenChangeLevel]:
    """
    Compare two screenshots and return the distance + change level.

    Returns:
        (distance, level) — Hamming distance and change level
    """
    # Hash both images, then delegate classification to compare_hashes so
    # the thresholds live in exactly one place.
    return compare_hashes(compute_phash(img1, hash_size), compute_phash(img2, hash_size))


def compare_hashes(hash1: imagehash.ImageHash, hash2: imagehash.ImageHash) -> Tuple[int, ScreenChangeLevel]:
    """
    Compare two pre-computed hashes.

    Returns:
        (distance, level) — Hamming distance (ImageHash subtraction) and
        the corresponding ScreenChangeLevel.
    """
    distance = hash1 - hash2
    if distance < _SAME_MAX_DISTANCE:
        level = ScreenChangeLevel.SAME
    elif distance < _MINOR_MAX_DISTANCE:
        level = ScreenChangeLevel.MINOR
    else:
        level = ScreenChangeLevel.MAJOR
    return distance, level