refactor: split cim10_extractor → bio_normals, bio_extraction, diagnostic_extraction, validation_pipeline
Découpe le monolithe cim10_extractor.py (1356L) en 4 modules spécialisés : - bio_normals.py : constante BIO_NORMALS + _is_abnormal() (feuille) - bio_extraction.py : extraction biologie structurée - diagnostic_extraction.py : extraction DP/DAS/actes CCAM - validation_pipeline.py : validation CIM-10/CCAM + règles métier Le cim10_extractor.py reste orchestrateur (~450L) avec re-exports backward-compat. Imports mis à jour dans clinical_context, rag_search, fusion. 748 tests passent. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
184
src/medical/bio_extraction.py
Normal file
184
src/medical/bio_extraction.py
Normal file
@@ -0,0 +1,184 @@
|
||||
"""Extraction des résultats biologiques depuis le texte médical."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
import unicodedata
|
||||
|
||||
from ..config import BiologieCle, DossierMedical, load_lab_value_sanity
|
||||
from .bio_normals import BIO_NORMALS, _is_abnormal
|
||||
|
||||
|
||||
def _norm_key(s: str) -> str:
|
||||
"""Normalise une clé (minuscules, sans accents) pour index YAML."""
|
||||
s = (s or "").strip().lower()
|
||||
s = unicodedata.normalize("NFKD", s)
|
||||
s = "".join(ch for ch in s if not unicodedata.combining(ch))
|
||||
return re.sub(r"\s+", " ", s)
|
||||
|
||||
|
||||
def _parse_float_and_token(raw: str) -> tuple[float | None, str | None]:
|
||||
"""Parse un float et renvoie aussi le token numérique normalisé (avec '.')."""
|
||||
if raw is None:
|
||||
return None, None
|
||||
s = str(raw).strip()
|
||||
m = re.search(r"(-?\d+(?:[\.,]\d+)?)", s)
|
||||
if not m:
|
||||
return None, None
|
||||
token = m.group(1).replace(",", ".")
|
||||
try:
|
||||
return float(token), token
|
||||
except ValueError:
|
||||
return None, None
|
||||
|
||||
|
||||
def _sanitize_bio_value(test_name: str, raw_value: str, sanity_cfg: dict) -> tuple[str, float, str, str | None] | None:
    """Apply anti-artifact guardrails (OCR/PDF extraction noise) to a raw lab value.

    Args:
        test_name: Display name of the lab test; normalized via ``_norm_key`` for lookup.
        raw_value: Raw captured value (may include units or ',' as decimal separator).
        sanity_cfg: Parsed sanity config; per-test rules live under the ``tests`` key.

    Returns:
        ``(token, value_float, quality, reason)`` or ``None`` when not parsable.
        ``quality`` is one of: ok | suspect | discarded.
    """
    val, token = _parse_float_and_token(raw_value)
    if val is None or token is None:
        return None

    key = _norm_key(test_name)
    tests_cfg = (sanity_cfg or {}).get("tests") or {}
    cfg = tests_cfg.get(key) or {}
    hard_min = cfg.get("hard_min")
    hard_max = cfg.get("hard_max")

    # Hard bounds: values outside are treated as extraction artifacts, not physiology.
    if hard_min is not None and val < float(hard_min):
        return token, val, "discarded", f"Valeur hors bornes plausibles (<{hard_min})"
    if hard_max is not None and val > float(hard_max):
        return token, val, "discarded", f"Valeur hors bornes plausibles (>{hard_max})"

    quality = "ok"
    reason: str | None = None

    suspect_cfg = cfg.get("suspect") or {}
    single_digit_over = suspect_cfg.get("single_digit_over")
    if single_digit_over is not None:
        # E.g. potassium captured as '8' instead of '4.8' (decimal separator lost by OCR).
        if re.fullmatch(r"\d", str(raw_value).strip()) and val >= float(single_digit_over):
            quality = "suspect"
            # Fix: was an f-string with no placeholders (useless `f` prefix); value unchanged.
            reason = "Valeur à 1 chiffre (possible décimale perdue) : vérifier dans le CR"

    return token, val, quality, reason
|
||||
|
||||
|
||||
def _extract_biologie(text: str, dossier: DossierMedical) -> None:
    """Extract key biology results from free-text medical reports into *dossier*.

    Notes:
        - Supports aliases (TGO/TGP, Hb, Na/K, ...).
        - Captures multiple occurrences per test (useful to confirm/refute diagnoses).
        - Deliberately kept *simple* (regexes over extracted text): a value present
          only in a badly-extracted PDF table may be missed.
        - Mutates ``dossier.biologie_cle`` and ``dossier.biologie_discarded`` in place;
          returns nothing.
    """
    # Each entry is (regex pattern, display test name). Group 1 captures the value.
    bio_patterns: list[tuple[str, str]] = [
        (r"[Ll]ipas[ée]mie\s*(?:[àa=:])?\s*(\d+)\s*(?:UI/L|U/L)?", "Lipasémie"),
        (r"\bCRP\b\s*[=:àa]?\s*(\d+(?:[.,]\d+)?)\s*(?:mg/[Ll])?", "CRP"),
        (r"(?:\bASAT\b|\bTGO\b)\s*[=:àa]?\s*([\d.,]+)\s*(?:N|U(?:I)?/L)?", "ASAT"),
        (r"(?:\bALAT\b|\bTGP\b)\s*[=:àa]?\s*([\d.,]+)\s*(?:N|U(?:I)?/L)?", "ALAT"),
        (r"\bGGT\b\s*[=:àa]?\s*(\d+)\s*(?:U(?:I)?/L)?", "GGT"),
        (r"\bPAL\b\s*[=:àa]?\s*(\d+)\s*(?:U(?:I)?/L)?", "PAL"),
        (r"[Bb]ilirubine\s+(?:totale\s+)?[àa=:]\s*(\d+(?:[.,]\d+)?)\s*(?:µmol/L|mg/dL)?", "Bilirubine totale"),

        # Ionogram / electrolytes (lookarounds keep 'Na'/'K' from matching inside words)
        (r"(?:[Ss]odium|[Nn]atr[ée]mie|(?<![A-Za-z])Na\+?(?![A-Za-z]))\s*[=:àa]?\s*([0-9]{2,3}(?:[.,][0-9]+)?)\s*(?:mmol/L|mEq/L)?", "Sodium"),
        (r"(?:[Pp]otassium|[Kk]ali[ée]mie|(?<![A-Za-z])K\+?(?![A-Za-z]))\s*[=:àa]?\s*([0-9](?:[.,][0-9]+)?)\s*(?:mmol/L|mEq/L)?", "Potassium"),

        (r"[Tt]roponine\s+(?:us\s+)?(n[ée]gative|positive|normale)", "Troponine"),
        (r"(?:[Hh][ée]moglobine|\bHb\b)\s*[=:àa]?\s*(\d+(?:[.,]\d+)?)\s*(?:g/dL|g/L)?", "Hémoglobine"),
        (r"[Pp]laquettes?\s*[=:àa]?\s*(\d+(?:[.,]\d+)?)\s*(?:/mm3|G/L)?", "Plaquettes"),
        (r"[Ll]eucocytes?\s*[=:àa]?\s*(\d+(?:[.,]\d+)?)\s*(?:/mm3|G/L)?", "Leucocytes"),
        (r"[Cc]r[ée]atinine?\s*[=:àa]?\s*(\d+(?:[.,]\d+)?)\s*(?:µmol/L|mg/dL)?", "Créatinine"),
    ]

    # Deduplication + per-test cap (avoids blowing up the output JSON)
    max_per_test = 6
    counts: dict[str, int] = {}
    seen: set[tuple[str, str]] = set()

    sanity_cfg = load_lab_value_sanity()
    policy = (sanity_cfg or {}).get("policy") or {}
    # Policy flags default to True when absent from the config.
    drop_out_of_range = bool(policy.get("drop_out_of_range", True))
    keep_suspect = bool(policy.get("keep_suspect", True))

    for pattern, test_name in bio_patterns:
        for m in re.finditer(pattern, text):
            raw_value = (m.group(1) or "").strip()
            if not raw_value:
                continue

            # Qualitative values (troponin negative/positive/normal):
            # skip numeric sanitization entirely.
            if re.fullmatch(r"[a-zA-Zéèêëàâôûùïîç]+", raw_value):
                key = (test_name, raw_value.lower())
                if key in seen:
                    continue
                seen.add(key)
                counts[test_name] = counts.get(test_name, 0) + 1
                if counts[test_name] > max_per_test:
                    # Cap reached for this test: stop scanning further matches.
                    break
                anomalie = _is_abnormal(test_name, raw_value)
                dossier.biologie_cle.append(
                    BiologieCle(
                        test=test_name,
                        valeur=raw_value,
                        valeur_num=None,
                        anomalie=anomalie,
                        quality="ok",
                        discard_reason=None,
                    )
                )
                continue

            sanitized = _sanitize_bio_value(test_name, raw_value, sanity_cfg)
            if sanitized is None:
                continue
            token, val_num, quality, reason = sanitized

            # Policy may demote 'suspect' values to 'discarded'.
            if quality == "suspect" and not keep_suspect:
                quality = "discarded"
                reason = reason or "Valeur suspecte (policy keep_suspect=false)"

            # Deduplicate on the normalized numeric token.
            key = (test_name, token)
            if key in seen:
                continue
            seen.add(key)

            # NOTE(review): discarded values also consume the per-test budget — confirm intended.
            counts[test_name] = counts.get(test_name, 0) + 1
            if counts[test_name] > max_per_test:
                break

            if quality == "discarded":
                # Keep an audit trail without polluting the quality rules.
                dossier.biologie_discarded.append(
                    {
                        "test": test_name,
                        "raw": raw_value,
                        "valeur": token,
                        "valeur_num": val_num,
                        "reason": reason,
                    }
                )
                if drop_out_of_range:
                    continue

            anomalie = _is_abnormal(test_name, token)
            dossier.biologie_cle.append(
                BiologieCle(
                    test=test_name,
                    valeur=token,
                    valeur_num=val_num,
                    anomalie=anomalie,
                    quality=quality,
                    discard_reason=reason,
                )
            )
|
||||
Reference in New Issue
Block a user