refactor: réorganisation référentiels, nouveaux modules extraction, nettoyage code obsolète
- Réorganisation data/referentiels/ : pdfs/, dicts/, user/ (structure unifiée)
- Fix badges "Source absente" sur page admin référentiels
- Ré-indexation COCOA 2025 (555 → 1451 chunks, couverture 94%)
- Fix VRAM OOM : embeddings forcés CPU via T2A_EMBED_CPU
- Nouveaux modules : document_router, docx_extractor, image_extractor, ocr_engine
- Module complétude (quality/completude.py + config YAML)
- Template DIM (synthèse dimensionnelle)
- Gunicorn config + systemd service t2a-viewer
- Suppression t2a_install_rag_cleanup/ (copie obsolète)
- Suppression scripts/ et scripts_t2a_v2/ (anciens benchmarks)
- Suppression 81 fichiers _doc.txt de test
- Cache Ollama : TTL configurable, corrections loader YAML
- Dashboard : améliorations templates (base, index, detail, cpam, validation)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
340
src/config.py
340
src/config.py
@@ -8,11 +8,14 @@ from functools import lru_cache
|
||||
from pathlib import Path
|
||||
from typing import Optional, Any, Dict
|
||||
|
||||
import logging
|
||||
import yaml
|
||||
|
||||
from dotenv import load_dotenv
|
||||
from pydantic import BaseModel, Field, field_validator
|
||||
|
||||
_cfg_logger = logging.getLogger(__name__)
|
||||
|
||||
load_dotenv()
|
||||
|
||||
|
||||
@@ -33,6 +36,7 @@ DIAGNOSTIC_CONFLICTS_PATH = CONFIG_DIR / "diagnostic_conflicts.yaml"
|
||||
PROCEDURE_DIAGNOSIS_RULES_PATH = CONFIG_DIR / "procedure_diagnosis_rules.yaml"
|
||||
TEMPORAL_RULES_PATH = CONFIG_DIR / "temporal_rules.yaml"
|
||||
PARCOURS_RULES_PATH = CONFIG_DIR / "parcours_rules.yaml"
|
||||
COMPLETUDE_RULES_PATH = CONFIG_DIR / "completude_rules.yaml"
|
||||
RULES_DIR = CONFIG_DIR / "rules"
|
||||
RULES_BASE_PATH = RULES_DIR / "base.yaml"
|
||||
RULES_ENABLED_PATH = RULES_DIR / "enabled.yaml"
|
||||
@@ -131,14 +135,16 @@ RAG_INDEX_DIR = BASE_DIR / "data" / "rag_index"
|
||||
# --- Référentiels: unified layout data/referentiels/{dicts,pdfs} ---
REFERENTIELS_DIR = BASE_DIR / "data" / "referentiels"
UPLOAD_MAX_SIZE_MB = 50
ALLOWED_EXTENSIONS = {".pdf", ".csv", ".xlsx", ".xls", ".txt"}

_DICTS_DIR = REFERENTIELS_DIR / "dicts"
_PDFS_DIR = REFERENTIELS_DIR / "pdfs"

# JSON dictionaries extracted from the reference PDFs.
CIM10_DICT_PATH = _DICTS_DIR / "cim10_dict.json"
CIM10_SUPPLEMENTS_PATH = _DICTS_DIR / "cim10_supplements.json"
BIO_CONCEPTS_PATH = BASE_DIR / "data" / "bio_concepts.json"
CMA_LEVELS_PATH = BASE_DIR / "data" / "cma_levels.json"
CCAM_DICT_PATH = _DICTS_DIR / "ccam_dict.json"

# Source PDFs; each location is overridable via environment variable for
# site-specific installs, defaulting to the unified pdfs/ directory.
CIM10_PDF = Path(os.environ.get("T2A_CIM10_PDF", str(_PDFS_DIR / "cim-10-fr_2026_a_usage_pmsi_version_provisoire_111225.pdf")))
GUIDE_METHODO_PDF = Path(os.environ.get("T2A_GUIDE_METHODO_PDF", str(_PDFS_DIR / "guide_methodo_mco_2026_version_provisoire.pdf")))
CCAM_PDF = Path(os.environ.get("T2A_CCAM_PDF", str(_PDFS_DIR / "actualisation_ccam_descriptive_a_usage_pmsi_v4_2025.pdf")))
|
||||
|
||||
# --- Modèle d'embedding ---
|
||||
|
||||
@@ -150,18 +156,37 @@ RERANKER_MODEL = os.environ.get("T2A_RERANKER_MODEL", "cross-encoder/ms-marco-Mi
|
||||
|
||||
# --- Références biologiques (fallback) ---
|
||||
|
||||
def _load_yaml_config(path: Path, defaults: Dict[str, Any], label: str) -> Dict[str, Any]:
    """Load a YAML config file, shallow-merge it over *defaults*, with explicit logging.

    - Missing file: returns a copy of *defaults* (debug log).
    - Invalid YAML syntax, non-dict top level, or read error: returns a copy
      of *defaults* and logs an error naming *label* and *path*.
    - Otherwise: returns *defaults* shallow-merged with the YAML content
      (YAML keys win).

    A copy of *defaults* is always returned so callers may mutate the result
    without corrupting the shared default dict.
    """
    if not path.exists():
        _cfg_logger.debug("Config %s : fichier absent (%s), defaults utilisés", label, path)
        return dict(defaults)
    try:
        data = yaml.safe_load(path.read_text(encoding="utf-8")) or {}
    except yaml.YAMLError as e:
        _cfg_logger.error("Config %s : erreur de syntaxe YAML dans %s — %s", label, path, e)
        return dict(defaults)
    except Exception as e:  # I/O or decoding errors — best-effort fallback
        _cfg_logger.error("Config %s : erreur lecture %s — %s", label, path, e)
        return dict(defaults)
    if not isinstance(data, dict):
        _cfg_logger.error("Config %s : contenu invalide (attendu dict, reçu %s) dans %s",
                          label, type(data).__name__, path)
        return dict(defaults)
    # Shallow merge: top-level YAML keys override the defaults.
    return {**defaults, **data}
|
||||
|
||||
|
||||
@lru_cache(maxsize=1)
|
||||
def load_reference_ranges() -> Dict[str, Any]:
|
||||
"""Charge les intervalles de référence biologiques depuis config/reference_ranges.yaml.
|
||||
|
||||
Hiérarchie d'usage recommandée dans les règles :
|
||||
1) Normes présentes dans le document (ex: [N: 135-145])
|
||||
2) Table YAML (par bande d'âge)
|
||||
3) "Safe zones" conservatrices si âge inconnu
|
||||
|
||||
Le YAML est volontairement éditable par des non-informaticiens (future UI).
|
||||
"""
|
||||
# Defaults minimalistes (adultes) si YAML absent
|
||||
"""Charge les intervalles de référence biologiques depuis config/reference_ranges.yaml."""
|
||||
defaults: Dict[str, Any] = {
|
||||
"version": 1,
|
||||
"age_bands": {"adult_min_years": 18},
|
||||
@@ -171,8 +196,6 @@ def load_reference_ranges() -> Dict[str, Any]:
|
||||
"sodium": {"low": 135, "high": 145, "unit": "mmol/L"},
|
||||
"potassium": {"low": 3.5, "high": 5.0, "unit": "mmol/L"},
|
||||
},
|
||||
# Valeurs pédiatriques: à affiner (par bandes d'âge) si besoin.
|
||||
# Pour les règles "ruled_out" on utilise plutôt les safe_zones_unknown_age
|
||||
"child": {
|
||||
"platelets": {"low": 150, "high": 450, "unit": "G/L"},
|
||||
"sodium": {"low": 135, "high": 145, "unit": "mmol/L"},
|
||||
@@ -186,28 +209,7 @@ def load_reference_ranges() -> Dict[str, Any]:
|
||||
"potassium_ruled_out_low": 3.7,
|
||||
},
|
||||
}
|
||||
|
||||
path = REFERENCE_RANGES_PATH
|
||||
if not path.exists():
|
||||
return defaults
|
||||
|
||||
try:
|
||||
import yaml # type: ignore
|
||||
except Exception:
|
||||
# PyYAML absent: on garde les valeurs par défaut
|
||||
return defaults
|
||||
|
||||
try:
|
||||
data = yaml.safe_load(path.read_text(encoding="utf-8")) or {}
|
||||
if not isinstance(data, dict):
|
||||
return defaults
|
||||
# Merge léger: defaults comme socle, YAML surcharge
|
||||
merged = dict(defaults)
|
||||
for k, v in data.items():
|
||||
merged[k] = v
|
||||
return merged
|
||||
except Exception:
|
||||
return defaults
|
||||
return _load_yaml_config(REFERENCE_RANGES_PATH, defaults, "reference_ranges")
|
||||
|
||||
|
||||
# --- Règles biologiques (pilotées par YAML) ---
|
||||
@@ -215,14 +217,7 @@ def load_reference_ranges() -> Dict[str, Any]:
|
||||
|
||||
@lru_cache(maxsize=1)
|
||||
def load_bio_rules() -> Dict[str, Any]:
|
||||
"""Charge les règles biologiques depuis config/bio_rules.yaml.
|
||||
|
||||
Objectif: permettre d'activer/désactiver et de paramétrer les règles
|
||||
de type "contradiction bio ⇒ ruled_out" sans modifier le code.
|
||||
|
||||
Le fichier est volontairement simple (future UI).
|
||||
"""
|
||||
|
||||
"""Charge les règles biologiques depuis config/bio_rules.yaml."""
|
||||
defaults: Dict[str, Any] = {
|
||||
"version": 1,
|
||||
"rules": {
|
||||
@@ -231,144 +226,55 @@ def load_bio_rules() -> Dict[str, Any]:
|
||||
"hypokalemia": {"enabled": True, "codes": ["E87.6"], "analyte": "potassium"},
|
||||
},
|
||||
}
|
||||
|
||||
path = BIO_RULES_PATH
|
||||
if not path.exists():
|
||||
return defaults
|
||||
|
||||
try:
|
||||
import yaml # type: ignore
|
||||
except Exception:
|
||||
return defaults
|
||||
|
||||
try:
|
||||
data = yaml.safe_load(path.read_text(encoding="utf-8")) or {}
|
||||
if not isinstance(data, dict):
|
||||
return defaults
|
||||
merged = dict(defaults)
|
||||
for k, v in data.items():
|
||||
merged[k] = v
|
||||
return merged
|
||||
except Exception:
|
||||
return defaults
|
||||
return _load_yaml_config(BIO_RULES_PATH, defaults, "bio_rules")
|
||||
|
||||
|
||||
@lru_cache(maxsize=1)
def load_demographic_rules() -> Dict[str, Any]:
    """Load demographic (sex/age) rules from config/demographic_rules.yaml.

    Delegates to _load_yaml_config (missing/invalid file falls back to the
    defaults below); cached for the process lifetime via lru_cache.
    """
    defaults: Dict[str, Any] = {
        "version": 1,
        "sex_rules": {},
        "age_rules": {},
    }
    return _load_yaml_config(DEMOGRAPHIC_RULES_PATH, defaults, "demographic_rules")
|
||||
|
||||
|
||||
@lru_cache(maxsize=1)
def load_diagnostic_conflicts() -> Dict[str, Any]:
    """Load diagnostic-conflict rules from config/diagnostic_conflicts.yaml.

    Delegates to _load_yaml_config (missing/invalid file falls back to the
    defaults below); cached for the process lifetime via lru_cache.
    """
    defaults: Dict[str, Any] = {
        "version": 1,
        "mutual_exclusions": [],
        "incompatibilities": [],
    }
    return _load_yaml_config(DIAGNOSTIC_CONFLICTS_PATH, defaults, "diagnostic_conflicts")
|
||||
|
||||
|
||||
@lru_cache(maxsize=1)
def load_procedure_diagnosis_rules() -> Dict[str, Any]:
    """Load procedure/diagnosis correlation rules from config/procedure_diagnosis_rules.yaml.

    Delegates to _load_yaml_config (missing/invalid file falls back to the
    defaults below); cached for the process lifetime via lru_cache.
    """
    defaults: Dict[str, Any] = {
        "version": 1,
        "rules": [],
    }
    return _load_yaml_config(PROCEDURE_DIAGNOSIS_RULES_PATH, defaults, "procedure_diagnosis_rules")
|
||||
|
||||
|
||||
@lru_cache(maxsize=1)
def load_temporal_rules() -> Dict[str, Any]:
    """Load temporal rules from config/temporal_rules.yaml.

    Delegates to _load_yaml_config (missing/invalid file falls back to the
    defaults below); cached for the process lifetime via lru_cache.
    """
    defaults: Dict[str, Any] = {
        "version": 1,
        "rules": [],
    }
    return _load_yaml_config(TEMPORAL_RULES_PATH, defaults, "temporal_rules")
|
||||
|
||||
|
||||
@lru_cache(maxsize=1)
def load_parcours_rules() -> Dict[str, Any]:
    """Load patient-pathway rules from config/parcours_rules.yaml.

    Delegates to _load_yaml_config (missing/invalid file falls back to the
    defaults below); cached for the process lifetime via lru_cache.
    """
    defaults: Dict[str, Any] = {
        "version": 1,
        "documentary_rules": {},
        "pathway_rules": {},
    }
    return _load_yaml_config(PARCOURS_RULES_PATH, defaults, "parcours_rules")
|
||||
|
||||
|
||||
@lru_cache(maxsize=1)
def load_completude_rules() -> Dict[str, Any]:
    """Load documentary-completeness rules from config/completude_rules.yaml.

    Missing or invalid files fall back to the defaults below; the result is
    cached for the process lifetime.
    """
    defaults: Dict[str, Any] = {
        "version": 1,
        "diagnostics": {},
        "actes": {},
    }
    return _load_yaml_config(COMPLETUDE_RULES_PATH, defaults, "completude_rules")
|
||||
|
||||
|
||||
# --- Garde-fous de parsing des valeurs biologiques (anti-OCR) ---
|
||||
@@ -418,25 +324,7 @@ def load_lab_value_sanity() -> Dict[str, Any]:
|
||||
},
|
||||
}
|
||||
|
||||
path = LAB_SANITY_PATH
|
||||
if not path.exists():
|
||||
return defaults
|
||||
|
||||
try:
|
||||
import yaml # type: ignore
|
||||
except Exception:
|
||||
return defaults
|
||||
|
||||
try:
|
||||
data = yaml.safe_load(path.read_text(encoding="utf-8")) or {}
|
||||
if not isinstance(data, dict):
|
||||
return defaults
|
||||
merged = dict(defaults)
|
||||
for k, v in data.items():
|
||||
merged[k] = v
|
||||
return merged
|
||||
except Exception:
|
||||
return defaults
|
||||
return _load_yaml_config(LAB_SANITY_PATH, defaults, "lab_value_sanity")
|
||||
|
||||
|
||||
# --- Catalogue de règles (vetos + décisions), piloté par YAML ---
|
||||
@@ -506,11 +394,6 @@ def load_rules_catalog() -> Dict[str, Dict[str, Any]]:
|
||||
(=> ne casse pas le comportement historique)
|
||||
"""
|
||||
|
||||
try:
|
||||
import yaml # type: ignore
|
||||
except Exception:
|
||||
return {}
|
||||
|
||||
catalog: Dict[str, Dict[str, Any]] = {}
|
||||
|
||||
# 1) base
|
||||
@@ -519,7 +402,8 @@ def load_rules_catalog() -> Dict[str, Dict[str, Any]]:
|
||||
base_data = yaml.safe_load(RULES_BASE_PATH.read_text(encoding="utf-8")) or {}
|
||||
if isinstance(base_data, dict):
|
||||
catalog = _flatten_rules_yaml(base_data)
|
||||
except Exception:
|
||||
except (yaml.YAMLError, Exception) as e:
|
||||
_cfg_logger.error("Rules catalog : erreur lecture base.yaml — %s", e)
|
||||
catalog = {}
|
||||
|
||||
# 2) enabled overlays
|
||||
@@ -537,8 +421,8 @@ def load_rules_catalog() -> Dict[str, Dict[str, Any]]:
|
||||
extra = active.get("extra")
|
||||
if isinstance(extra, list):
|
||||
extra_files = [str(x) for x in extra if str(x).strip()]
|
||||
except Exception:
|
||||
pass
|
||||
except (yaml.YAMLError, Exception) as e:
|
||||
_cfg_logger.error("Rules catalog : erreur lecture enabled.yaml — %s", e)
|
||||
else:
|
||||
# fallback env
|
||||
active_site = os.environ.get("T2A_SITE", "").strip()
|
||||
@@ -552,8 +436,8 @@ def load_rules_catalog() -> Dict[str, Dict[str, Any]]:
|
||||
data = yaml.safe_load(p.read_text(encoding="utf-8")) or {}
|
||||
if isinstance(data, dict):
|
||||
catalog = _merge_rule_catalog(catalog, _flatten_rules_yaml(data))
|
||||
except Exception:
|
||||
pass
|
||||
except (yaml.YAMLError, Exception) as e:
|
||||
_cfg_logger.error("Rules catalog : erreur overlay spécialité %s — %s", active_specialty, e)
|
||||
|
||||
# 4) site overlay
|
||||
if active_site:
|
||||
@@ -563,8 +447,8 @@ def load_rules_catalog() -> Dict[str, Dict[str, Any]]:
|
||||
data = yaml.safe_load(p.read_text(encoding="utf-8")) or {}
|
||||
if isinstance(data, dict):
|
||||
catalog = _merge_rule_catalog(catalog, _flatten_rules_yaml(data))
|
||||
except Exception:
|
||||
pass
|
||||
except (yaml.YAMLError, Exception) as e:
|
||||
_cfg_logger.error("Rules catalog : erreur overlay site %s — %s", active_site, e)
|
||||
|
||||
# 5) extra overlays
|
||||
for rel in extra_files:
|
||||
@@ -574,8 +458,8 @@ def load_rules_catalog() -> Dict[str, Dict[str, Any]]:
|
||||
data = yaml.safe_load(p.read_text(encoding="utf-8")) or {}
|
||||
if isinstance(data, dict):
|
||||
catalog = _merge_rule_catalog(catalog, _flatten_rules_yaml(data))
|
||||
except Exception:
|
||||
pass
|
||||
except (yaml.YAMLError, Exception) as e:
|
||||
_cfg_logger.error("Rules catalog : erreur overlay %s — %s", rel, e)
|
||||
|
||||
return catalog
|
||||
|
||||
@@ -611,17 +495,7 @@ def load_rules_router() -> Dict[str, Any]:
|
||||
"defaults": {"enabled_packs": ["vetos_core", "decisions_core"]},
|
||||
"triggers": [],
|
||||
}
|
||||
path = RULES_ROUTER_PATH
|
||||
if not path.exists():
|
||||
return defaults
|
||||
try:
|
||||
data = yaml.safe_load(path.read_text(encoding="utf-8")) or {}
|
||||
# merge conservateur
|
||||
if isinstance(data, dict):
|
||||
defaults.update({k: v for k, v in data.items() if v is not None})
|
||||
return defaults
|
||||
except Exception:
|
||||
return defaults
|
||||
return _load_yaml_config(RULES_ROUTER_PATH, defaults, "rules_router")
|
||||
|
||||
|
||||
def rule_enabled(rule_id: str) -> bool:
|
||||
@@ -877,6 +751,7 @@ class DossierMedical(BaseModel):
|
||||
ghm_estimation: Optional[GHMEstimation] = None
|
||||
controles_cpam: list[ControleCPAM] = Field(default_factory=list)
|
||||
veto_report: Optional["VetoReport"] = None
|
||||
completude: Optional["CompletudeDossier"] = None
|
||||
processing_time_s: float | None = None
|
||||
metrics: Optional[DossierMetrics] = None
|
||||
rules_runtime: Optional[dict] = None
|
||||
@@ -924,6 +799,14 @@ class GHMEstimation(BaseModel):
|
||||
alertes: list[str] = Field(default_factory=list)
|
||||
|
||||
|
||||
class FinancialImpact(BaseModel):
    """Estimated financial impact of a UCR (CPAM) control on a stay."""
    # Severity-level delta, e.g. -2 means a loss of two severity levels.
    delta_severite: int = 0
    # Rough euro estimate of the impact — deliberately coarse, not an exact tarification.
    impact_estime_euros: int = 0
    # Triage priority: "critique" | "haute" | "normale" | "faible".
    priorite: str = "normale"
    # Free-text explanation of why this impact/priority was assigned.
    raison: str = ""
|
||||
|
||||
|
||||
class ControleCPAM(BaseModel):
|
||||
numero_ogc: int
|
||||
titre: str = ""
|
||||
@@ -933,12 +816,22 @@ class ControleCPAM(BaseModel):
|
||||
da_ucr: Optional[str] = None
|
||||
dr_ucr: Optional[str] = None
|
||||
actes_ucr: Optional[str] = None
|
||||
type_desaccord: Optional[str] = None # "DP" | "DAS" | "DP+DAS" | "Actes"
|
||||
financial_impact: Optional[FinancialImpact] = None
|
||||
contre_argumentation: Optional[str] = None
|
||||
response_data: Optional[dict] = None
|
||||
sources_reponse: list[RAGSource] = Field(default_factory=list)
|
||||
quality_tier: Optional[str] = None # "A" | "B" | "C"
|
||||
requires_review: bool = False
|
||||
quality_warnings: list[str] = Field(default_factory=list)
|
||||
# Délais réglementaires
|
||||
date_notification: Optional[str] = None # JJ/MM/AAAA
|
||||
date_limite_reponse: Optional[str] = None # calculé : notification + 30j
|
||||
statut_reponse: str = "a_traiter" # "a_traiter" | "en_cours" | "envoye" | "hors_delai"
|
||||
# Workflow validation DIM
|
||||
validation_dim: str = "non_valide" # "non_valide" | "en_revision" | "valide" | "rejete"
|
||||
commentaire_dim: Optional[str] = None
|
||||
date_validation: Optional[str] = None
|
||||
|
||||
|
||||
# --- Qualité / Vetos (contestabilité) ---
|
||||
@@ -962,6 +855,43 @@ class VetoReport(BaseModel):
|
||||
issues: list[VetoIssue] = Field(default_factory=list)
|
||||
|
||||
|
||||
# --- Complétude documentaire DIM ---
|
||||
|
||||
|
||||
class ItemCompletude(BaseModel):
    """Required or recommended piece of evidence backing a billed code."""

    # Evidence category: "biologie" | "imagerie" | "document" | "acte" | "clinique".
    categorie: str
    # Human-readable element name, e.g. "Albumine" | "CRO" | "Scanner abdominal".
    element: str
    # "present" | "absent" | "present_confirme" | "present_non_confirme" | "present_indirect".
    statut: str
    # Observed value when present, e.g. "28 g/L".
    valeur: Optional[str] = None
    # "obligatoire" | "recommande".
    importance: str
    # Explanation of the CPAM risk incurred if this element is missing.
    impact_cpam: str = ""
    # How the observed value confirms the code, e.g. "Albumine 28 g/L < 30 → confirme E43".
    confirmation_detail: Optional[str] = None
|
||||
|
||||
|
||||
class CheckCompletude(BaseModel):
    """Documentary-completeness check for one diagnostic code."""

    # Diagnostic code being checked, e.g. "E43".
    code: str
    # Code label, e.g. "Dénutrition sévère".
    libelle: str
    # Diagnosis role: "DP" | "DAS".
    type_diag: str
    # Individual evidence items supporting (or missing for) this code.
    items: list[ItemCompletude] = Field(default_factory=list)
    # Completeness score, 0-100.
    score: int = 100
    # "defendable" | "fragile" | "indefendable".
    verdict: str = "defendable"
    # One-line summary, e.g. "2/3 éléments obligatoires présents".
    resume: str = ""
|
||||
|
||||
|
||||
class CompletudeDossier(BaseModel):
    """Whole-dossier documentary-completeness report (aggregates per-code checks)."""

    # Per-diagnostic-code completeness checks.
    checks: list[CheckCompletude] = Field(default_factory=list)
    # Aggregate score over all checks, 0-100.
    score_global: int = 100
    # Aggregate verdict, same vocabulary as CheckCompletude.verdict.
    verdict_global: str = "defendable"
    # Document types found in the dossier.
    documents_presents: list[str] = Field(default_factory=list)
    # Expected document types not found in the dossier.
    documents_manquants: list[str] = Field(default_factory=list)
|
||||
|
||||
|
||||
class AnonymizationReport(BaseModel):
|
||||
source_file: str
|
||||
total_replacements: int = 0
|
||||
|
||||
Reference in New Issue
Block a user