feat: dictionnaire CCAM complet (8 257 codes) + index FAISS enrichi + validation actes
Phase 2 (CCAM) : - Nouveau src/medical/ccam_dict.py : build depuis CCAM_V81.xls via xlrd, lookup 3 niveaux, validation codes - Intégration dans l'extracteur : fallback ccam_lookup + _validate_ccam() avec alertes - CLI : --build-ccam-dict, --rebuild-index Phase 3 (FAISS) : - Chunks CCAM depuis le dictionnaire JSON (priorité sur le PDF) - Chunks CIM-10 index alphabétique (terme → code) - Priorisation cim10_alpha dans la recherche RAG Viewer : endpoint reprocess + bloc scripts Tests : 8 tests CCAM + tests raisonnement RAG (161 passed) Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -39,6 +39,7 @@ OLLAMA_TIMEOUT = 120
|
||||
|
||||
RAG_INDEX_DIR = BASE_DIR / "data" / "rag_index"
|
||||
CIM10_DICT_PATH = BASE_DIR / "data" / "cim10_dict.json"
|
||||
CCAM_DICT_PATH = BASE_DIR / "data" / "ccam_dict.json"
|
||||
CIM10_PDF = Path("/home/dom/ai/aivanov_CIM/cim-10-fr_2026_a_usage_pmsi_version_provisoire_111225.pdf")
|
||||
GUIDE_METHODO_PDF = Path("/home/dom/ai/aivanov_CIM/guide_methodo_mco_2026_version_provisoire.pdf")
|
||||
CCAM_PDF = Path("/home/dom/ai/aivanov_CIM/actualisation_ccam_descriptive_a_usage_pmsi_v4_2025.pdf")
|
||||
|
||||
23
src/main.py
23
src/main.py
@@ -168,6 +168,18 @@ def main(input_path: str | None = None) -> None:
|
||||
action="store_true",
|
||||
help="Générer le dictionnaire CIM-10 depuis metadata.json et quitter",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--build-ccam-dict",
|
||||
nargs="?",
|
||||
const="CCAM_V81.xls",
|
||||
metavar="PATH",
|
||||
help="Générer le dictionnaire CCAM depuis un fichier XLS (défaut: CCAM_V81.xls)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--rebuild-index",
|
||||
action="store_true",
|
||||
help="Forcer la reconstruction de l'index FAISS",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.build_dict:
|
||||
@@ -175,6 +187,17 @@ def main(input_path: str | None = None) -> None:
|
||||
build_dict()
|
||||
return
|
||||
|
||||
if args.build_ccam_dict:
|
||||
from .medical.ccam_dict import build_dict as build_ccam
|
||||
result = build_ccam(args.build_ccam_dict)
|
||||
logger.info("Dictionnaire CCAM : %d codes générés", len(result))
|
||||
return
|
||||
|
||||
if args.rebuild_index:
|
||||
from .medical.rag_index import build_index
|
||||
build_index(force=True)
|
||||
return
|
||||
|
||||
if args.no_ner:
|
||||
# Monkey-patch pour désactiver NER
|
||||
from .anonymization import ner_anonymizer
|
||||
|
||||
191
src/medical/ccam_dict.py
Normal file
191
src/medical/ccam_dict.py
Normal file
@@ -0,0 +1,191 @@
|
||||
"""Dictionnaire CCAM complet extrait depuis le fichier XLS officiel (CNAM).
|
||||
|
||||
Fournit un lookup intelligent avec normalisation Unicode pour la recherche
|
||||
de codes CCAM à partir de textes d'actes médicaux en français.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import unicodedata
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from ..config import CCAM_DICT_PATH
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Singleton : dictionnaire chargé une seule fois
|
||||
_dict_cache: dict[str, dict] | None = None
|
||||
# Cache des labels normalisés pour le substring matching
|
||||
_normalized_cache: list[tuple[str, str, str]] | None = None
|
||||
|
||||
_CCAM_CODE_RE = re.compile(r"^[A-Z]{4}\d{3}$")
|
||||
|
||||
|
||||
def normalize_text(text: str) -> str:
    """Normalize text: fold accents, lowercase, collapse whitespace."""
    # Map typographic apostrophes (U+2019, U+2018, U+02BC) to the ASCII quote
    # in a single pass.
    text = text.translate({0x2019: "'", 0x2018: "'", 0x02BC: "'"})
    # NFKD decomposition splits base characters from combining accents, which
    # are then dropped (Unicode category "Mn" = nonspacing marks).
    decomposed = unicodedata.normalize("NFKD", text)
    without_accents = "".join(ch for ch in decomposed if unicodedata.category(ch) != "Mn")
    # Lowercase, then squeeze every whitespace run down to one space.
    return re.sub(r"\s+", " ", without_accents.lower()).strip()
|
||||
|
||||
|
||||
def build_dict(source_path: str | Path) -> dict[str, dict]:
    """Build the CCAM dictionary from an XLS file and persist it as JSON.

    JSON format: {code: {description, activite, tarif_s1, regroupement}}

    Args:
        source_path: Path to the CCAM XLS file (e.g. CCAM_V81.xls).

    Returns:
        The code -> info dictionary (empty if the source file is missing).
    """
    import xlrd

    xls_path = Path(source_path)
    if not xls_path.exists():
        logger.error("Fichier XLS non trouvé : %s", xls_path)
        return {}

    sheet = xlrd.open_workbook(str(xls_path)).sheet_by_index(0)

    codes: dict[str, dict] = {}
    for row in range(sheet.nrows):
        code = str(sheet.cell_value(row, 0)).strip()
        # Keep only rows whose first column is a well-formed CCAM code
        # (four uppercase letters + three digits).
        if not _CCAM_CODE_RE.match(code):
            continue

        # xlrd returns numeric cells as float; anything else means "absent".
        raw_activite = sheet.cell_value(row, 3)
        raw_tarif = sheet.cell_value(row, 5)
        codes[code] = {
            "description": str(sheet.cell_value(row, 2)).strip(),
            "activite": int(raw_activite) if isinstance(raw_activite, float) else None,
            "tarif_s1": round(raw_tarif, 2) if isinstance(raw_tarif, (int, float)) else None,
            "regroupement": str(sheet.cell_value(row, 10)).strip() or None,
        }

    # Persist the dictionary as JSON next to the other generated data files.
    CCAM_DICT_PATH.parent.mkdir(parents=True, exist_ok=True)
    with open(CCAM_DICT_PATH, "w", encoding="utf-8") as f:
        json.dump(codes, f, ensure_ascii=False, indent=2)

    logger.info("Dictionnaire CCAM généré : %d codes → %s", len(codes), CCAM_DICT_PATH)
    return codes
|
||||
|
||||
|
||||
def load_dict() -> dict[str, dict]:
    """Return the CCAM dictionary (lazily loaded, cached as a module singleton).

    If the JSON file does not exist, a warning is logged and an empty dict
    is cached and returned.
    """
    global _dict_cache
    if _dict_cache is None:
        if CCAM_DICT_PATH.exists():
            with open(CCAM_DICT_PATH, encoding="utf-8") as f:
                _dict_cache = json.load(f)
        else:
            logger.warning("Dictionnaire CCAM absent : %s — lancez --build-ccam-dict", CCAM_DICT_PATH)
            _dict_cache = {}
    return _dict_cache
|
||||
|
||||
|
||||
def _get_normalized_entries() -> list[tuple[str, str, str]]:
    """Return (code, description, normalized_description) tuples, longest first."""
    global _normalized_cache
    if _normalized_cache is None:
        entries: list[tuple[str, str, str]] = []
        for code, info in load_dict().items():
            if isinstance(info, dict):
                desc = info.get("description", "")
            else:
                desc = str(info)
            entries.append((code, desc, normalize_text(desc)))
        # Longest normalized description first so the most specific label
        # wins during substring matching in lookup(). Python's sort is
        # stable, so ties keep their insertion order.
        entries.sort(key=lambda entry: len(entry[2]), reverse=True)
        _normalized_cache = entries
    return _normalized_cache
|
||||
|
||||
|
||||
def lookup(
    text: str,
    domain_overrides: dict[str, str] | None = None,
) -> str | None:
    """Find a CCAM code for a free-text medical act description.

    Three-level strategy:
      1. Substring match against domain_overrides (highest priority,
         e.g. the pre-existing CCAM_MAP).
      2. Exact match on the normalized description in the full dictionary.
      3. Normalized substring match, most specific description first.

    Args:
        text: The medical act text to look up.
        domain_overrides: Priority term -> code mapping.

    Returns:
        The matching CCAM code, or None if nothing matched.
    """
    if not text:
        return None

    needle = normalize_text(text)

    # Level 1: domain overrides (substring match on the normalized term).
    for term, code in (domain_overrides or {}).items():
        if normalize_text(term) in needle:
            return code

    entries = _get_normalized_entries()

    # Level 2: exact normalized match.
    for code, _desc, norm in entries:
        if norm == needle:
            return code

    # Level 3: substring match in either direction; entries are sorted by
    # descending description length, so the most specific one wins. Very
    # short descriptions (< 4 chars) are skipped to avoid spurious hits.
    for code, _desc, norm in entries:
        if len(norm) >= 4 and (norm in needle or needle in norm):
            return code

    return None
|
||||
|
||||
|
||||
def validate_code(code: str) -> tuple[bool, str]:
    """Check whether a CCAM code exists in the dictionary.

    Returns:
        (is_valid, description) — the description is empty when invalid.
    """
    d = load_dict()
    if code not in d:
        return False, ""
    info = d[code]
    if isinstance(info, dict):
        return True, info.get("description", "")
    return True, str(info)
|
||||
|
||||
|
||||
def reset_cache() -> None:
    """Reset the module-level caches (useful for tests).

    Clears both the raw dictionary singleton and the normalized-entries
    cache so the next load_dict()/_get_normalized_entries() call re-reads
    the JSON file from disk.
    """
    global _dict_cache, _normalized_cache
    _dict_cache = None
    _normalized_cache = None
|
||||
@@ -10,6 +10,7 @@ from typing import Optional
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
from .cim10_dict import lookup as dict_lookup, normalize_text
|
||||
from .ccam_dict import lookup as ccam_lookup, validate_code as ccam_validate
|
||||
from ..config import (
|
||||
ActeCCAM,
|
||||
BiologieCle,
|
||||
@@ -113,6 +114,9 @@ def extract_medical_info(
|
||||
if use_rag:
|
||||
_enrich_with_rag(dossier)
|
||||
|
||||
# Post-processing : validation des codes CCAM contre le dictionnaire
|
||||
_validate_ccam(dossier)
|
||||
|
||||
# Post-processing : exclusions symptôme vs diagnostic précis
|
||||
_apply_exclusion_rules(dossier)
|
||||
|
||||
@@ -395,6 +399,13 @@ def _extract_actes(text: str, dossier: DossierMedical) -> None:
|
||||
date=date,
|
||||
))
|
||||
|
||||
# Fallback : tenter le lookup CCAM dict pour les actes sans code
|
||||
for acte in dossier.actes_ccam:
|
||||
if not acte.code_ccam_suggestion:
|
||||
code = ccam_lookup(acte.texte, domain_overrides=CCAM_MAP)
|
||||
if code:
|
||||
acte.code_ccam_suggestion = code
|
||||
|
||||
|
||||
def _extract_antecedents(text: str, dossier: DossierMedical) -> None:
|
||||
"""Extrait les antécédents."""
|
||||
@@ -625,6 +636,22 @@ def _is_negated_by_edsnlp(term: str, negated_terms: set[str]) -> bool:
|
||||
return False
|
||||
|
||||
|
||||
def _validate_ccam(dossier: DossierMedical) -> None:
    """Check suggested CCAM codes against the official dictionary.

    Marks each act's ``validite`` as "valide" or "non_verifie", and appends
    a coding alert for suggested codes absent from the dictionary.
    """
    for acte in dossier.actes_ccam:
        if not acte.code_ccam_suggestion:
            # No code was suggested for this act: nothing to check.
            acte.validite = "non_verifie"
            continue
        is_valid, _desc = ccam_validate(acte.code_ccam_suggestion)
        if is_valid:
            acte.validite = "valide"
            continue
        acte.validite = "non_verifie"
        dossier.alertes_codage.append(
            f"CCAM {acte.code_ccam_suggestion} ({acte.texte}) : code absent du dictionnaire CCAM V81"
        )
|
||||
|
||||
|
||||
def _find_act_date(text: str, act_pattern: str) -> str | None:
|
||||
"""Trouve la date associée à un acte."""
|
||||
# Chercher "acte le DD/MM" ou "acte le DD/MM/YYYY"
|
||||
|
||||
@@ -11,7 +11,7 @@ from typing import Optional
|
||||
|
||||
import pdfplumber
|
||||
|
||||
from ..config import RAG_INDEX_DIR, CIM10_PDF, GUIDE_METHODO_PDF, CCAM_PDF
|
||||
from ..config import RAG_INDEX_DIR, CIM10_PDF, GUIDE_METHODO_PDF, CCAM_PDF, CCAM_DICT_PATH
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -33,18 +33,46 @@ class Chunk:
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _chunk_cim10(pdf_path: Path) -> list[Chunk]:
|
||||
"""Découpe le PDF CIM-10 en chunks par code 3 caractères (ex: K80, K85)."""
|
||||
"""Découpe le PDF CIM-10 en double chunking : sous-codes individuels + parents 3-char."""
|
||||
chunks: list[Chunk] = []
|
||||
current_code: str | None = None
|
||||
current_text: list[str] = []
|
||||
current_page: int | None = None
|
||||
current_code3: str | None = None
|
||||
current_code3_text: list[str] = []
|
||||
current_code3_page: int | None = None
|
||||
|
||||
# Sous-codes en cours d'accumulation
|
||||
current_subcode: str | None = None
|
||||
current_subcode_text: list[str] = []
|
||||
current_subcode_page: int | None = None
|
||||
|
||||
# Pattern pour détecter un code CIM-10 à 3 caractères en début de ligne
|
||||
code3_pattern = re.compile(r"^([A-Z]\d{2})\s+(.+)")
|
||||
# Pattern pour les sous-codes (ex: K80.0, K80.1)
|
||||
subcode_pattern = re.compile(r"^([A-Z]\d{2}\.\d+)\s+(.+)")
|
||||
|
||||
logger.info("Extraction des chunks CIM-10 depuis %s", pdf_path.name)
|
||||
logger.info("Extraction des chunks CIM-10 (double chunking) depuis %s", pdf_path.name)
|
||||
|
||||
def _flush_subcode():
|
||||
"""Sauvegarde le chunk sous-code en cours."""
|
||||
if current_subcode and current_subcode_text:
|
||||
chunk_text = "\n".join(current_subcode_text)
|
||||
if len(chunk_text.split()) >= 3:
|
||||
chunks.append(Chunk(
|
||||
text=chunk_text,
|
||||
document="cim10",
|
||||
page=current_subcode_page,
|
||||
code=current_subcode,
|
||||
))
|
||||
|
||||
def _flush_code3():
|
||||
"""Sauvegarde le chunk parent 3-char en cours."""
|
||||
_flush_subcode()
|
||||
if current_code3 and current_code3_text:
|
||||
chunk_text = "\n".join(current_code3_text)
|
||||
if len(chunk_text.split()) >= 5:
|
||||
chunks.append(Chunk(
|
||||
text=chunk_text,
|
||||
document="cim10",
|
||||
page=current_code3_page,
|
||||
code=current_code3,
|
||||
))
|
||||
|
||||
with pdfplumber.open(pdf_path) as pdf:
|
||||
for page_num, page in enumerate(pdf.pages, start=1):
|
||||
@@ -57,37 +85,38 @@ def _chunk_cim10(pdf_path: Path) -> list[Chunk]:
|
||||
if not line:
|
||||
continue
|
||||
|
||||
m = code3_pattern.match(line)
|
||||
if m and not subcode_pattern.match(line):
|
||||
# Nouveau code 3-char → sauvegarder le chunk précédent
|
||||
if current_code and current_text:
|
||||
chunk_text = "\n".join(current_text)
|
||||
if len(chunk_text.split()) >= 5:
|
||||
chunks.append(Chunk(
|
||||
text=chunk_text,
|
||||
document="cim10",
|
||||
page=current_page,
|
||||
code=current_code,
|
||||
))
|
||||
current_code = m.group(1)
|
||||
current_text = [line]
|
||||
current_page = page_num
|
||||
m_sub = subcode_pattern.match(line)
|
||||
m3 = code3_pattern.match(line)
|
||||
|
||||
if m_sub:
|
||||
# Nouveau sous-code → flush le sous-code précédent
|
||||
_flush_subcode()
|
||||
current_subcode = m_sub.group(1)
|
||||
current_subcode_text = [line]
|
||||
current_subcode_page = page_num
|
||||
# Ajouter aussi au chunk parent
|
||||
if current_code3:
|
||||
current_code3_text.append(line)
|
||||
elif m3 and not m_sub:
|
||||
# Nouveau code 3-char → flush tout le bloc précédent
|
||||
_flush_code3()
|
||||
current_code3 = m3.group(1)
|
||||
current_code3_text = [line]
|
||||
current_code3_page = page_num
|
||||
current_subcode = None
|
||||
current_subcode_text = []
|
||||
current_subcode_page = None
|
||||
else:
|
||||
if current_code:
|
||||
current_text.append(line)
|
||||
# Ligne de continuation
|
||||
if current_subcode:
|
||||
current_subcode_text.append(line)
|
||||
if current_code3:
|
||||
current_code3_text.append(line)
|
||||
|
||||
# Dernier chunk
|
||||
if current_code and current_text:
|
||||
chunk_text = "\n".join(current_text)
|
||||
if len(chunk_text.split()) >= 5:
|
||||
chunks.append(Chunk(
|
||||
text=chunk_text,
|
||||
document="cim10",
|
||||
page=current_page,
|
||||
code=current_code,
|
||||
))
|
||||
# Flush final
|
||||
_flush_code3()
|
||||
|
||||
logger.info("CIM-10 : %d chunks extraits", len(chunks))
|
||||
logger.info("CIM-10 : %d chunks extraits (double chunking sous-codes + parents)", len(chunks))
|
||||
return chunks
|
||||
|
||||
|
||||
@@ -253,6 +282,95 @@ def _chunk_ccam(pdf_path: Path) -> list[Chunk]:
|
||||
return chunks
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Chunking CCAM depuis le dictionnaire JSON
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _chunk_ccam_from_dict() -> list[Chunk]:
    """Generate CCAM chunks from ccam_dict.json (one chunk per code+description).

    Takes priority over the PDF-derived chunks when the dictionary exists.
    """
    if not CCAM_DICT_PATH.exists():
        return []

    import json as _json
    with open(CCAM_DICT_PATH, encoding="utf-8") as f:
        ccam_dict = _json.load(f)

    chunks: list[Chunk] = []
    for code, info in ccam_dict.items():
        if isinstance(info, dict):
            desc = info.get("description", "")
            regroupement = info.get("regroupement", "")
            tarif = info.get("tarif_s1")
        else:
            desc = str(info)
            regroupement = ""
            tarif = None
        if not desc:
            # Entries without a description carry no searchable signal.
            continue
        lines = [f"{code} {desc}"]
        if regroupement:
            lines.append(f"Regroupement: {regroupement}")
        if tarif is not None:
            lines.append(f"Tarif S1: {tarif}€")
        chunks.append(Chunk(
            text="\n".join(lines),
            document="ccam",
            code=code,
        ))

    logger.info("CCAM dict : %d chunks générés depuis %s", len(chunks), CCAM_DICT_PATH)
    return chunks
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Chunking CIM-10 Index Alphabétique
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _chunk_cim10_alpha(pdf_path: Path) -> list[Chunk]:
    """Parse the alphabetical-index section of the CIM-10 PDF.

    Detects "term ... code" entries and yields chunks tagged with
    document="cim10_alpha".
    """
    # A term followed by a CIM-10 code (optionally with a sub-code) at the
    # end of the line.
    entry_pattern = re.compile(r"^(.+?)\s+([A-Z]\d{2}(?:\.\d+)?)\s*$")
    chunks: list[Chunk] = []

    logger.info("Extraction de l'index alphabétique CIM-10 depuis %s", pdf_path.name)

    in_alpha_section = False
    with pdfplumber.open(pdf_path) as pdf:
        for page_num, page in enumerate(pdf.pages, start=1):
            text = page.extract_text()
            if not text:
                continue

            # The index begins on the first page mentioning it; every page
            # before that is skipped. Once entered, the section never ends
            # (the index runs to the end of the document).
            if "INDEX ALPHAB" in text.upper():
                in_alpha_section = True
            if not in_alpha_section:
                continue

            for raw_line in text.split("\n"):
                line = raw_line.strip()
                if not line:
                    continue
                match = entry_pattern.match(line)
                if not match:
                    continue
                terme = match.group(1).strip()
                code = match.group(2)
                # Very short terms are likely headers or extraction noise.
                if len(terme) < 3:
                    continue
                chunks.append(Chunk(
                    text=f"{terme} → {code}",
                    document="cim10_alpha",
                    page=page_num,
                    code=code,
                ))

    logger.info("CIM-10 index alphabétique : %d entrées extraites", len(chunks))
    return chunks
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Construction de l'index FAISS
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -280,13 +398,25 @@ def build_index(force: bool = False) -> None:
|
||||
for pdf_path, chunk_fn in [
|
||||
(CIM10_PDF, _chunk_cim10),
|
||||
(GUIDE_METHODO_PDF, _chunk_guide_methodo),
|
||||
(CCAM_PDF, _chunk_ccam),
|
||||
]:
|
||||
if pdf_path.exists():
|
||||
all_chunks.extend(chunk_fn(pdf_path))
|
||||
else:
|
||||
logger.warning("PDF non trouvé : %s", pdf_path)
|
||||
|
||||
# CCAM : priorité au dictionnaire JSON sur le PDF
|
||||
ccam_dict_chunks = _chunk_ccam_from_dict()
|
||||
if ccam_dict_chunks:
|
||||
all_chunks.extend(ccam_dict_chunks)
|
||||
elif CCAM_PDF.exists():
|
||||
all_chunks.extend(_chunk_ccam(CCAM_PDF))
|
||||
else:
|
||||
logger.warning("Ni dictionnaire CCAM ni PDF CCAM trouvé")
|
||||
|
||||
# CIM-10 index alphabétique (source additionnelle)
|
||||
if CIM10_PDF.exists():
|
||||
all_chunks.extend(_chunk_cim10_alpha(CIM10_PDF))
|
||||
|
||||
if not all_chunks:
|
||||
logger.error("Aucun chunk extrait — vérifiez les chemins des PDFs")
|
||||
return
|
||||
@@ -316,9 +446,9 @@ def build_index(force: bool = False) -> None:
|
||||
|
||||
metadata = [asdict(c) for c in all_chunks]
|
||||
# Ne pas sauvegarder le texte complet dans metadata (trop lourd),
|
||||
# garder un extrait de 500 chars
|
||||
# garder un extrait de 800 chars (les sous-codes sont courts, besoin du contexte)
|
||||
for m in metadata:
|
||||
m["extrait"] = m.pop("text")[:500]
|
||||
m["extrait"] = m.pop("text")[:800]
|
||||
|
||||
meta_path.write_text(json.dumps(metadata, ensure_ascii=False, indent=2), encoding="utf-8")
|
||||
|
||||
|
||||
@@ -74,8 +74,8 @@ def search_similar(query: str, top_k: int = 10) -> list[dict]:
|
||||
raw_results.append(meta)
|
||||
|
||||
# Prioriser les sources CIM-10 (au moins 6 sur top_k)
|
||||
cim10_results = [r for r in raw_results if r["document"] == "cim10"]
|
||||
other_results = [r for r in raw_results if r["document"] != "cim10"]
|
||||
cim10_results = [r for r in raw_results if r["document"] in ("cim10", "cim10_alpha")]
|
||||
other_results = [r for r in raw_results if r["document"] not in ("cim10", "cim10_alpha")]
|
||||
|
||||
min_cim10 = min(6, len(cim10_results))
|
||||
final = cim10_results[:min_cim10]
|
||||
@@ -150,6 +150,7 @@ def _build_prompt(texte: str, sources: list[dict], contexte: dict, est_dp: bool
|
||||
for i, src in enumerate(sources, 1):
|
||||
doc_name = {
|
||||
"cim10": "CIM-10 FR 2026",
|
||||
"cim10_alpha": "CIM-10 Index Alphabétique 2026",
|
||||
"guide_methodo": "Guide Méthodologique MCO 2026",
|
||||
"ccam": "CCAM PMSI V4 2025",
|
||||
}.get(src["document"], src["document"])
|
||||
|
||||
@@ -147,4 +147,37 @@ def create_app() -> Flask:
|
||||
logger.info("Modèle Ollama changé : %s", new_model)
|
||||
return jsonify({"ok": True, "model": cfg.OLLAMA_MODEL})
|
||||
|
||||
@app.route("/reprocess/<path:filepath>", methods=["POST"])
def reprocess(filepath: str):
    """Re-run the full pipeline on an already-processed dossier.

    Resolves the original source PDF from the dossier's ``source_file``
    attribute by searching the ``input/`` directory, then re-extracts and
    rewrites all outputs in place.

    Returns:
        JSON {"ok": True, ...} on success; {"error": ...} with 400 (no
        recorded source file), 404 (PDF not found) or 500 (processing
        failure) otherwise.
    """
    from ..main import process_pdf, write_outputs

    # NOTE(review): load_dossier is a closure/module helper not visible in
    # this chunk — presumably loads the stored dossier JSON; confirm it
    # raises or returns a dossier with source_file=None for unknown paths.
    dossier = load_dossier(filepath)
    source_file = dossier.source_file
    if not source_file:
        return jsonify({"error": "Fichier source introuvable"}), 400

    # Look for the source PDF under input/ (recursively: dossiers may live
    # in per-batch subdirectories).
    input_dir = Path(__file__).parent.parent.parent / "input"
    pdf_path = None
    for p in input_dir.rglob(source_file):
        if p.is_file():
            pdf_path = p
            break

    if not pdf_path:
        return jsonify({"error": f"PDF source '{source_file}' introuvable"}), 404

    try:
        anonymized_text, new_dossier, report = process_pdf(pdf_path)
        # Mirror the CLI's output naming: spaces in the stem become
        # underscores, and PDFs from a subdirectory keep that subdirectory
        # in the output tree.
        stem = pdf_path.stem.replace(" ", "_")
        subdir = None
        if pdf_path.parent != input_dir:
            subdir = pdf_path.parent.name
        write_outputs(stem, anonymized_text, new_dossier, report, subdir=subdir)
        return jsonify({"ok": True, "message": "Traitement terminé"})
    except Exception as e:
        # Broad catch is deliberate at this HTTP boundary: log the full
        # traceback and surface the message to the client as a 500.
        logger.exception("Erreur lors du retraitement")
        return jsonify({"error": str(e)}), 500
|
||||
|
||||
return app
|
||||
|
||||
@@ -253,6 +253,7 @@
|
||||
|
||||
loadModels();
|
||||
})();
|
||||
{% block scripts %}{% endblock %}
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
Reference in New Issue
Block a user