feat: dictionnaire CCAM complet (8 257 codes) + index FAISS enrichi + validation actes
Phase 2 (CCAM) : - Nouveau src/medical/ccam_dict.py : build depuis CCAM_V81.xls via xlrd, lookup 3 niveaux, validation codes - Intégration dans l'extracteur : fallback ccam_lookup + _validate_ccam() avec alertes - CLI : --build-ccam-dict, --rebuild-index Phase 3 (FAISS) : - Chunks CCAM depuis le dictionnaire JSON (priorité sur le PDF) - Chunks CIM-10 index alphabétique (terme → code) - Priorisation cim10_alpha dans la recherche RAG Viewer : endpoint reprocess + bloc scripts Tests : 8 tests CCAM + tests raisonnement RAG (161 passed) Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -11,7 +11,7 @@ from typing import Optional
|
||||
|
||||
import pdfplumber
|
||||
|
||||
from ..config import RAG_INDEX_DIR, CIM10_PDF, GUIDE_METHODO_PDF, CCAM_PDF
|
||||
from ..config import RAG_INDEX_DIR, CIM10_PDF, GUIDE_METHODO_PDF, CCAM_PDF, CCAM_DICT_PATH
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -33,18 +33,46 @@ class Chunk:
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _chunk_cim10(pdf_path: Path) -> list[Chunk]:
|
||||
"""Découpe le PDF CIM-10 en chunks par code 3 caractères (ex: K80, K85)."""
|
||||
"""Découpe le PDF CIM-10 en double chunking : sous-codes individuels + parents 3-char."""
|
||||
chunks: list[Chunk] = []
|
||||
current_code: str | None = None
|
||||
current_text: list[str] = []
|
||||
current_page: int | None = None
|
||||
current_code3: str | None = None
|
||||
current_code3_text: list[str] = []
|
||||
current_code3_page: int | None = None
|
||||
|
||||
# Sous-codes en cours d'accumulation
|
||||
current_subcode: str | None = None
|
||||
current_subcode_text: list[str] = []
|
||||
current_subcode_page: int | None = None
|
||||
|
||||
# Pattern pour détecter un code CIM-10 à 3 caractères en début de ligne
|
||||
code3_pattern = re.compile(r"^([A-Z]\d{2})\s+(.+)")
|
||||
# Pattern pour les sous-codes (ex: K80.0, K80.1)
|
||||
subcode_pattern = re.compile(r"^([A-Z]\d{2}\.\d+)\s+(.+)")
|
||||
|
||||
logger.info("Extraction des chunks CIM-10 depuis %s", pdf_path.name)
|
||||
logger.info("Extraction des chunks CIM-10 (double chunking) depuis %s", pdf_path.name)
|
||||
|
||||
def _flush_subcode():
|
||||
"""Sauvegarde le chunk sous-code en cours."""
|
||||
if current_subcode and current_subcode_text:
|
||||
chunk_text = "\n".join(current_subcode_text)
|
||||
if len(chunk_text.split()) >= 3:
|
||||
chunks.append(Chunk(
|
||||
text=chunk_text,
|
||||
document="cim10",
|
||||
page=current_subcode_page,
|
||||
code=current_subcode,
|
||||
))
|
||||
|
||||
def _flush_code3():
|
||||
"""Sauvegarde le chunk parent 3-char en cours."""
|
||||
_flush_subcode()
|
||||
if current_code3 and current_code3_text:
|
||||
chunk_text = "\n".join(current_code3_text)
|
||||
if len(chunk_text.split()) >= 5:
|
||||
chunks.append(Chunk(
|
||||
text=chunk_text,
|
||||
document="cim10",
|
||||
page=current_code3_page,
|
||||
code=current_code3,
|
||||
))
|
||||
|
||||
with pdfplumber.open(pdf_path) as pdf:
|
||||
for page_num, page in enumerate(pdf.pages, start=1):
|
||||
@@ -57,37 +85,38 @@ def _chunk_cim10(pdf_path: Path) -> list[Chunk]:
|
||||
if not line:
|
||||
continue
|
||||
|
||||
m = code3_pattern.match(line)
|
||||
if m and not subcode_pattern.match(line):
|
||||
# Nouveau code 3-char → sauvegarder le chunk précédent
|
||||
if current_code and current_text:
|
||||
chunk_text = "\n".join(current_text)
|
||||
if len(chunk_text.split()) >= 5:
|
||||
chunks.append(Chunk(
|
||||
text=chunk_text,
|
||||
document="cim10",
|
||||
page=current_page,
|
||||
code=current_code,
|
||||
))
|
||||
current_code = m.group(1)
|
||||
current_text = [line]
|
||||
current_page = page_num
|
||||
m_sub = subcode_pattern.match(line)
|
||||
m3 = code3_pattern.match(line)
|
||||
|
||||
if m_sub:
|
||||
# Nouveau sous-code → flush le sous-code précédent
|
||||
_flush_subcode()
|
||||
current_subcode = m_sub.group(1)
|
||||
current_subcode_text = [line]
|
||||
current_subcode_page = page_num
|
||||
# Ajouter aussi au chunk parent
|
||||
if current_code3:
|
||||
current_code3_text.append(line)
|
||||
elif m3 and not m_sub:
|
||||
# Nouveau code 3-char → flush tout le bloc précédent
|
||||
_flush_code3()
|
||||
current_code3 = m3.group(1)
|
||||
current_code3_text = [line]
|
||||
current_code3_page = page_num
|
||||
current_subcode = None
|
||||
current_subcode_text = []
|
||||
current_subcode_page = None
|
||||
else:
|
||||
if current_code:
|
||||
current_text.append(line)
|
||||
# Ligne de continuation
|
||||
if current_subcode:
|
||||
current_subcode_text.append(line)
|
||||
if current_code3:
|
||||
current_code3_text.append(line)
|
||||
|
||||
# Dernier chunk
|
||||
if current_code and current_text:
|
||||
chunk_text = "\n".join(current_text)
|
||||
if len(chunk_text.split()) >= 5:
|
||||
chunks.append(Chunk(
|
||||
text=chunk_text,
|
||||
document="cim10",
|
||||
page=current_page,
|
||||
code=current_code,
|
||||
))
|
||||
# Flush final
|
||||
_flush_code3()
|
||||
|
||||
logger.info("CIM-10 : %d chunks extraits", len(chunks))
|
||||
logger.info("CIM-10 : %d chunks extraits (double chunking sous-codes + parents)", len(chunks))
|
||||
return chunks
|
||||
|
||||
|
||||
@@ -253,6 +282,95 @@ def _chunk_ccam(pdf_path: Path) -> list[Chunk]:
|
||||
return chunks
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Chunking CCAM depuis le dictionnaire JSON
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _chunk_ccam_from_dict() -> list[Chunk]:
    """Generate CCAM chunks from ccam_dict.json (one chunk per code + description).

    Takes priority over the PDF-derived CCAM chunks when the dictionary exists.

    Returns:
        list[Chunk]: one chunk per dictionary entry that has a non-empty
        description; an empty list when ``CCAM_DICT_PATH`` does not exist.
    """
    if not CCAM_DICT_PATH.exists():
        return []

    # Local import kept deliberately (top-level availability not guaranteed
    # from this view of the file), but without the needless ``_json`` alias.
    import json

    with open(CCAM_DICT_PATH, encoding="utf-8") as f:
        ccam_dict = json.load(f)

    chunks: list[Chunk] = []
    for code, info in ccam_dict.items():
        # Entries may be plain strings (description only) or dicts carrying
        # extra fields; normalize once instead of re-testing isinstance()
        # three times per iteration as the original did.
        if isinstance(info, dict):
            desc = info.get("description", "")
            regroupement = info.get("regroupement", "")
            tarif = info.get("tarif_s1")
        else:
            desc = str(info)
            regroupement = ""
            tarif = None

        if not desc:
            # No description → nothing useful to embed; skip the code.
            continue

        text_parts = [f"{code} {desc}"]
        if regroupement:
            text_parts.append(f"Regroupement: {regroupement}")
        if tarif is not None:
            text_parts.append(f"Tarif S1: {tarif}€")

        chunks.append(Chunk(
            text="\n".join(text_parts),
            document="ccam",
            code=code,
        ))

    logger.info("CCAM dict : %d chunks générés depuis %s", len(chunks), CCAM_DICT_PATH)
    return chunks
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Chunking CIM-10 Index Alphabétique
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _chunk_cim10_alpha(pdf_path: Path) -> list[Chunk]:
    """Parse the ALPHABETICAL INDEX section of the CIM-10 PDF.

    Scans pages starting at the first one whose text contains
    "INDEX ALPHAB", detects "term → code" entries, and emits chunks
    tagged with document="cim10_alpha".
    """
    # A line whose tail is a CIM-10 code, optionally with a .N sub-code.
    entry_re = re.compile(r"^(.+?)\s+([A-Z]\d{2}(?:\.\d+)?)\s*$")
    results: list[Chunk] = []
    index_reached = False

    logger.info("Extraction de l'index alphabétique CIM-10 depuis %s", pdf_path.name)

    with pdfplumber.open(pdf_path) as pdf:
        for page_no, page in enumerate(pdf.pages, start=1):
            content = page.extract_text()
            if not content:
                continue

            # Skip every page that precedes the alphabetical-index section.
            if "INDEX ALPHAB" in content.upper():
                index_reached = True
            if not index_reached:
                continue

            for raw_line in content.split("\n"):
                stripped = raw_line.strip()
                if not stripped:
                    continue
                match = entry_re.match(stripped)
                if match is None:
                    continue
                term = match.group(1).strip()
                code = match.group(2)
                # Discard overly short terms (fewer than 3 characters).
                if len(term) < 3:
                    continue
                results.append(Chunk(
                    text=f"{term} → {code}",
                    document="cim10_alpha",
                    page=page_no,
                    code=code,
                ))

    logger.info("CIM-10 index alphabétique : %d entrées extraites", len(results))
    return results
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Construction de l'index FAISS
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -280,13 +398,25 @@ def build_index(force: bool = False) -> None:
|
||||
for pdf_path, chunk_fn in [
|
||||
(CIM10_PDF, _chunk_cim10),
|
||||
(GUIDE_METHODO_PDF, _chunk_guide_methodo),
|
||||
(CCAM_PDF, _chunk_ccam),
|
||||
]:
|
||||
if pdf_path.exists():
|
||||
all_chunks.extend(chunk_fn(pdf_path))
|
||||
else:
|
||||
logger.warning("PDF non trouvé : %s", pdf_path)
|
||||
|
||||
# CCAM : priorité au dictionnaire JSON sur le PDF
|
||||
ccam_dict_chunks = _chunk_ccam_from_dict()
|
||||
if ccam_dict_chunks:
|
||||
all_chunks.extend(ccam_dict_chunks)
|
||||
elif CCAM_PDF.exists():
|
||||
all_chunks.extend(_chunk_ccam(CCAM_PDF))
|
||||
else:
|
||||
logger.warning("Ni dictionnaire CCAM ni PDF CCAM trouvé")
|
||||
|
||||
# CIM-10 index alphabétique (source additionnelle)
|
||||
if CIM10_PDF.exists():
|
||||
all_chunks.extend(_chunk_cim10_alpha(CIM10_PDF))
|
||||
|
||||
if not all_chunks:
|
||||
logger.error("Aucun chunk extrait — vérifiez les chemins des PDFs")
|
||||
return
|
||||
@@ -316,9 +446,9 @@ def build_index(force: bool = False) -> None:
|
||||
|
||||
metadata = [asdict(c) for c in all_chunks]
|
||||
# Ne pas sauvegarder le texte complet dans metadata (trop lourd),
|
||||
# garder un extrait de 500 chars
|
||||
# garder un extrait de 800 chars (les sous-codes sont courts, besoin du contexte)
|
||||
for m in metadata:
|
||||
m["extrait"] = m.pop("text")[:500]
|
||||
m["extrait"] = m.pop("text")[:800]
|
||||
|
||||
meta_path.write_text(json.dumps(metadata, ensure_ascii=False, indent=2), encoding="utf-8")
|
||||
|
||||
|
||||
Reference in New Issue
Block a user