feat: expert CPAM TIM method + extended rules engine

CPAM TIM method (statement of defense):
- Rewrote CPAM_ARGUMENTATION with 5-pass TIM reasoning
  (admin context → actual ground → bio confrontation → hierarchy → defensive validation)
- _BIO_THRESHOLDS (19 entries) + _build_bio_confrontation() for
  biology/diagnosis confrontation with numeric thresholds and verdicts
  (see the sketch after this list)
- _format_response() dual format: new TIM layout (numbered pleas, bio
  table, indefensible codes, operative conclusion) + legacy backward compatibility
- CPAM_ADVERSARIAL updated to check intellectual honesty
- Tests adapted + 12 new tests (bio confrontation, TIM format)
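
A minimal sketch of what the threshold table and confrontation helper could look like. Only the names _BIO_THRESHOLDS and _build_bio_confrontation come from this commit; the entry shape, the two example markers, and the verdict wording are invented for illustration (the real table in cpam_context.py holds 19 entries).

    # Hypothetical sketch, not the shipped code: fields and values are assumptions.
    from dataclasses import dataclass

    @dataclass(frozen=True)
    class BioThreshold:
        unit: str        # reporting unit, e.g. "mg/L"
        cutoff: float    # numeric threshold backing the verdict
        direction: str   # "above" or "below": which side supports the code
        supports: str    # CIM-10 code family the value would support

    # Two invented markers; the real _BIO_THRESHOLDS holds 19 entries.
    BIO_THRESHOLDS_SKETCH = {
        "CRP": BioThreshold("mg/L", 100.0, "above", "A41"),
        "Hb": BioThreshold("g/dL", 8.0, "below", "D62"),
    }

    def build_bio_confrontation_sketch(bio_values: dict) -> list:
        """Confront each lab value with its threshold; one verdict line per marker."""
        lines = []
        for marker, value in sorted(bio_values.items()):
            ref = BIO_THRESHOLDS_SKETCH.get(marker)
            if ref is None:
                continue  # no threshold on file, nothing to confront
            supported = value >= ref.cutoff if ref.direction == "above" else value <= ref.cutoff
            verdict = "supports" if supported else "does not support"
            lines.append(f"{marker} = {value} {ref.unit} (cutoff {ref.cutoff}, "
                         f"{ref.direction}): {verdict} {ref.supports}")
        return lines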

Rules engine:
- New YAML rules: demographic, diagnostic_conflicts,
  procedure_diagnosis, temporal, parcours (illustrative sketch after this list)
- FAISS-based bio extraction (vector synonym matching)
- Veto engine enriched (citations, Trackare skip, demographic rules)
- Decision engine: _apply_bio_rules_gen() + analytic matchers
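
Again a sketch only: the YAML rule files are not shown in this commit page, so the schema below (field names, matching semantics) is an assumption of how a demographic veto rule might be declared and applied.

    from typing import Optional

    # Invented schema: in the repo the rules are YAML files; once parsed, a rule
    # is a plain mapping, so one is inlined here.
    DEMOGRAPHIC_RULE = {
        "id": "DEMO-OBST-MALE",
        "when": {"code_prefix": "O", "sexe": "M"},  # obstetric code on a male patient
        "action": "veto",
        "citation": "CIM-10 chapter XV (pregnancy, childbirth and the puerperium)",
    }

    def apply_demographic_rule(rule: dict, code: str, sexe: str) -> Optional[str]:
        """Return a veto message carrying its citation when the rule matches, else None."""
        cond = rule["when"]
        if code.startswith(cond["code_prefix"]) and sexe == cond["sexe"]:
            if rule["action"] == "veto":
                return f"{rule['id']}: {code} vetoed ({rule['citation']})"
        return None

    # apply_demographic_rule(DEMOGRAPHIC_RULE, "O80.0", "M")
    # -> "DEMO-OBST-MALE: O80.0 vetoed (CIM-10 chapter XV ...)"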

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
dom
2026-03-04 11:57:07 +01:00
parent 795110d2e6
commit ce7a9650af
19 changed files with 1681 additions and 418 deletions

@@ -38,8 +38,10 @@ from .cpam_context import ( # noqa: F401
     _get_code_label,
     _get_cim10_definitions,
     _BIO_INTERPRETATION,
+    _BIO_THRESHOLDS,
     _assess_dossier_strength,
     _build_bio_summary,
+    _build_bio_confrontation,
     _check_das_bio_coherence,
 )
 from .cpam_validation import _CIM10_CODE_RE, _validate_adversarial as _validate_adversarial, _assess_quality_tier as _assess_quality_tier, _fuzzy_match_ref as _fuzzy_match_ref, _sanitize_unauthorized_codes as _sanitize_unauthorized_codes # noqa: F401
@@ -120,6 +122,11 @@ def generate_cpam_response(
 
     # 1. Pass 1: structured extraction (comprehension before argumentation)
     extraction = _extraction_pass(dossier, controle)
+    degraded_pass1 = extraction is None
+    if degraded_pass1:
+        dossier.alertes_codage.append(
+            "CPAM: passe 1 (extraction structurée) échouée → mode dégradé"
+        )
 
     # 2. Targeted RAG search
     sources = _search_rag_for_control(controle, dossier)
@@ -153,6 +160,12 @@ def generate_cpam_response(
         logger.warning(" LLM non disponible — contre-argumentation non générée")
         return "", None, rag_sources
 
+    # 5b. LOGIC-2: flag degraded mode in the result
+    if degraded_pass1:
+        result.setdefault("quality_flags", {})
+        result["quality_flags"]["cpam_pass1_failed"] = True
+        result["quality_flags"]["degraded_mode"] = True
+
     # 6. Deterministic sanitization: strips out-of-scope CIM-10 codes
     sanitized = _sanitize_unauthorized_codes(result, dossier, controle)
     if sanitized:
@@ -175,6 +188,16 @@ def generate_cpam_response(
         logger.warning(" CPAM : %d code(s) hors périmètre", len(code_warnings))
 
     # 8. Adversarial validation (factual coherence)
+    # LOGIC-3: detect identical models BEFORE the call
+    from ..config import check_adversarial_model_config
+
+    same_model, model_msg = check_adversarial_model_config()
+    if same_model:
+        result.setdefault("quality_flags", {})
+        result["quality_flags"]["adversarial_disabled_same_model"] = True
+        dossier.alertes_codage.append(
+            "Validation adversariale désactivée (modèles identiques)"
+        )
     adversarial_warnings: list[str] = []
     validation = _validate_adversarial(result, tag_map, controle)
     if validation and not validation.get("coherent", True):
@@ -186,48 +209,51 @@ def generate_cpam_response(
     if adversarial_warnings:
         adversarial_warnings.append(f"Score de confiance : {score}/10")
 
-    # 8b. Correction loop (max 1 retry)
-    if (validation
-            and not validation.get("coherent", True)
-            and validation.get("score_confiance", 10) <= 5
-            and rule_enabled("RULE-CPAM-CORRECTION-LOOP")):
+    # 8b. Correction loop (max 2 retries)
+    max_corrections = 2
+    for attempt in range(max_corrections):
+        if not (validation
+                and not validation.get("coherent", True)
+                and validation.get("score_confiance", 10) <= 5
+                and rule_enabled("RULE-CPAM-CORRECTION-LOOP")):
+            break
         erreurs_v = validation.get("erreurs", [])
-        logger.warning(" Score adversarial %s/10 — correction en cours (%d erreur(s))",
-                       validation.get("score_confiance"), len(erreurs_v))
+        logger.warning(" Score adversarial %s/10 — correction %d/%d (%d erreur(s))",
+                       validation.get("score_confiance"), attempt + 1, max_corrections, len(erreurs_v))
         correction_prompt = _build_correction_prompt(prompt, result, validation)
         corrected = call_ollama(correction_prompt, temperature=0.0, max_tokens=16000, role="cpam")
         if corrected is None:
             corrected = call_anthropic(correction_prompt, temperature=0.0, max_tokens=16000)
-        if corrected:
-            # Re-validate the correction
-            validation2 = _validate_adversarial(corrected, tag_map, controle)
-            score2 = validation2.get("score_confiance", 0) if validation2 else 0
-            score1 = validation.get("score_confiance", 0)
+        if not corrected:
+            break
-            if score2 > score1:
-                logger.info(" Correction acceptée (score %s→%s)", score1, score2)
-                result = corrected
-                validation = validation2
-                # Sanitize + recompute the warnings
-                _sanitize_unauthorized_codes(result, dossier, controle)
-                ref_warnings = _validate_references(result, sources)
-                grounding_warnings = _validate_grounding(result, tag_map)
-                code_warnings = _validate_codes_in_response(result, dossier, controle)
-                adversarial_warnings = []
-                if validation and not validation.get("coherent", True):
-                    for e in validation.get("erreurs", []):
-                        if isinstance(e, str) and e.strip():
-                            adversarial_warnings.append(f"Incohérence détectée : {e}")
-                    if adversarial_warnings:
-                        adversarial_warnings.append(
-                            f"Score de confiance : {validation.get('score_confiance', '?')}/10"
-                        )
-            else:
-                logger.warning(" Correction rejetée (score %s→%s) — conserve l'original",
-                               score1, score2)
+        validation2 = _validate_adversarial(corrected, tag_map, controle)
+        score2 = validation2.get("score_confiance", 0) if validation2 else 0
+        score1 = validation.get("score_confiance", 0)
+        if score2 > score1:
+            logger.info(" Correction %d acceptée (score %s→%s)", attempt + 1, score1, score2)
+            result = corrected
+            validation = validation2
+            _sanitize_unauthorized_codes(result, dossier, controle)
+            ref_warnings = _validate_references(result, sources)
+            grounding_warnings = _validate_grounding(result, tag_map)
+            code_warnings = _validate_codes_in_response(result, dossier, controle)
+            adversarial_warnings = []
+            if validation and not validation.get("coherent", True):
+                for e in validation.get("erreurs", []):
+                    if isinstance(e, str) and e.strip():
+                        adversarial_warnings.append(f"Incohérence détectée : {e}")
+                if adversarial_warnings:
+                    adversarial_warnings.append(
+                        f"Score de confiance : {validation.get('score_confiance', '?')}/10"
+                    )
+        else:
+            logger.warning(" Correction %d rejetée (score %s→%s)", attempt + 1, score1, score2)
+            break
 
     all_warnings = ref_warnings + grounding_warnings + code_warnings + adversarial_warnings
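
Distilled from the hunk above, the control flow of the widened correction loop, with the domain calls replaced by injected callables. Names here are placeholders rather than repo API, and the rule_enabled("RULE-CPAM-CORRECTION-LOOP") feature-flag gate is elided.

    from typing import Callable, Optional

    def correct_with_retries(result: dict,
                             validate: Callable[[dict], dict],
                             correct: Callable[[dict, dict], Optional[dict]],
                             max_corrections: int = 2):
        """Bounded retry: keep a correction only if it raises the adversarial score."""
        validation = validate(result)
        for _ in range(max_corrections):
            # Same gate as the diff: only retry incoherent, low-confidence results.
            if validation.get("coherent", True) or validation.get("score_confiance", 10) > 5:
                break
            corrected = correct(result, validation)
            if not corrected:
                break  # the correction call returned nothing usable
            revalidation = validate(corrected)
            if revalidation.get("score_confiance", 0) > validation.get("score_confiance", 0):
                result, validation = corrected, revalidation  # keep it, maybe retry again
            else:
                break  # score did not improve: keep the original and stop
        return result, validation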