feat: expert CPAM TIM method + extended rules engine

CPAM — TIM method (defence memo):
- Rewrote CPAM_ARGUMENTATION with the 5-pass TIM reasoning (administrative context → real motive → biology confrontation → hierarchy → defensive validation)
- _BIO_THRESHOLDS (19 entries) + _build_bio_confrontation() for the biology/diagnosis confrontation, with numeric thresholds and verdicts
- _format_response() dual format: new TIM output (numbered defence grounds, bio table, non-defensible codes, dispositive conclusion) + legacy backward compatibility
- CPAM_ADVERSARIAL updated to check intellectual honesty
- Tests adapted + 12 new tests (bio confrontation, TIM format)

Rules engine:
- New YAML rules: demographic, diagnostic_conflicts, procedure_diagnosis, temporal, parcours
- FAISS bio extraction (vector synonyms)
- Enriched veto engine (citations, Trackare skip, demographic rules)
- Decision engine: _apply_bio_rules_gen() + analytic matchers

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
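Note — the FAISS synonym matching for lab-test extraction is mentioned above but not part of the diff below. The sketch that follows only illustrates the vector-synonym idea under stated assumptions: embed, CANONICAL_TESTS and the 0.80 cut-off are illustrative placeholders, not the shipped code.

import numpy as np
import faiss  # vector index assumed to back the synonym matching

# Canonical lab-test names, matching the "test" field of _BIO_THRESHOLDS.
CANONICAL_TESTS = ["Hémoglobine", "Plaquettes", "TSH", "Créatinine", "Troponine"]

def build_synonym_index(embed) -> faiss.IndexFlatIP:
    """Index the canonical test names; `embed` (hypothetical) maps str -> np.ndarray."""
    vecs = np.stack([embed(t) for t in CANONICAL_TESTS]).astype("float32")
    faiss.normalize_L2(vecs)                 # unit vectors => inner product = cosine
    index = faiss.IndexFlatIP(vecs.shape[1])
    index.add(vecs)
    return index

def resolve_test_name(raw: str, index, embed, threshold: float = 0.80) -> str | None:
    """Map a free-text label ("Hb", "taux d'hémoglobine") to a canonical test name."""
    q = embed(raw).astype("float32").reshape(1, -1)
    faiss.normalize_L2(q)
    scores, ids = index.search(q, 1)         # nearest canonical test
    return CANONICAL_TESTS[ids[0][0]] if scores[0][0] >= threshold else None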
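Likewise, the demographic YAML rules are not in this excerpt; below is a minimal sketch of how such a veto might be evaluated once loaded, with a made-up rule schema, id and message.

# Hypothetical in-memory form of one demographic YAML rule.
DEMOGRAPHIC_RULES = [
    {"id": "DEMO-O-MALE", "code_prefix": "O", "sexe": "M",
     "message": "Code obstétrical incompatible avec un patient masculin"},
]

def demographic_vetoes(codes: list[str], sexe: str) -> list[str]:
    """Return one veto message per code incompatible with the patient's sex."""
    vetoes: list[str] = []
    for rule in DEMOGRAPHIC_RULES:
        if sexe != rule["sexe"]:
            continue
        for code in codes:
            if code.startswith(rule["code_prefix"]):
                vetoes.append(f"{code} [{rule['id']}]: {rule['message']}")
    return vetoes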
@@ -195,6 +195,118 @@ def _build_tagged_context(dossier: DossierMedical) -> tuple[str, dict[str, str]]
     return text, tag_map


+# ---------------------------------------------------------------------------
+# Seuils biologiques par code CIM-10 (Table 8 du référentiel TIM)
+# Utilisé pour la confrontation biologie/diagnostic dans le mémoire de défense.
+# ---------------------------------------------------------------------------
+_BIO_THRESHOLDS: dict[str, dict] = {
+    "D50": {"test": "Hémoglobine", "condition": "< 13 (H) / < 12 (F)", "also": ["Ferritine < 30"]},
+    "D62": {"test": "Hémoglobine", "condition": "< 13 (H) / < 12 (F)"},
+    "D64": {"test": "Hémoglobine", "condition": "< 13 (H) / < 12 (F)"},
+    "D69.6": {"test": "Plaquettes", "condition": "< 150 G/L"},
+    "E03": {"test": "TSH", "condition": "> 4 mUI/L"},
+    "E05": {"test": "TSH", "condition": "< 0.4 mUI/L"},
+    "E10": {"test": "Glycémie", "condition": "> 7 mmol/L ou HbA1c > 6.5%"},
+    "E11": {"test": "Glycémie", "condition": "> 7 mmol/L ou HbA1c > 6.5%"},
+    "E87.1": {"test": "Sodium", "condition": "< 135 mmol/L"},
+    "E87.0": {"test": "Sodium", "condition": "> 145 mmol/L"},
+    "E87.5": {"test": "Potassium", "condition": "> 5.0 mmol/L"},
+    "E87.6": {"test": "Potassium", "condition": "< 3.5 mmol/L"},
+    "K72": {"test": "ASAT", "condition": "> 120 UI/L (3x norme)", "also": ["ALAT > 120"]},
+    "K85": {"test": "Lipasémie", "condition": "> 180 UI/L (3x norme)"},
+    "N17": {"test": "Créatinine", "condition": "> 130 (H) / > 110 (F)", "note": "avec élévation aiguë"},
+    "N18": {"test": "Créatinine", "condition": "DFG calculé"},
+    "I50": {"test": "BNP", "condition": "> 100 pg/mL ou NT-proBNP > 300"},
+    "I21": {"test": "Troponine", "condition": "> 0.04 ng/mL"},
+    "I26": {"test": "D-dimères", "condition": "> 500 ng/mL"},
+}
+
+
+def _build_bio_confrontation(dossier: DossierMedical, controle: ControleCPAM) -> str:
+    """Construit le tableau de confrontation biologie/diagnostic pour les codes contestés.
+
+    Utilise _BIO_THRESHOLDS (Table 8 TIM) et les valeurs de dossier.biologie_cle
+    pour produire un verdict par diagnostic : CONFIRMÉ / NON CONFIRMÉ / NON DISPONIBLE.
+    """
+    # Collecter tous les codes en jeu (3 premiers chars pour match préfixe)
+    codes_in_play: list[str] = []
+    if dossier.diagnostic_principal and dossier.diagnostic_principal.cim10_suggestion:
+        codes_in_play.append(normalize_code(dossier.diagnostic_principal.cim10_suggestion))
+    for das in dossier.diagnostics_associes:
+        if das.cim10_suggestion:
+            codes_in_play.append(normalize_code(das.cim10_suggestion))
+    for field in (controle.dp_ucr, controle.da_ucr, controle.dr_ucr):
+        if not field:
+            continue
+        for raw in re.split(r"[,;\s]+", field.strip()):
+            raw = raw.strip()
+            if raw:
+                codes_in_play.append(normalize_code(raw))
+
+    if not codes_in_play:
+        return "(Aucun code en jeu pour la confrontation biologique)"
+
+    # Indexer les valeurs bio disponibles
+    bio_values: dict[str, float] = {}
+    for b in dossier.biologie_cle:
+        if b.test and b.valeur:
+            try:
+                bio_values[b.test] = float(b.valeur.replace(",", ".").split()[0])
+            except (ValueError, AttributeError):
+                pass
+
+    lines: list[str] = []
+    matched_any = False
+    for code in codes_in_play:
+        prefix = code[:3] if len(code) >= 3 else code
+        threshold = _BIO_THRESHOLDS.get(prefix)
+        if not threshold:
+            # Essayer avec code complet (ex: E87.1)
+            threshold = _BIO_THRESHOLDS.get(code[:5] if len(code) >= 5 else code)
+        if not threshold:
+            continue
+
+        matched_any = True
+        test_name = threshold["test"]
+        condition = threshold["condition"]
+        is_valid, label = validate_code(code)
+        label_str = f" ({label})" if is_valid and label else ""
+
+        if test_name in bio_values:
+            val = bio_values[test_name]
+            # Vérifier si la valeur est dans les normes
+            if test_name in BIO_NORMALS:
+                lo, hi = BIO_NORMALS[test_name]
+                is_normal = lo <= val <= hi
+                verdict = "NON CONFIRMÉ (valeur NORMALE)" if is_normal else "CONFIRMÉ"
+            else:
+                verdict = "À VÉRIFIER"
+            lines.append(
+                f" {code}{label_str} : {test_name} requis {condition} → "
+                f"valeur dossier = {val} → {verdict}"
+            )
+        else:
+            lines.append(
+                f" {code}{label_str} : {test_name} requis {condition} → "
+                f"NON DISPONIBLE dans le dossier"
+            )
+
+        # Tests complémentaires (also)
+        for also_test in threshold.get("also", []):
+            parts = also_test.split()
+            if len(parts) >= 1:
+                also_name = parts[0]
+                if also_name in bio_values:
+                    lines.append(f" + {also_test} → valeur dossier = {bio_values[also_name]}")
+                else:
+                    lines.append(f" + {also_test} → NON DISPONIBLE")
+
+    if not matched_any:
+        return "(Aucun seuil biologique applicable aux codes en jeu)"
+
+    return "\n".join(lines)
+
+
 # Interprétations cliniques pour le résumé bio déterministe
 _BIO_INTERPRETATION: dict[str, dict[str, str]] = {
     # --- Hépatique / digestif ---
@@ -648,6 +760,9 @@ def _build_cpam_prompt(
         "le manque de données au lieu d'inventer des preuves."
     )

+    # Confrontation biologie / diagnostic (méthode TIM)
+    bio_confrontation = _build_bio_confrontation(dossier, controle)
+
     # Résumé biologique déterministe (interprétations non modifiables par le LLM)
     bio_summary = _build_bio_summary(dossier)
     if bio_summary:
@@ -736,5 +851,7 @@ def _build_cpam_prompt(
         codes_autorises_str=codes_autorises_str,
         sources_text=sources_text,
         extraction_str=extraction_str,
+        bio_confrontation_str=bio_confrontation,
+        numero_ogc=controle.numero_ogc,
     )
     return prompt, tag_map
@@ -38,8 +38,10 @@ from .cpam_context import ( # noqa: F401
     _get_code_label,
     _get_cim10_definitions,
     _BIO_INTERPRETATION,
+    _BIO_THRESHOLDS,
     _assess_dossier_strength,
     _build_bio_summary,
+    _build_bio_confrontation,
     _check_das_bio_coherence,
 )
 from .cpam_validation import _CIM10_CODE_RE, _validate_adversarial as _validate_adversarial, _assess_quality_tier as _assess_quality_tier, _fuzzy_match_ref as _fuzzy_match_ref, _sanitize_unauthorized_codes as _sanitize_unauthorized_codes  # noqa: F401
@@ -120,6 +122,11 @@ def generate_cpam_response(

     # 1. Passe 1 — Extraction structurée (compréhension avant argumentation)
     extraction = _extraction_pass(dossier, controle)
+    degraded_pass1 = extraction is None
+    if degraded_pass1:
+        dossier.alertes_codage.append(
+            "CPAM: passe 1 (extraction structurée) échouée → mode dégradé"
+        )

     # 2. Recherche RAG ciblée
     sources = _search_rag_for_control(controle, dossier)
@@ -153,6 +160,12 @@ def generate_cpam_response(
         logger.warning(" LLM non disponible — contre-argumentation non générée")
         return "", None, rag_sources

+    # 5b. LOGIC-2 — Marquer le mode dégradé dans le résultat
+    if degraded_pass1:
+        result.setdefault("quality_flags", {})
+        result["quality_flags"]["cpam_pass1_failed"] = True
+        result["quality_flags"]["degraded_mode"] = True
+
     # 6. Sanitisation déterministe — supprime les codes CIM-10 hors périmètre
     sanitized = _sanitize_unauthorized_codes(result, dossier, controle)
     if sanitized:
@@ -175,6 +188,16 @@ def generate_cpam_response(
         logger.warning(" CPAM : %d code(s) hors périmètre", len(code_warnings))

     # 8. Validation adversariale (cohérence factuelle)
+    # LOGIC-3 : détecter si modèles identiques AVANT l'appel
+    from ..config import check_adversarial_model_config
+    same_model, model_msg = check_adversarial_model_config()
+    if same_model:
+        result.setdefault("quality_flags", {})
+        result["quality_flags"]["adversarial_disabled_same_model"] = True
+        dossier.alertes_codage.append(
+            "Validation adversariale désactivée (modèles identiques)"
+        )
+
     adversarial_warnings: list[str] = []
     validation = _validate_adversarial(result, tag_map, controle)
     if validation and not validation.get("coherent", True):
@@ -186,48 +209,51 @@ def generate_cpam_response(
     if adversarial_warnings:
         adversarial_warnings.append(f"Score de confiance : {score}/10")

-    # 8b. Boucle de correction (max 1 retry)
-    if (validation
-            and not validation.get("coherent", True)
-            and validation.get("score_confiance", 10) <= 5
-            and rule_enabled("RULE-CPAM-CORRECTION-LOOP")):
+    # 8b. Boucle de correction (max 2 retries)
+    max_corrections = 2
+    for attempt in range(max_corrections):
+        if not (validation
+                and not validation.get("coherent", True)
+                and validation.get("score_confiance", 10) <= 5
+                and rule_enabled("RULE-CPAM-CORRECTION-LOOP")):
+            break
+
         erreurs_v = validation.get("erreurs", [])
-        logger.warning(" Score adversarial %s/10 — correction en cours (%d erreur(s))",
-                       validation.get("score_confiance"), len(erreurs_v))
+        logger.warning(" Score adversarial %s/10 — correction %d/%d (%d erreur(s))",
+                       validation.get("score_confiance"), attempt + 1, max_corrections, len(erreurs_v))

         correction_prompt = _build_correction_prompt(prompt, result, validation)
         corrected = call_ollama(correction_prompt, temperature=0.0, max_tokens=16000, role="cpam")
         if corrected is None:
             corrected = call_anthropic(correction_prompt, temperature=0.0, max_tokens=16000)

-        if corrected:
-            # Re-valider la correction
-            validation2 = _validate_adversarial(corrected, tag_map, controle)
-            score2 = validation2.get("score_confiance", 0) if validation2 else 0
-            score1 = validation.get("score_confiance", 0)
+        if not corrected:
+            break

-            if score2 > score1:
-                logger.info(" Correction acceptée (score %s → %s)", score1, score2)
-                result = corrected
-                validation = validation2
-                # Sanitiser + recalculer les warnings
-                _sanitize_unauthorized_codes(result, dossier, controle)
-                ref_warnings = _validate_references(result, sources)
-                grounding_warnings = _validate_grounding(result, tag_map)
-                code_warnings = _validate_codes_in_response(result, dossier, controle)
-                adversarial_warnings = []
-                if validation and not validation.get("coherent", True):
-                    for e in validation.get("erreurs", []):
-                        if isinstance(e, str) and e.strip():
-                            adversarial_warnings.append(f"Incohérence détectée : {e}")
-                    if adversarial_warnings:
-                        adversarial_warnings.append(
-                            f"Score de confiance : {validation.get('score_confiance', '?')}/10"
-                        )
-            else:
-                logger.warning(" Correction rejetée (score %s → %s) — conserve l'original",
-                               score1, score2)
+        validation2 = _validate_adversarial(corrected, tag_map, controle)
+        score2 = validation2.get("score_confiance", 0) if validation2 else 0
+        score1 = validation.get("score_confiance", 0)
+
+        if score2 > score1:
+            logger.info(" Correction %d acceptée (score %s → %s)", attempt + 1, score1, score2)
+            result = corrected
+            validation = validation2
+            _sanitize_unauthorized_codes(result, dossier, controle)
+            ref_warnings = _validate_references(result, sources)
+            grounding_warnings = _validate_grounding(result, tag_map)
+            code_warnings = _validate_codes_in_response(result, dossier, controle)
+            adversarial_warnings = []
+            if validation and not validation.get("coherent", True):
+                for e in validation.get("erreurs", []):
+                    if isinstance(e, str) and e.strip():
+                        adversarial_warnings.append(f"Incohérence détectée : {e}")
+                if adversarial_warnings:
+                    adversarial_warnings.append(
+                        f"Score de confiance : {validation.get('score_confiance', '?')}/10"
+                    )
+        else:
+            logger.warning(" Correction %d rejetée (score %s → %s)", attempt + 1, score1, score2)
+            break

     all_warnings = ref_warnings + grounding_warnings + code_warnings + adversarial_warnings
@@ -33,7 +33,11 @@ def _fuzzy_match_ref(ref: str, tag_map: dict[str, str]) -> str | None:


 def _validate_grounding(response_data: dict, tag_map: dict[str, str]) -> list[str]:
-    """Vérifie que les références dans preuves_dossier correspondent à des tags existants.
+    """Vérifie que les références dans preuves correspondent à des tags existants.
+
+    Supporte les deux formats :
+    - Ancien : response_data["preuves_dossier"][].ref
+    - Nouveau TIM : response_data["moyens_defense"][].preuves[].ref

     Applique un fuzzy matching par code CIM-10 avant de flaguer un warning.

@@ -44,24 +48,40 @@ def _validate_grounding(response_data: dict, tag_map: dict[str, str]) -> list[st
         return []

     warnings: list[str] = []
-    preuves = response_data.get("preuves_dossier")
-    if not preuves or not isinstance(preuves, list):
-        return warnings

-    for p in preuves:
-        if not isinstance(p, dict):
-            continue
-        ref = p.get("ref", "")
+    def _check_ref(ref: str, context: str) -> None:
         if not ref:
-            continue
-        if ref not in tag_map:
-            matched_tag = _fuzzy_match_ref(ref, tag_map)
-            if matched_tag:
-                logger.info("Grounding : ref [%s] résolue vers [%s]", ref, matched_tag)
-                continue  # pas de warning
-            valeur = p.get("valeur", "?")
-            warnings.append(f"Preuve [{ref}] non traçable (« {valeur} »)")
-            logger.warning("Grounding : preuve [%s] introuvable dans les tags du dossier", ref)
+            return
+        # Nettoyer les crochets si présents (nouveau format utilise "[BIO-1]")
+        clean_ref = ref.strip("[]")
+        if clean_ref in tag_map or ref in tag_map:
+            return
+        matched_tag = _fuzzy_match_ref(clean_ref, tag_map)
+        if matched_tag:
+            logger.info("Grounding : ref [%s] résolue vers [%s]", ref, matched_tag)
+            return
+        warnings.append(f"Preuve [{ref}] non traçable (« {context} »)")
+        logger.warning("Grounding : preuve [%s] introuvable dans les tags du dossier", ref)
+
+    # Ancien format : preuves_dossier
+    preuves = response_data.get("preuves_dossier")
+    if preuves and isinstance(preuves, list):
+        for p in preuves:
+            if isinstance(p, dict):
+                _check_ref(p.get("ref", ""), p.get("valeur", "?"))
+
+    # Nouveau format TIM : moyens_defense[].preuves
+    moyens = response_data.get("moyens_defense")
+    if moyens and isinstance(moyens, list):
+        for moyen in moyens:
+            if not isinstance(moyen, dict):
+                continue
+            moyen_preuves = moyen.get("preuves")
+            if not moyen_preuves or not isinstance(moyen_preuves, list):
+                continue
+            for p in moyen_preuves:
+                if isinstance(p, dict):
+                    _check_ref(p.get("ref", ""), p.get("fait", "?"))

     return warnings
@@ -111,12 +131,19 @@ def _validate_references(parsed: dict, sources: list[dict]) -> list[str]:
 _CIM10_CODE_RE = re.compile(r"\b([A-Z]\d{2}\.?\d{0,2})\b")

 # Champs textuels de la réponse LLM à scanner pour les codes CIM-10
+# Supporte les deux formats : ancien (contre_arguments_*) et nouveau (moyens_defense TIM)
 _TEXT_FIELDS = (
+    # Ancien format
     "analyse_contestation",
     "contre_arguments_medicaux",
     "contre_arguments_asymetrie",
     "contre_arguments_reglementaires",
     "conclusion",
+    # Nouveau format TIM
+    "rappel_faits",
+    "asymetrie_information",
+    "reponse_points_cpam",
+    "conclusion_dispositive",
 )
@@ -220,7 +247,7 @@ def _sanitize_unauthorized_codes(
         if new_val != val:
             parsed[key] = new_val

-    # Sanitiser aussi les preuves_dossier.valeur
+    # Sanitiser aussi les preuves_dossier.valeur (ancien format)
     preuves = parsed.get("preuves_dossier")
     if preuves and isinstance(preuves, list):
         for p in preuves:
@@ -240,6 +267,27 @@
             if new_v != v:
                 p["valeur"] = new_v

+    # Sanitiser les moyens_defense[].argument (nouveau format TIM)
+    moyens = parsed.get("moyens_defense")
+    if moyens and isinstance(moyens, list):
+        for moyen in moyens:
+            if not isinstance(moyen, dict):
+                continue
+            for field_key in ("argument", "titre"):
+                val = moyen.get(field_key, "")
+                if not val or not isinstance(val, str):
+                    continue
+                new_val = val
+                for pattern in _SANITIZE_PATTERNS:
+                    new_val = pattern.sub(
+                        lambda m, _p=pattern: _replace_code(m),
+                        new_val,
+                    )
+                new_val = re.sub(r"\(\s*\)", "", new_val)
+                new_val = re.sub(r" +", " ", new_val).strip()
+                if new_val != val:
+                    moyen[field_key] = new_val
+
     if removed:
         for code in removed:
             norm = normalize_code(code)
@@ -275,7 +323,7 @@ def _validate_codes_in_response(
         if val and isinstance(val, str):
             text_fields.append(val)

-    # Preuves du dossier — valeurs
+    # Preuves du dossier — valeurs (ancien format)
     preuves = parsed.get("preuves_dossier")
     if preuves and isinstance(preuves, list):
         for p in preuves:
@@ -284,6 +332,16 @@
             if v and isinstance(v, str):
                 text_fields.append(v)

+    # Moyens de défense (nouveau format TIM)
+    moyens = parsed.get("moyens_defense")
+    if moyens and isinstance(moyens, list):
+        for moyen in moyens:
+            if isinstance(moyen, dict):
+                for mkey in ("argument", "titre"):
+                    v = moyen.get(mkey, "")
+                    if v and isinstance(v, str):
+                        text_fields.append(v)
+
     combined_text = "\n".join(text_fields)
     found_codes = _CIM10_CODE_RE.findall(combined_text)
@@ -330,6 +388,18 @@ def _validate_adversarial(
     """
     import json as _json

+    # LOGIC-3 — Vérifier si les modèles CPAM et validation sont identiques
+    from ..config import check_adversarial_model_config
+
+    same_model, model_msg = check_adversarial_model_config()
+    if same_model:
+        logger.warning("LOGIC-3: %s", model_msg)
+        return {
+            "coherent": True,
+            "erreurs": [f"Validation adversariale dégradée : {model_msg}"],
+            "score_confiance": 0,
+        }
+
     # Construire le résumé des éléments factuels disponibles
     if tag_map:
         factual_lines = "\n".join(f" [{tag}] {content}" for tag, content in tag_map.items())
@@ -341,8 +411,8 @@
     try:
         response_json = _json.dumps(response_data, ensure_ascii=False, indent=None)
         # Tronquer si trop long pour le prompt de validation
-        if len(response_json) > 3000:
-            response_json = response_json[:3000] + "..."
+        if len(response_json) > 10000:
+            response_json = response_json[:10000] + "..."
     except (TypeError, ValueError):
         logger.warning("Validation adversariale : impossible de sérialiser la réponse")
         return None
@@ -365,9 +435,9 @@
     )

     logger.debug(" Validation adversariale")
-    result = call_ollama(prompt, temperature=0.0, max_tokens=3000, role="validation")
+    result = call_ollama(prompt, temperature=0.0, max_tokens=6000, role="validation")
     if result is None:
-        result = call_anthropic(prompt, temperature=0.0, max_tokens=3000)
+        result = call_anthropic(prompt, temperature=0.0, max_tokens=6000)
     if result is None:
         logger.warning(" Validation adversariale échouée — LLM indisponible")
         return None
@@ -407,14 +477,20 @@ def _build_correction_prompt(
     erreurs = adversarial_result.get("erreurs", [])
     erreurs_text = "\n".join(f" {i}. {e}" for i, e in enumerate(erreurs, 1))

-    # Résumé compact de la réponse problématique
+    # Résumé compact de la réponse problématique (supporte les deux formats)
     summary_fields = {}
+    # Ancien format
     for key in ("analyse_contestation", "contre_arguments_medicaux",
                 "contre_arguments_asymetrie", "contre_arguments_reglementaires",
                 "conclusion"):
         val = original_response.get(key)
         if val and isinstance(val, str):
             # Tronquer chaque champ à 400 chars
             summary_fields[key] = val[:400] + ("..." if len(val) > 400 else "")
+    # Nouveau format TIM
+    for key in ("rappel_faits", "asymetrie_information", "reponse_points_cpam",
+                "conclusion_dispositive"):
+        val = original_response.get(key)
+        if val and isinstance(val, str):
+            summary_fields[key] = val[:400] + ("..." if len(val) > 400 else "")

     try:
@@ -522,13 +598,182 @@ def _assess_quality_tier(
     return tier, requires_review, categorized


+def _is_new_tim_format(parsed: dict) -> bool:
+    """Détecte si la réponse LLM utilise le nouveau format TIM (moyens_defense)."""
+    return "moyens_defense" in parsed
+
+
 def _format_response(
     parsed: dict,
     ref_warnings: list[str] | None = None,
     quality_tier: str | None = None,
     categorized_warnings: list[str] | None = None,
 ) -> str:
-    """Formate la réponse LLM en texte lisible."""
+    """Formate la réponse LLM en texte lisible.
+
+    Supporte deux formats via duck-typing :
+    - Nouveau TIM : moyens_defense, confrontation_bio, conclusion_dispositive
+    - Ancien : contre_arguments_medicaux, points_accord, conclusion
+    """
+    if _is_new_tim_format(parsed):
+        return _format_response_tim(parsed, ref_warnings, quality_tier, categorized_warnings)
+    return _format_response_legacy(parsed, ref_warnings, quality_tier, categorized_warnings)
+
+
+def _format_response_tim(
+    parsed: dict,
+    ref_warnings: list[str] | None = None,
+    quality_tier: str | None = None,
+    categorized_warnings: list[str] | None = None,
+) -> str:
+    """Formate la réponse LLM au format mémoire en défense TIM."""
+    sections: list[str] = []
+    sep = "───────────────────────────────────────────────────────"
+    sep_heavy = "═══════════════════════════════════════════════════════"
+
+    # En-tête
+    objet = parsed.get("objet", "Mémoire en défense")
+    sections.append(f"{sep_heavy}\nMÉMOIRE EN DÉFENSE — {objet}\n{sep_heavy}")
+
+    # Bandeau qualité si tier C
+    if quality_tier == "C":
+        sections.append("⚠ REVUE MANUELLE REQUISE (Qualité : C)")
+
+    # Rappel des faits
+    rappel = parsed.get("rappel_faits")
+    if rappel:
+        sections.append(f"RAPPEL DES FAITS\n{rappel}")
+
+    sections.append(sep)
+
+    # Moyens de défense numérotés
+    moyens = parsed.get("moyens_defense")
+    if moyens and isinstance(moyens, list):
+        for moyen in moyens:
+            if not isinstance(moyen, dict):
+                continue
+            num = moyen.get("numero", "?")
+            titre = moyen.get("titre", "")
+            argument = moyen.get("argument", "")
+
+            moyen_lines = [f"MOYEN N°{num} — {titre}"]
+            if argument:
+                moyen_lines.append(argument)
+
+            # Preuves intégrées dans chaque moyen
+            moyen_preuves = moyen.get("preuves")
+            if moyen_preuves and isinstance(moyen_preuves, list):
+                for p in moyen_preuves:
+                    if isinstance(p, dict):
+                        ref = p.get("ref", "")
+                        fait = p.get("fait", "")
+                        signif = p.get("signification", "")
+                        moyen_lines.append(f"  Preuve : {ref} {fait} → {signif}")
+
+            # Source réglementaire du moyen
+            src_regl = moyen.get("source_reglementaire")
+            if src_regl and src_regl != "null":
+                moyen_lines.append(f"  Source : {src_regl}")
+
+            sections.append("\n".join(moyen_lines))
+
+    sections.append(sep)
+
+    # Confrontation biologie / diagnostic (tableau)
+    confrontation = parsed.get("confrontation_bio")
+    if confrontation and isinstance(confrontation, list):
+        table_lines = ["CONFRONTATION BIOLOGIE / DIAGNOSTIC"]
+        table_lines.append(
+            "┌──────────────────┬──────────────┬───────────────┬────────────┬────────────────┐"
+        )
+        table_lines.append(
+            "│ Diagnostic       │ Test requis  │ Seuil         │ Valeur     │ Verdict        │"
+        )
+        table_lines.append(
+            "├──────────────────┼──────────────┼───────────────┼────────────┼────────────────┤"
+        )
+        for row in confrontation:
+            if not isinstance(row, dict):
+                continue
+            diag = str(row.get("diagnostic", ""))[:17].ljust(17)
+            test = str(row.get("test", ""))[:13].ljust(13)
+            seuil = str(row.get("seuil", ""))[:14].ljust(14)
+            valeur = str(row.get("valeur", ""))[:11].ljust(11)
+            verdict = str(row.get("verdict", ""))[:15].ljust(15)
+            table_lines.append(f"│ {diag}│ {test}│ {seuil}│ {valeur}│ {verdict}│")
+        table_lines.append(
+            "└──────────────────┴──────────────┴───────────────┴────────────┴────────────────┘"
+        )
+        sections.append("\n".join(table_lines))
+
+    sections.append(sep)
+
+    # Codes non défendables (honnêteté intellectuelle)
+    codes_nd = parsed.get("codes_non_defendables")
+    if codes_nd and isinstance(codes_nd, list) and len(codes_nd) > 0:
+        nd_lines = ["⚠ CODES NON DÉFENDABLES (honnêteté intellectuelle)"]
+        for nd in codes_nd:
+            if isinstance(nd, dict):
+                code = nd.get("code", "?")
+                raison = nd.get("raison", "")
+                reco = nd.get("recommandation", "")
+                nd_lines.append(f"- {code} : {raison}")
+                if reco:
+                    nd_lines.append(f"  → {reco}")
+        sections.append("\n".join(nd_lines))
+        sections.append(sep)
+
+    # Asymétrie d'information
+    asymetrie = parsed.get("asymetrie_information")
+    if asymetrie:
+        sections.append(f"ASYMÉTRIE D'INFORMATION\n{asymetrie}")
+        sections.append(sep)
+
+    # Réponse aux points CPAM
+    reponse_cpam = parsed.get("reponse_points_cpam")
+    if reponse_cpam:
+        sections.append(f"RÉPONSE AUX POINTS DE LA CPAM\n{reponse_cpam}")
+        sections.append(sep)
+
+    # Références réglementaires
+    refs = parsed.get("references")
+    if refs:
+        if isinstance(refs, list):
+            ref_lines = ["RÉFÉRENCES RÉGLEMENTAIRES"]
+            for r in refs:
+                if isinstance(r, dict):
+                    doc = r.get("document", "")
+                    page = r.get("page", "")
+                    citation = r.get("citation", "")
+                    ref_lines.append(f"- [{doc}, p.{page}] {citation}")
+                else:
+                    ref_lines.append(f"- {r}")
+            sections.append("\n".join(ref_lines))
+        else:
+            sections.append(f"RÉFÉRENCES RÉGLEMENTAIRES\n{refs}")
+
+    sections.append(sep_heavy)
+
+    # Conclusion dispositive
+    conclusion = parsed.get("conclusion_dispositive")
+    if conclusion:
+        sections.append(f"CONCLUSION\n{conclusion}")
+
+    sections.append(sep_heavy)
+
+    # Avertissements
+    sections.extend(_format_warnings(categorized_warnings, ref_warnings))
+
+    return "\n\n".join(sections)
+
+
+def _format_response_legacy(
+    parsed: dict,
+    ref_warnings: list[str] | None = None,
+    quality_tier: str | None = None,
+    categorized_warnings: list[str] | None = None,
+) -> str:
+    """Formate la réponse LLM au format hérité (rétro-compatibilité cache)."""
     sections = []

     # Bandeau qualité si tier C
@@ -543,12 +788,12 @@ def _format_response(
     if accord and accord.lower() not in ("aucun", "non applicable", "n/a", ""):
         sections.append(f"POINTS D'ACCORD\n{accord}")

-    # Nouveaux champs structurés par axe
+    # Champs structurés par axe
     contre_med = parsed.get("contre_arguments_medicaux")
     if contre_med:
         sections.append(f"CONTRE-ARGUMENTS MÉDICAUX\n{contre_med}")

-    # Preuves du dossier (nouveau champ structuré)
+    # Preuves du dossier
     preuves = parsed.get("preuves_dossier")
     if preuves and isinstance(preuves, list):
         preuves_lines = []
@@ -577,7 +822,7 @@
     if contre:
         sections.append(f"CONTRE-ARGUMENTS\n{contre}")

-    # Références structurées (nouveau format liste) ou ancien format string
+    # Références structurées ou ancien format string
     refs = parsed.get("references")
     if refs:
         if isinstance(refs, list):
@@ -599,7 +844,18 @@
     if conclusion:
         sections.append(f"CONCLUSION\n{conclusion}")

-    # Avertissements catégorisés (nouveau format)
+    # Avertissements
+    sections.extend(_format_warnings(categorized_warnings, ref_warnings))
+
+    return "\n\n".join(sections)
+
+
+def _format_warnings(
+    categorized_warnings: list[str] | None = None,
+    ref_warnings: list[str] | None = None,
+) -> list[str]:
+    """Formate les avertissements qualité (partagé entre les deux formats)."""
+    sections: list[str] = []
     if categorized_warnings:
         critiques = [w for w in categorized_warnings if w.startswith("[CRITIQUE]")]
         mineurs = [w for w in categorized_warnings if w.startswith("[MINEUR]")]
@@ -612,8 +868,6 @@ def _format_response(
             "AVERTISSEMENTS MINEURS\n" + "\n".join(f"- {w}" for w in mineurs)
         )
     elif ref_warnings:
         # Fallback ancien format
         warning_text = "\n".join(f"- {w}" for w in ref_warnings)
         sections.append(f"AVERTISSEMENT — REFERENCES NON VÉRIFIÉES\n{warning_text}")

-    return "\n\n".join(sections)
+    return sections