diff --git a/src/config.py b/src/config.py
index 01af9ff..48dc712 100644
--- a/src/config.py
+++ b/src/config.py
@@ -729,6 +729,9 @@ class ControleCPAM(BaseModel):
contre_argumentation: Optional[str] = None
response_data: Optional[dict] = None
sources_reponse: list[RAGSource] = Field(default_factory=list)
+ quality_tier: Optional[str] = None # "A" | "B" | "C"
+ requires_review: bool = False
+ quality_warnings: list[str] = Field(default_factory=list)
# --- Qualité / Vetos (contestabilité) ---
diff --git a/src/control/cpam_response.py b/src/control/cpam_response.py
index 39c9343..0640c3f 100644
--- a/src/control/cpam_response.py
+++ b/src/control/cpam_response.py
@@ -27,6 +27,7 @@ from .cpam_validation import (
_validate_codes_in_response,
_build_correction_prompt,
_format_response,
+ _assess_quality_tier,
)
# Backward compat — sera retiré dans un commit futur
@@ -38,7 +39,7 @@ from .cpam_context import ( # noqa: F401
_build_bio_summary,
_check_das_bio_coherence,
)
-from .cpam_validation import _CIM10_CODE_RE, _validate_adversarial as _validate_adversarial # noqa: F401
+from .cpam_validation import _CIM10_CODE_RE, _validate_adversarial as _validate_adversarial, _assess_quality_tier as _assess_quality_tier # noqa: F401
logger = logging.getLogger(__name__)
@@ -220,8 +221,23 @@ def generate_cpam_response(
all_warnings = ref_warnings + grounding_warnings + code_warnings + adversarial_warnings
+ # 8c. Classification qualité (A/B/C)
+ tier, needs_review, cat_warnings = _assess_quality_tier(
+ result, ref_warnings, grounding_warnings, code_warnings, validation,
+ )
+ controle.quality_tier = tier
+ controle.requires_review = needs_review
+ controle.quality_warnings = cat_warnings
+ logger.info(" Qualité CPAM : tier %s, requires_review=%s, %d warnings",
+ tier, needs_review, len(cat_warnings))
+
# 9. Formater la réponse
- text = _format_response(result, all_warnings)
+ text = _format_response(
+ result,
+ ref_warnings=all_warnings,
+ quality_tier=tier,
+ categorized_warnings=cat_warnings,
+ )
logger.info(" Contre-argumentation générée (%d caractères)", len(text))
return text, result, rag_sources
diff --git a/src/control/cpam_validation.py b/src/control/cpam_validation.py
index fa85acc..98ed781 100644
--- a/src/control/cpam_validation.py
+++ b/src/control/cpam_validation.py
@@ -300,10 +300,91 @@ def _build_correction_prompt(
return original_prompt + correction_block
-def _format_response(parsed: dict, ref_warnings: list[str] | None = None) -> str:
+def _assess_quality_tier(
+ parsed: dict,
+ ref_warnings: list[str],
+ grounding_warnings: list[str],
+ code_warnings: list[str],
+ adversarial_result: dict | None,
+) -> tuple[str, bool, list[str]]:
+ """Évalue le tier qualité (A/B/C) et le flag requires_review.
+
+ Classification :
+ - Tier C (requires_review=True) :
+ score adversarial < 4 OU code_warnings > 0 OU grounding_warnings > 2
+ - Tier B :
+ score adversarial 4-6 OU ref_warnings > 0 OU grounding_warnings 1-2
+ - Tier A :
+        score adversarial >= 7 (ou validation adversariale absente), aucun warning
+
+ Returns:
+ (tier, requires_review, categorized_warnings)
+ """
+ categorized: list[str] = []
+ score = adversarial_result.get("score_confiance", -1) if adversarial_result else -1
+ has_critical = False
+ minor_count = 0
+
+ # --- Warnings critiques ---
+ for w in code_warnings:
+ categorized.append(f"[CRITIQUE] {w}")
+ has_critical = True
+
+ if score != -1 and score <= 3:
+ categorized.append(f"[CRITIQUE] Score adversarial très bas : {score}/10")
+ has_critical = True
+
+ if len(grounding_warnings) > 2:
+ for w in grounding_warnings:
+ categorized.append(f"[CRITIQUE] {w}")
+ has_critical = True
+ elif grounding_warnings:
+ for w in grounding_warnings:
+ categorized.append(f"[MINEUR] {w}")
+ minor_count += 1
+
+ # --- Warnings mineurs ---
+ for w in ref_warnings:
+ categorized.append(f"[MINEUR] {w}")
+ minor_count += 1
+
+ if adversarial_result and not adversarial_result.get("coherent", True):
+ for e in adversarial_result.get("erreurs", []):
+ if isinstance(e, str) and e.strip():
+ categorized.append(f"[MINEUR] Incohérence : {e}")
+ minor_count += 1
+
+ if score != -1 and 4 <= score <= 6:
+ categorized.append(f"[MINEUR] Score adversarial moyen : {score}/10")
+ minor_count += 1
+
+ # --- Classification ---
+ if has_critical or (score != -1 and score < 4):
+ tier = "C"
+ requires_review = True
+ elif minor_count > 0 or (score != -1 and 4 <= score <= 6):
+ tier = "B"
+ requires_review = False
+ else:
+ tier = "A"
+ requires_review = False
+
+ return tier, requires_review, categorized
+
+
+def _format_response(
+ parsed: dict,
+ ref_warnings: list[str] | None = None,
+ quality_tier: str | None = None,
+ categorized_warnings: list[str] | None = None,
+) -> str:
"""Formate la réponse LLM en texte lisible."""
sections = []
+ # Bandeau qualité si tier C
+ if quality_tier == "C":
+ sections.append("⚠ REVUE MANUELLE REQUISE (Qualité : C)")
+
analyse = parsed.get("analyse_contestation")
if analyse:
sections.append(f"ANALYSE DE LA CONTESTATION\n{analyse}")
@@ -368,8 +449,20 @@ def _format_response(parsed: dict, ref_warnings: list[str] | None = None) -> str
if conclusion:
sections.append(f"CONCLUSION\n{conclusion}")
- # Avertissements sur les références non vérifiables
- if ref_warnings:
+ # Avertissements catégorisés (nouveau format)
+ if categorized_warnings:
+ critiques = [w for w in categorized_warnings if w.startswith("[CRITIQUE]")]
+ mineurs = [w for w in categorized_warnings if w.startswith("[MINEUR]")]
+ if critiques:
+ sections.append(
+ "AVERTISSEMENTS CRITIQUES\n" + "\n".join(f"- {w}" for w in critiques)
+ )
+ if mineurs:
+ sections.append(
+ "AVERTISSEMENTS MINEURS\n" + "\n".join(f"- {w}" for w in mineurs)
+ )
+ elif ref_warnings:
+ # Fallback ancien format
warning_text = "\n".join(f"- {w}" for w in ref_warnings)
sections.append(f"AVERTISSEMENT — REFERENCES NON VÉRIFIÉES\n{warning_text}")
diff --git a/src/viewer/templates/cpam.html b/src/viewer/templates/cpam.html
index 1416929..145aa12 100644
--- a/src/viewer/templates/cpam.html
+++ b/src/viewer/templates/cpam.html
@@ -33,6 +33,7 @@
| Dossier |
OGC |
+ Qualité |
Titre |
Décision |
Codes contestés |
@@ -51,6 +52,17 @@
{% endif %}
{{ c.ctrl.numero_ogc }} |
+
+ {% if c.ctrl.quality_tier == 'A' %}
+ A
+ {% elif c.ctrl.quality_tier == 'B' %}
+ B
+ {% elif c.ctrl.quality_tier == 'C' %}
+ C
+ {% else %}
+ —
+ {% endif %}
+ |
{{ c.ctrl.titre }} |
{% if 'retient' in c.ctrl.decision_ucr|lower %}
diff --git a/tests/test_cpam_response.py b/tests/test_cpam_response.py
index f39901d..217b469 100644
--- a/tests/test_cpam_response.py
+++ b/tests/test_cpam_response.py
@@ -30,6 +30,7 @@ from src.control.cpam_response import (
_validate_codes_in_response,
_validate_grounding,
_validate_references,
+ _assess_quality_tier,
generate_cpam_response,
)
@@ -1287,7 +1288,7 @@ class TestValidateAdversarial:
text, response_data, sources = generate_cpam_response(dossier, controle)
assert "Antibiotiques mentionnés" in text
- assert "Score de confiance" in text
+ assert "Score adversarial" in text
def test_adversarial_empty_tag_map(self):
"""Dossier sans tags → validation fonctionne quand même."""
@@ -1438,7 +1439,7 @@ class TestBuildBioSummary:
dossier = DossierMedical(
source_file="test.pdf",
biologie_cle=[
- BiologieCle(test="Ferritine", valeur="15 µg/L", anomalie=True),
+ BiologieCle(test="Vitamine D", valeur="15 ng/mL", anomalie=True),
],
)
summary = _build_bio_summary(dossier)
@@ -1677,3 +1678,275 @@ class TestCorrectionLoop:
assert "CRP citée à 250" in correction
assert "Prompt d'argumentation original" in correction
assert "Corrige UNIQUEMENT" in correction
+
+
+class TestAssessQualityTier:
+ """Tests pour la classification qualité CPAM (A/B/C)."""
+
+ def test_tier_a_no_warnings_high_score(self):
+ """0 warning, score adversarial >= 7 → tier A, requires_review=False."""
+ tier, review, warnings = _assess_quality_tier(
+ parsed={},
+ ref_warnings=[],
+ grounding_warnings=[],
+ code_warnings=[],
+ adversarial_result={"coherent": True, "erreurs": [], "score_confiance": 9},
+ )
+ assert tier == "A"
+ assert review is False
+ assert len(warnings) == 0
+
+ def test_tier_b_ref_warnings(self):
+ """Warnings de référence → tier B."""
+ tier, review, warnings = _assess_quality_tier(
+ parsed={},
+ ref_warnings=["Référence non vérifiable : Manuel Inventé"],
+ grounding_warnings=[],
+ code_warnings=[],
+ adversarial_result={"coherent": True, "erreurs": [], "score_confiance": 8},
+ )
+ assert tier == "B"
+ assert review is False
+ assert any("[MINEUR]" in w for w in warnings)
+
+ def test_tier_b_medium_adversarial_score(self):
+ """Score adversarial 4-6 → tier B."""
+ tier, review, warnings = _assess_quality_tier(
+ parsed={},
+ ref_warnings=[],
+ grounding_warnings=[],
+ code_warnings=[],
+ adversarial_result={"coherent": True, "erreurs": [], "score_confiance": 5},
+ )
+ assert tier == "B"
+ assert review is False
+
+ def test_tier_b_one_grounding_warning(self):
+ """1 preuve non traçable → tier B (mineur)."""
+ tier, review, warnings = _assess_quality_tier(
+ parsed={},
+ ref_warnings=[],
+ grounding_warnings=["Preuve [BIO-99] non traçable"],
+ code_warnings=[],
+ adversarial_result={"coherent": True, "erreurs": [], "score_confiance": 8},
+ )
+ assert tier == "B"
+ assert review is False
+ assert any("[MINEUR]" in w for w in warnings)
+
+ def test_tier_c_code_warnings(self):
+ """Code hors périmètre → tier C, requires_review=True."""
+ tier, review, warnings = _assess_quality_tier(
+ parsed={},
+ ref_warnings=[],
+ grounding_warnings=[],
+ code_warnings=["Code Z45.8 hors périmètre dossier/UCR"],
+ adversarial_result={"coherent": True, "erreurs": [], "score_confiance": 7},
+ )
+ assert tier == "C"
+ assert review is True
+ assert any("[CRITIQUE]" in w for w in warnings)
+
+ def test_tier_c_low_adversarial_score(self):
+ """Score adversarial < 4 → tier C."""
+ tier, review, warnings = _assess_quality_tier(
+ parsed={},
+ ref_warnings=[],
+ grounding_warnings=[],
+ code_warnings=[],
+ adversarial_result={"coherent": False, "erreurs": ["Bio inventée"], "score_confiance": 2},
+ )
+ assert tier == "C"
+ assert review is True
+ assert any("[CRITIQUE]" in w for w in warnings)
+
+ def test_tier_c_many_grounding_warnings(self):
+ """3+ preuves non traçables → tier C (critique)."""
+ tier, review, warnings = _assess_quality_tier(
+ parsed={},
+ ref_warnings=[],
+ grounding_warnings=[
+ "Preuve [BIO-1] non traçable",
+ "Preuve [BIO-2] non traçable",
+ "Preuve [BIO-3] non traçable",
+ ],
+ code_warnings=[],
+ adversarial_result={"coherent": True, "erreurs": [], "score_confiance": 7},
+ )
+ assert tier == "C"
+ assert review is True
+
+ def test_tier_a_no_adversarial(self):
+ """Pas de validation adversariale (None) + 0 warnings → tier A."""
+ tier, review, warnings = _assess_quality_tier(
+ parsed={},
+ ref_warnings=[],
+ grounding_warnings=[],
+ code_warnings=[],
+ adversarial_result=None,
+ )
+ assert tier == "A"
+ assert review is False
+
+
+class TestFormatResponseCategorized:
+ """Tests pour le formatage avec warnings catégorisés et quality_tier."""
+
+ def test_tier_c_banner(self):
+ """Tier C → bandeau REVUE MANUELLE REQUISE."""
+ text = _format_response(
+ {"conclusion": "Conclusion..."},
+ quality_tier="C",
+ categorized_warnings=["[CRITIQUE] Code hors périmètre"],
+ )
+ assert "REVUE MANUELLE REQUISE" in text
+ assert "Qualité : C" in text
+ assert "AVERTISSEMENTS CRITIQUES" in text
+
+ def test_tier_a_no_banner(self):
+ """Tier A → pas de bandeau."""
+ text = _format_response(
+ {"conclusion": "Conclusion..."},
+ quality_tier="A",
+ categorized_warnings=[],
+ )
+ assert "REVUE MANUELLE REQUISE" not in text
+
+ def test_warnings_separated(self):
+ """Warnings critiques et mineurs dans des sections distinctes."""
+ text = _format_response(
+ {"conclusion": "Conclusion..."},
+ quality_tier="C",
+ categorized_warnings=[
+ "[CRITIQUE] Code Z45.8 hors périmètre",
+ "[MINEUR] Référence non vérifiable",
+ ],
+ )
+ assert "AVERTISSEMENTS CRITIQUES" in text
+ assert "AVERTISSEMENTS MINEURS" in text
+ assert text.index("CRITIQUES") < text.index("MINEURS")
+
+ def test_backward_compat_old_ref_warnings(self):
+ """Sans categorized_warnings, fallback sur ref_warnings."""
+ text = _format_response(
+ {"conclusion": "Conclusion..."},
+ ref_warnings=["Référence non vérifiable : X"],
+ )
+ assert "AVERTISSEMENT — REFERENCES NON VÉRIFIÉES" in text
+
+
+class TestCheckDasBioCoherenceExtended:
+ """Tests pour les nouveaux patterns DAS/bio (Phase 5)."""
+
+ def test_sepsis_with_normal_crp(self):
+ """DAS 'sepsis' mais CRP normale → incohérence."""
+ dossier = DossierMedical(
+ source_file="test.pdf",
+ diagnostics_associes=[
+ Diagnostic(texte="Sepsis sévère", cim10_suggestion="A41.9"),
+ ],
+ biologie_cle=[
+ BiologieCle(test="CRP", valeur="3", anomalie=False),
+ ],
+ )
+ warnings = _check_das_bio_coherence(dossier)
+ assert len(warnings) >= 1
+ assert any("Sepsis" in w or "sepsis" in w for w in warnings)
+
+ def test_infarctus_with_normal_troponine(self):
+ """DAS 'infarctus' mais troponine normale → incohérence."""
+ dossier = DossierMedical(
+ source_file="test.pdf",
+ diagnostics_associes=[
+ Diagnostic(texte="Infarctus du myocarde", cim10_suggestion="I21.9"),
+ ],
+ biologie_cle=[
+ BiologieCle(test="Troponine", valeur="0.01", anomalie=False),
+ ],
+ )
+ warnings = _check_das_bio_coherence(dossier)
+ assert len(warnings) >= 1
+
+ def test_infarctus_with_high_troponine_ok(self):
+ """DAS 'infarctus' + troponine élevée → pas d'incohérence."""
+ dossier = DossierMedical(
+ source_file="test.pdf",
+ diagnostics_associes=[
+ Diagnostic(texte="Infarctus du myocarde", cim10_suggestion="I21.9"),
+ ],
+ biologie_cle=[
+ BiologieCle(test="Troponine", valeur="0.5", anomalie=True),
+ ],
+ )
+ warnings = _check_das_bio_coherence(dossier)
+ assert len(warnings) == 0
+
+ def test_denutrition_with_normal_albumine(self):
+ """DAS 'dénutrition' mais albumine normale → incohérence."""
+ dossier = DossierMedical(
+ source_file="test.pdf",
+ diagnostics_associes=[
+ Diagnostic(texte="Dénutrition sévère", cim10_suggestion="E43"),
+ ],
+ biologie_cle=[
+ BiologieCle(test="Albumine", valeur="42", anomalie=False),
+ ],
+ )
+ warnings = _check_das_bio_coherence(dossier)
+ assert len(warnings) >= 1
+
+ def test_hypothyroidie_with_normal_tsh(self):
+ """DAS 'hypothyroïdie' mais TSH normale → incohérence."""
+ dossier = DossierMedical(
+ source_file="test.pdf",
+ diagnostics_associes=[
+ Diagnostic(texte="Hypothyroïdie", cim10_suggestion="E03.9"),
+ ],
+ biologie_cle=[
+ BiologieCle(test="TSH", valeur="2.5", anomalie=False),
+ ],
+ )
+ warnings = _check_das_bio_coherence(dossier)
+ assert len(warnings) >= 1
+
+ def test_diabete_with_normal_glycemie(self):
+ """DAS 'diabète' mais glycémie normale → incohérence."""
+ dossier = DossierMedical(
+ source_file="test.pdf",
+ diagnostics_associes=[
+ Diagnostic(texte="Diabète de type 2", cim10_suggestion="E11.9"),
+ ],
+ biologie_cle=[
+ BiologieCle(test="Glycémie", valeur="4.5", anomalie=False),
+ ],
+ )
+ warnings = _check_das_bio_coherence(dossier)
+ assert len(warnings) >= 1
+
+ def test_embolie_pulmonaire_with_normal_d_dimeres(self):
+ """DAS 'embolie pulmonaire' mais D-dimères normaux → incohérence."""
+ dossier = DossierMedical(
+ source_file="test.pdf",
+ diagnostics_associes=[
+ Diagnostic(texte="Embolie pulmonaire", cim10_suggestion="I26.9"),
+ ],
+ biologie_cle=[
+ BiologieCle(test="D-dimères", valeur="200", anomalie=False),
+ ],
+ )
+ warnings = _check_das_bio_coherence(dossier)
+ assert len(warnings) >= 1
+
+ def test_insuffisance_renale_with_normal_creatinine(self):
+ """DAS 'insuffisance rénale' mais créatinine normale → incohérence."""
+ dossier = DossierMedical(
+ source_file="test.pdf",
+ diagnostics_associes=[
+ Diagnostic(texte="Insuffisance rénale aiguë", cim10_suggestion="N17.9"),
+ ],
+ biologie_cle=[
+ BiologieCle(test="Créatinine", valeur="80", anomalie=False),
+ ],
+ )
+ warnings = _check_das_bio_coherence(dossier)
+ assert len(warnings) >= 1
|