feat: tests pour src/viewer/helpers.py (77 tests)
Couvre les filtres Jinja2 (confidence_badge, severity_badge, cma_level_badge, decision_badge, human_where, format_doc_name, etc.), les fonctions de statistiques (compute_group_stats, compute_dashboard_stats, compute_dim_synthesis), et les utilitaires (_date_to_iso, _sort_qc_alerts, _compute_jours_restants). Utilise pytest.mark.parametrize pour les cas multiples. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
526
tests/test_viewer_helpers.py
Normal file
526
tests/test_viewer_helpers.py
Normal file
@@ -0,0 +1,526 @@
|
|||||||
|
"""Tests unitaires pour src/viewer/helpers.py.
|
||||||
|
|
||||||
|
Couvre : filtres Jinja2, helpers de statistiques, fonctions utilitaires.
|
||||||
|
Pas de dépendance à Ollama ni au système de fichiers (sauf tmpdir).
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import re
|
||||||
|
from collections import Counter
|
||||||
|
from unittest.mock import patch, MagicMock
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
from markupsafe import Markup
|
||||||
|
|
||||||
|
from src.config import (
|
||||||
|
DossierMedical,
|
||||||
|
Diagnostic,
|
||||||
|
DPSelection,
|
||||||
|
DPCandidate,
|
||||||
|
ActeCCAM,
|
||||||
|
CodeDecision,
|
||||||
|
GHMEstimation,
|
||||||
|
VetoReport,
|
||||||
|
VetoIssue,
|
||||||
|
CompletudeDossier,
|
||||||
|
ControleCPAM,
|
||||||
|
FinancialImpact,
|
||||||
|
Sejour,
|
||||||
|
)
|
||||||
|
from src.viewer.helpers import (
|
||||||
|
compute_group_stats,
|
||||||
|
compute_dashboard_stats,
|
||||||
|
compute_dim_synthesis,
|
||||||
|
confidence_badge,
|
||||||
|
confidence_label,
|
||||||
|
severity_badge,
|
||||||
|
cma_level_badge,
|
||||||
|
format_duration,
|
||||||
|
format_dossier_name,
|
||||||
|
format_doc_name,
|
||||||
|
decision_badge,
|
||||||
|
format_cpam_text,
|
||||||
|
human_where,
|
||||||
|
_date_to_iso,
|
||||||
|
_sort_qc_alerts,
|
||||||
|
_compute_jours_restants,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# ── Helpers ────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
|
||||||
|
def _item(dossier: DossierMedical, path_rel: str = "test/file.json") -> dict:
|
||||||
|
"""Crée un item de scan_dossiers minimal."""
|
||||||
|
return {"name": "file", "path_rel": path_rel, "dossier": dossier}
|
||||||
|
|
||||||
|
|
||||||
|
def _diag(
    texte: str = "Diagnostic",
    code: str | None = None,
    confidence: str | None = None,
    est_cma: bool | None = None,
    decision: CodeDecision | None = None,
    cim10_final: str | None = None,
) -> Diagnostic:
    """Build a Diagnostic, mapping the short argument names onto model fields."""
    fields = dict(
        texte=texte,
        cim10_suggestion=code,
        cim10_confidence=confidence,
        est_cma=est_cma,
        cim10_decision=decision,
        cim10_final=cim10_final,
    )
    return Diagnostic(**fields)
|
||||||
|
|
||||||
|
|
||||||
|
# ===================================================================
|
||||||
|
# confidence_badge / confidence_label
|
||||||
|
# ===================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestConfidenceBadge:
    """HTML badge rendering for each confidence level."""

    def test_high(self):
        badge = confidence_badge("high")
        assert isinstance(badge, Markup)
        assert "Haute" in badge
        assert "#16a34a" in badge

    def test_medium(self):
        assert "Moyenne" in confidence_badge("medium")

    def test_low(self):
        badge = confidence_badge("low")
        assert "Basse" in badge
        assert "#dc2626" in badge

    def test_none_returns_empty(self):
        assert confidence_badge(None) == ""

    def test_empty_string_returns_empty(self):
        assert confidence_badge("") == ""

    def test_unknown_value_uses_default_colors(self):
        # Unrecognised values fall back to showing the raw text in grey.
        badge = confidence_badge("unknown")
        assert "unknown" in badge
        assert "#6b7280" in badge  # default foreground
|
||||||
|
|
||||||
|
|
||||||
|
class TestConfidenceLabel:
    """Plain-text label for a confidence value; falsy input maps to ''."""

    @pytest.mark.parametrize(
        "value,expected",
        [
            ("high", "Haute"),
            ("medium", "Moyenne"),
            ("low", "Basse"),
            (None, ""),
            ("", ""),
            ("autre", "autre"),
        ],
    )
    def test_labels(self, value, expected):
        assert confidence_label(value) == expected
|
||||||
|
|
||||||
|
|
||||||
|
# ===================================================================
|
||||||
|
# cma_level_badge
|
||||||
|
# ===================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestCmaLevelBadge:
    """CMA severity badge: empty for missing/non-positive levels, capped at 4."""

    def test_none_returns_empty(self):
        assert cma_level_badge(None) == ""

    def test_zero_returns_empty(self):
        assert cma_level_badge(0) == ""

    def test_negative_returns_empty(self):
        assert cma_level_badge(-1) == ""

    @pytest.mark.parametrize("level", [1, 2, 3, 4])
    def test_valid_levels(self, level):
        badge = cma_level_badge(level)
        assert isinstance(badge, Markup)
        assert f"CMA {level}" in badge

    def test_level_above_4_capped(self):
        """A level greater than 4 is clamped to the CMA 4 badge."""
        assert "CMA 4" in cma_level_badge(5)
|
||||||
|
|
||||||
|
|
||||||
|
# ===================================================================
|
||||||
|
# format_dossier_name / format_doc_name
|
||||||
|
# ===================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestFormatDossierName:
    """Display names for dossier groups."""

    def test_racine(self):
        # The catch-all root bucket gets a friendly label.
        assert format_dossier_name("racine") == "Non classés"

    def test_normal_name(self):
        # Regular group names pass through untouched.
        assert format_dossier_name("190_23139234") == "190_23139234"
|
||||||
|
|
||||||
|
|
||||||
|
class TestFormatDocName:
    """Normalisation of raw document file names into display labels."""

    @pytest.mark.parametrize(
        "name,expected",
        [
            ("190_fusionne_cim10", "Fusionné"),
            ("CRH_23139234_cim10", "CRH"),
            ("CRO_23139234_cim10", "CRO"),
            ("crh_23139234", "CRH"),
            ("trackare-01295620", "Trackare"),
            ("ANAPATH_23103383", "Anapath"),
            ("some_other_doc", "some_other_doc"),
        ],
    )
    def test_doc_names(self, name, expected):
        assert format_doc_name(name) == expected
|
||||||
|
|
||||||
|
|
||||||
|
# ===================================================================
|
||||||
|
# decision_badge
|
||||||
|
# ===================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestDecisionBadge:
    """Badges for reviewer decisions; KEEP and missing decisions render nothing."""

    def test_none_returns_empty(self):
        assert decision_badge(None) == ""

    def test_keep_returns_empty(self):
        """KEEP is the default case and carries no badge."""
        assert decision_badge(CodeDecision(action="KEEP")) == ""

    @pytest.mark.parametrize(
        "action,label",
        [
            ("DOWNGRADE", "Rétrogradé"),
            ("REMOVE", "Supprimé"),
            ("RULED_OUT", "Écarté"),
            ("NEED_INFO", "Preuve manquante"),
            ("PROMOTE_DP", "Promu en DP"),
        ],
    )
    def test_action_labels(self, action, label):
        assert label in decision_badge(CodeDecision(action=action))

    def test_dict_input(self):
        """A plain dict is accepted in place of a CodeDecision."""
        assert "Supprimé" in decision_badge({"action": "REMOVE"})

    def test_dict_keep(self):
        assert decision_badge({"action": "KEEP"}) == ""

    def test_unknown_action(self):
        """An unrecognised action falls back to showing the raw action string."""
        assert "CUSTOM_ACTION" in decision_badge({"action": "CUSTOM_ACTION"})
|
||||||
|
|
||||||
|
|
||||||
|
# ===================================================================
|
||||||
|
# human_where
|
||||||
|
# ===================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestHumanWhere:
    """Conversion of JSON-path-like locations to human-readable labels."""

    @pytest.mark.parametrize(
        "value,expected",
        [
            (None, "Global"),
            ("", "Global"),
            ("diagnostic_principal", "Diagnostic Principal"),
            ("diagnostics_associes", "Diagnostics Associés"),
            ("sejour", "Séjour"),
            ("diagnostics_associes[0]", "DAS n°1"),
            ("diagnostics_associes[5]", "DAS n°6"),
            ("actes_ccam[0]", "Acte n°1"),
            ("actes_ccam[2]", "Acte n°3"),
            ("autre_chose", "autre_chose"),
        ],
    )
    def test_conversions(self, value, expected):
        assert human_where(value) == expected
|
||||||
|
|
||||||
|
|
||||||
|
# ===================================================================
|
||||||
|
# _date_to_iso
|
||||||
|
# ===================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestDateToIso:
    """DD/MM/YYYY → ISO conversion; anything else yields an empty string."""

    def test_valid_date(self):
        assert _date_to_iso("15/03/2025") == "2025-03-15"

    def test_invalid_format(self):
        # Already-ISO input is not recognised as DD/MM/YYYY.
        assert _date_to_iso("2025-03-15") == ""

    def test_empty_string(self):
        assert _date_to_iso("") == ""

    def test_single_part(self):
        assert _date_to_iso("15") == ""
|
||||||
|
|
||||||
|
|
||||||
|
# ===================================================================
|
||||||
|
# _sort_qc_alerts
|
||||||
|
# ===================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestSortQcAlerts:
    """Ordering of QC alerts: critical first, DP-related first within a tier."""

    def test_critical_first(self):
        ordered = _sort_qc_alerts([
            "Recommandation : ajouter un DAS",
            "Erreur critique : code invalide",
            "Code justifié solidement",
        ])
        assert ordered[0] == "Erreur critique : code invalide"
        assert ordered[-1] == "Code justifié solidement"

    def test_empty_list(self):
        assert _sort_qc_alerts([]) == []

    def test_dp_prioritized_within_tier(self):
        """Alerts mentioning the DP come first inside their tier."""
        ordered = _sort_qc_alerts([
            "Redondance dans DAS",
            "Redondance DP diagnostic principal suspect",
        ])
        assert "DP" in ordered[0] or "diagnostic principal" in ordered[0]
|
||||||
|
|
||||||
|
|
||||||
|
# ===================================================================
|
||||||
|
# _compute_jours_restants
|
||||||
|
# ===================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestComputeJoursRestants:
    """Days remaining before a CPAM response deadline."""

    @staticmethod
    def _ctrl(date_limite):
        # Minimal stand-in for a ControleCPAM: only the deadline attribute is read.
        ctrl = MagicMock()
        ctrl.date_limite_reponse = date_limite
        return ctrl

    def test_no_date(self):
        assert _compute_jours_restants(self._ctrl(None)) is None

    def test_invalid_date(self):
        assert _compute_jours_restants(self._ctrl("not-a-date")) is None

    def test_valid_date_returns_int(self):
        remaining = _compute_jours_restants(self._ctrl("15/03/2030"))
        assert isinstance(remaining, int)
        assert remaining > 0  # deadline is well into the future
|
||||||
|
|
||||||
|
|
||||||
|
# ===================================================================
|
||||||
|
# compute_group_stats
|
||||||
|
# ===================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestComputeGroupStats:
    """Aggregated per-group counters (DAS, alerts, acts, CMA)."""

    def test_empty_items(self):
        expected = {"das_count": 0, "alertes_count": 0, "actes_count": 0, "cma_count": 0}
        assert compute_group_stats([]) == expected

    def test_cma_counted_on_dp(self):
        """A CMA flag on the DP itself is included in cma_count."""
        dossier = DossierMedical(
            diagnostic_principal=_diag("DP", "K85.9", est_cma=True),
        )
        assert compute_group_stats([_item(dossier)])["cma_count"] == 1

    def test_multiple_items(self):
        first = DossierMedical(
            diagnostics_associes=[_diag("DAS1", "I10", est_cma=True)],
            actes_ccam=[ActeCCAM(texte="Acte1")],
            alertes_codage=["Alerte"],
        )
        second = DossierMedical(
            diagnostics_associes=[_diag("DAS2", "E11.9"), _diag("DAS3", "J18.9", est_cma=True)],
        )
        stats = compute_group_stats([_item(first), _item(second)])
        assert stats["das_count"] == 3
        assert stats["actes_count"] == 1
        assert stats["alertes_count"] == 1
        assert stats["cma_count"] == 2
|
||||||
|
|
||||||
|
|
||||||
|
# ===================================================================
|
||||||
|
# compute_dashboard_stats
|
||||||
|
# ===================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestComputeDashboardStats:
    """Global dashboard aggregates across all groups."""

    def test_empty_groups(self):
        stats = compute_dashboard_stats({})
        assert stats["total_dossiers"] == 0
        assert stats["total_fichiers"] == 0
        assert stats["top_codes"] == []
        assert stats["processing_time_avg"] == 0

    def test_single_dossier(self):
        dossier = DossierMedical(
            diagnostic_principal=_diag("DP", "K85.9", confidence="high"),
            diagnostics_associes=[_diag("DAS", code="I10")],
            actes_ccam=[ActeCCAM(texte="Acte")],
            alertes_codage=["Alerte"],
            processing_time_s=10.5,
        )
        stats = compute_dashboard_stats({"grp1": [_item(dossier)]})
        assert stats["total_dossiers"] == 1
        assert stats["total_fichiers"] == 1
        assert stats["total_das"] == 1
        assert stats["total_actes"] == 1
        assert stats["total_alertes"] == 1
        assert stats["processing_time_avg"] == 10.5

    def test_dp_validity_absent_when_no_dp(self):
        # A dossier with no DP at all is tallied under the "absent" bucket.
        stats = compute_dashboard_stats({"grp": [_item(DossierMedical())]})
        assert stats["dp_validity"].get("absent", 0) == 1

    def test_ghm_types_counted(self):
        dossier = DossierMedical(
            ghm_estimation=GHMEstimation(type_ghm="C", severite=3),
        )
        stats = compute_dashboard_stats({"grp": [_item(dossier)]})
        assert stats["ghm_types"].get("C") == 1
        assert stats["severity_dist"].get(3) == 1
|
||||||
|
|
||||||
|
|
||||||
|
# ===================================================================
|
||||||
|
# compute_dim_synthesis
|
||||||
|
# ===================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestComputeDimSynthesis:
    """DIM synthesis: DP verdicts, DAS decisions, veto, completude and CPAM."""

    def test_empty_groups(self):
        synthesis = compute_dim_synthesis({})
        assert synthesis["dp"]["total"] == 0
        assert synthesis["das"]["total"] == 0
        assert synthesis["veto"]["avg_score"] == 0

    def test_dp_confirmed(self):
        dossier = DossierMedical(
            dp_final=DPSelection(
                chosen_code="K85.9",
                verdict="CONFIRMED",
                confidence="high",
                evidence=["Test evidence"],
            ),
        )
        synthesis = compute_dim_synthesis({"grp": [_item(dossier)]})
        assert synthesis["dp"]["total"] == 1
        assert synthesis["dp"]["confirmed"] == 1

    def test_dp_review_creates_alert(self):
        # A REVIEW verdict is both counted and surfaced as an alert.
        dossier = DossierMedical(
            dp_final=DPSelection(
                chosen_code="R10.4",
                verdict="REVIEW",
                confidence="medium",
                reason="Ambigu",
                evidence=[],
            ),
        )
        synthesis = compute_dim_synthesis({"grp": [_item(dossier)]})
        assert synthesis["dp"]["review"] == 1
        assert len(synthesis["alertes"]["review"]) == 1

    def test_das_decisions_counted(self):
        dossier = DossierMedical(
            diagnostics_associes=[
                _diag("DAS1", "I10", decision=CodeDecision(action="KEEP")),
                _diag("DAS2", "D69.6", decision=CodeDecision(action="RULED_OUT")),
                _diag("DAS3", "D50", decision=CodeDecision(action="DOWNGRADE", final_code="D64.9")),
                _diag("DAS4", "Z87.1", decision=CodeDecision(action="REMOVE")),
            ],
        )
        synthesis = compute_dim_synthesis({"grp": [_item(dossier)]})
        assert synthesis["das"]["total"] == 4
        for bucket in ("kept", "ruled_out", "downgraded", "removed"):
            assert synthesis["das"][bucket] == 1

    def test_das_no_decision_counted_as_kept(self):
        """A DAS without an explicit decision defaults to 'kept'."""
        dossier = DossierMedical(diagnostics_associes=[_diag("DAS", "I10")])
        synthesis = compute_dim_synthesis({"grp": [_item(dossier)]})
        assert synthesis["das"]["kept"] == 1

    def test_veto_report_aggregated(self):
        dossier = DossierMedical(
            veto_report=VetoReport(
                verdict="FAIL",
                score_contestabilite=40,
                issues=[
                    VetoIssue(veto="VETO-01", severity="HARD", where="dp", message="test"),
                ],
            ),
        )
        synthesis = compute_dim_synthesis({"grp": [_item(dossier)]})
        assert synthesis["veto"]["avg_score"] == 40
        assert synthesis["veto"]["distribution"].get("FAIL") == 1
        assert len(synthesis["alertes"]["fail"]) == 1

    def test_completude_indefendable(self):
        dossier = DossierMedical(
            completude=CompletudeDossier(
                verdict_global="indefendable",
                score_global=20,
                documents_manquants=["CRO", "Anapath"],
            ),
        )
        synthesis = compute_dim_synthesis({"grp": [_item(dossier)]})
        assert synthesis["completude"]["distribution"].get("indefendable") == 1
        assert len(synthesis["alertes"]["indefendable"]) == 1

    def test_taux_modification_zero_when_no_das(self):
        assert compute_dim_synthesis({})["das"]["taux_modification"] == 0

    def test_cpam_impact(self):
        dossier = DossierMedical(
            controles_cpam=[
                ControleCPAM(
                    numero_ogc=1,
                    financial_impact=FinancialImpact(
                        impact_estime_euros=1500,
                        priorite="haute",
                    ),
                    validation_dim="valide",
                ),
            ],
        )
        synthesis = compute_dim_synthesis({"grp": [_item(dossier)]})
        assert synthesis["cpam"]["total"] == 1
        assert synthesis["cpam"]["impact_total"] == 1500
        assert synthesis["cpam"]["by_priority"].get("haute") == 1
        assert synthesis["cpam"]["by_status"].get("valide") == 1
|
||||||
|
|
||||||
|
|
||||||
|
# ===================================================================
|
||||||
|
# format_cpam_text (additional edge cases beyond test_viewer.py)
|
||||||
|
# ===================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class TestFormatCpamTextExtra:
    """Edge cases for format_cpam_text beyond test_viewer.py coverage."""

    def test_blank_lines_produce_br(self):
        assert "<br>" in format_cpam_text("Line 1\n\nLine 2")

    def test_list_closed_at_end(self):
        """A trailing bullet list is still closed, with exactly one <ul>/</ul> pair."""
        html = format_cpam_text("- Item 1\n- Item 2")
        assert html.count("<ul") == 1
        assert html.count("</ul>") == 1
|
||||||
Reference in New Issue
Block a user