feat(phase2): FINESS gazetteers (102K facilities) + CamemBERT-bio fine-tuning, F1=89%

FINESS gazetteers (data.gouv.fr open data):
- 102K FINESS numbers → detected by exact lookup in _mask_admin_label and selective_rescan (sketched below)
- 122K facility names, 113K phone numbers, 76K addresses (available)
- A 9-digit number matching a real FINESS entry is masked even without a "FINESS" label

CamemBERT-bio fine-tuning (almanach/camembert-bio-base):
- Silver-annotation export rewritten: original↔pseudonymized alignment (difflib) → 6862 B- entities (vs 3344 with the old audit-only export) over 222K tokens
- Sliding windows (200 tokens, stride 100) for long documents
- WeightedNERTrainer with capped class weights (max 10x) + label smoothing
- Result: Precision=88.1%, Recall=89.8%, F1=88.9% (20 epochs, lr=1e-5)
- Model saved to models/camembert-bio-deid/best (not committed)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
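The exact-lookup masking described in the first bullet can be sketched as follows. This is illustrative only: FINESS_NUMBERS, mask_finess_numbers, and the [FINESS] placeholder are assumed names, not the actual API of _mask_admin_label / selective_rescan.

import re

# Hypothetical gazetteer loaded from the data.gouv.fr FINESS export:
# a set of known 9-digit FINESS identifiers, for O(1) exact lookup.
FINESS_NUMBERS: set[str] = {"750712184", "690781810"}  # toy sample

NINE_DIGITS = re.compile(r"\b\d{9}\b")

def mask_finess_numbers(text: str) -> str:
    """Mask any 9-digit number that matches a real FINESS entry,
    even when no "FINESS" label appears nearby."""
    def repl(m: re.Match) -> str:
        return "[FINESS]" if m.group(0) in FINESS_NUMBERS else m.group(0)
    return NINE_DIGITS.sub(repl, text)

print(mask_finess_numbers("Transfert vers l'établissement 750712184."))
# Transfert vers l'établissement [FINESS].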
@@ -15,8 +15,11 @@ import sys
 import argparse
 from pathlib import Path
 from typing import Dict, List
+from collections import Counter
 
 import numpy as np
 import torch
+from torch import nn
+
 # Check dependencies
 try:
@@ -59,38 +62,60 @@ ID2LABEL = {i: l for l, i in LABEL2ID.items()}
 MODEL_NAME = "almanach/camembert-bio-base"
 
 
-def load_bio_files(data_dir: Path) -> Dict[str, List]:
-    """Load the .bio files into HuggingFace datasets format."""
+def load_bio_files(data_dir: Path, window_size: int = 200, stride: int = 100) -> Dict[str, List]:
+    """Load the .bio files and split them into sliding windows.
+
+    Clinical documents are very long. We split them into windows of
+    ~window_size tokens with an overlap of stride, and keep only the
+    windows containing at least one entity (for class balance).
+    """
     tokens_list: List[List[str]] = []
     labels_list: List[List[int]] = []
 
     for bio_file in sorted(data_dir.glob("*.bio")):
         text = bio_file.read_text(encoding="utf-8")
-        current_tokens: List[str] = []
-        current_labels: List[int] = []
+        # Load every token of the document
+        all_tokens: List[str] = []
+        all_labels: List[int] = []
 
         for line in text.splitlines():
             line = line.strip()
             if not line:
-                # End of sentence
-                if current_tokens:
-                    tokens_list.append(current_tokens)
-                    labels_list.append(current_labels)
-                    current_tokens = []
-                    current_labels = []
                 continue
 
             parts = line.split("\t")
             if len(parts) != 2:
                 continue
             token, label = parts
             label_id = LABEL2ID.get(label, LABEL2ID["O"])
-            current_tokens.append(token)
-            current_labels.append(label_id)
+            all_tokens.append(token)
+            all_labels.append(label_id)
 
-        if current_tokens:
-            tokens_list.append(current_tokens)
-            labels_list.append(current_labels)
+        if not all_tokens:
+            continue
+
+        # Split into sliding windows
+        n = len(all_tokens)
+        for start in range(0, n, stride):
+            end = min(start + window_size, n)
+            chunk_tokens = all_tokens[start:end]
+            chunk_labels = all_labels[start:end]
+
+            # Fix I- labels at the start of a window → B-
+            if chunk_labels and chunk_labels[0] > 0:
+                lbl_name = LABEL_LIST[chunk_labels[0]]
+                if lbl_name.startswith("I-"):
+                    b_name = "B-" + lbl_name[2:]
+                    if b_name in LABEL2ID:
+                        chunk_labels[0] = LABEL2ID[b_name]
+
+            # Keep windows with entities + some all-"O" windows (10%)
+            has_entities = any(l != 0 for l in chunk_labels)
+            if has_entities or (start % (stride * 10) == 0):
+                tokens_list.append(chunk_tokens)
+                labels_list.append(chunk_labels)
+
+            if end >= n:
+                break
 
     return {"tokens": tokens_list, "ner_tags": labels_list}
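Quick sanity check on the window arithmetic above: with window_size=200 and stride=100, consecutive windows overlap by 100 tokens, and an I- tag landing at a window boundary is promoted to B- so every window stays valid BIO. A toy run of the same logic (made-up two-label set, entity filtering omitted):

# Toy illustration of the sliding windows and the I- → B- repair.
LABEL_LIST = ["O", "B-PER", "I-PER"]
LABEL2ID = {l: i for i, l in enumerate(LABEL_LIST)}

labels = [0, 1, 2, 2, 0, 0, 0, 0]  # one PER entity on tokens 1-3
window_size, stride = 4, 2
n = len(labels)

for start in range(0, n, stride):
    end = min(start + window_size, n)
    chunk = labels[start:end]  # slicing copies, so later windows are unaffected
    # A window that starts inside an entity begins with I-PER → promote to B-PER
    if chunk and LABEL_LIST[chunk[0]].startswith("I-"):
        chunk[0] = LABEL2ID["B-" + LABEL_LIST[chunk[0]][2:]]
    print(start, [LABEL_LIST[i] for i in chunk])
    if end >= n:
        break
# 0 ['O', 'B-PER', 'I-PER', 'I-PER']
# 2 ['B-PER', 'I-PER', 'O', 'O']
# 4 ['O', 'O', 'O', 'O']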
@@ -131,6 +156,59 @@ def tokenize_and_align(examples, tokenizer):
     return tokenized
 
 
+class WeightedNERTrainer(Trainer):
+    """Trainer with class weights to counter the O-vs-entity imbalance."""
+
+    def __init__(self, class_weights=None, **kwargs):
+        super().__init__(**kwargs)
+        if class_weights is not None:
+            self.class_weights = class_weights.to(self.args.device)
+        else:
+            self.class_weights = None
+
+    def compute_loss(self, model, inputs, return_outputs=False, **kwargs):
+        labels = inputs.pop("labels")
+        outputs = model(**inputs)
+        logits = outputs.logits
+
+        if self.class_weights is not None:
+            loss_fct = nn.CrossEntropyLoss(
+                weight=self.class_weights,
+                ignore_index=-100,
+                label_smoothing=0.1,
+            )
+        else:
+            loss_fct = nn.CrossEntropyLoss(ignore_index=-100, label_smoothing=0.1)
+
+        loss = loss_fct(logits.view(-1, self.model.config.num_labels), labels.view(-1))
+        return (loss, outputs) if return_outputs else loss
+
+
+def compute_class_weights(raw_data: Dict, num_labels: int, max_weight: float = 10.0) -> torch.FloatTensor:
+    """Compute weights inversely proportional to frequency, capped after normalization."""
+    counts = Counter()
+    for labels in raw_data["ner_tags"]:
+        for l in labels:
+            counts[l] += 1
+
+    total = sum(counts.values())
+    weights = torch.ones(num_labels)
+    for label_id, count in counts.items():
+        if count > 0:
+            weights[label_id] = total / (num_labels * count)
+
+    # Normalize: O = 1.0
+    if weights[0] > 0:
+        scale = 1.0 / weights[0]
+        weights *= scale
+
+    # Cap AFTER normalization to limit the imbalance
+    weights = torch.clamp(weights, max=max_weight)
+
+    print(f"  Class weights (O={weights[0]:.1f}, non-O mean={weights[1:].mean():.1f}, max={weights[1:].max():.1f})")
+    return weights
+
+
 def main():
     parser = argparse.ArgumentParser(description="Fine-tune CamemBERT-bio for de-identification")
     parser.add_argument("--data-dir", type=Path,
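To see what normalize-then-cap does in compute_class_weights, here is a worked toy run (made-up counts mimicking the ~97%-O corpus, not the real distribution):

import torch

counts = {0: 9700, 1: 250, 2: 50}   # "O", "B-X", "I-X"
num_labels, max_weight = 3, 10.0
total = sum(counts.values())         # 10000

weights = torch.ones(num_labels)
for label_id, count in counts.items():
    weights[label_id] = total / (num_labels * count)
# raw inverse-frequency weights: O≈0.34, B-X≈13.3, I-X≈66.7

weights *= 1.0 / weights[0]          # normalize so O = 1.0 → B-X≈38.8, I-X≈194.0
weights = torch.clamp(weights, max=max_weight)
print(weights)                       # tensor([ 1., 10., 10.])

Capping after normalization keeps rare labels from outweighing O by more than 10x, which stabilizes the loss while still boosting entity recall.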
@@ -203,6 +281,10 @@ def main():
         "f1": results["overall_f1"],
     }
 
+    # Class weights to counter the 97%-O imbalance
+    print("\nComputing class weights...")
+    weights = compute_class_weights(raw_data, len(LABEL_LIST))
+
     # Training
     args.output_dir.mkdir(parents=True, exist_ok=True)
     training_args = TrainingArguments(
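The results["overall_f1"] key matches the output of the seqeval metric from the evaluate library; assuming that is the metric in use (the compute_metrics body is not shown in this diff), the call would look roughly like:

import evaluate

seqeval = evaluate.load("seqeval")

# Label sequences with -100 / special-token positions already filtered out
predictions = [["B-PER", "I-PER", "O"]]
references = [["B-PER", "I-PER", "O"]]

results = seqeval.compute(predictions=predictions, references=references)
print(results["overall_precision"], results["overall_recall"], results["overall_f1"])
# 1.0 1.0 1.0 on this toy pair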
@@ -218,13 +300,14 @@
         load_best_model_at_end=True,
         metric_for_best_model="f1",
         logging_steps=50,
-        fp16=False,  # CPU training
+        fp16=True,  # GPU training with mixed precision
         report_to="none",
         save_total_limit=2,
     )
 
     data_collator = DataCollatorForTokenClassification(tokenizer)
-    trainer = Trainer(
+    trainer = WeightedNERTrainer(
+        class_weights=weights,
         model=model,
         args=training_args,
         train_dataset=tokenized["train"],
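The commit message also mentions that the silver-annotation export was rewritten around an original↔pseudonymized alignment with difflib. That code is not part of this diff; a minimal sketch of the idea, with invented sample text and placeholders:

import difflib

original = "Patient Jean Dupont admis le 12/03/2024."
pseudo = "Patient [NOM] admis le [DATE]."

# Equal blocks are the untouched text; every non-equal opcode is a span the
# pseudonymizer replaced, i.e. a silver entity span in the original document.
matcher = difflib.SequenceMatcher(a=original, b=pseudo, autojunk=False)
for tag, i1, i2, j1, j2 in matcher.get_opcodes():
    if tag != "equal":
        print(f"original[{i1}:{i2}] = {original[i1:i2]!r} -> {pseudo[j1:j2]!r}")
# original[8:19] = 'Jean Dupont' -> '[NOM]'
# original[29:39] = '12/03/2024' -> '[DATE]'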