# Medical AI Scribe v2.0 -- Tkinter GUI that diarizes a medical audio file
# via an external script and generates a structured summary with Ollama.
import os
import subprocess
import sys
import threading
import time
import tkinter as tk
from tkinter import filedialog, messagebox, ttk

try:
    # Optional third-party dependency: the worker methods import ollama
    # lazily and surface a readable error if it is missing, so a failed
    # import here must not prevent the GUI from starting at all.
    import ollama
except ImportError:
    ollama = None

# Hugging Face token forwarded to the diarizer subprocess via its env.
# SECURITY: a real token was previously hard-coded on this line; it was
# exposed in source control and must be revoked. Supply the token through
# the HF_TOKEN environment variable instead.
HF_TOKEN = os.environ.get("HF_TOKEN", "")
|
|
|
|
# Prompt dictionary: preset system prompts selectable from the GUI.
# Keys appear verbatim in the template combobox; values are the system
# prompts sent to the LLM. Both are runtime strings (displayed / sent to
# the model), so they are intentionally kept in French.
PROMPT_TEMPLATES = {
    "Consultation Standard": """Tu es un expert médical assistant. Analyse cette consultation et produis une synthèse incluant :
1. Motif de consultation.
2. Symptômes et antécédents.
3. Examen clinique réalisé.
4. Diagnostic et Plan thérapeutique (ordonnance, conseils).""",

    "Compte-rendu Opératoire": """Analyse cette réunion chirurgicale. Produis un compte-rendu incluant :
1. Type d'intervention et indication.
2. Déroulement technique étape par étape.
3. Matériel utilisé et incidents éventuels.
4. Suites opératoires immédiates prévues.""",

    "Réunion d'Équipe (Staff)": """Synthétise cette réunion de service médical :
1. Liste des patients discutés.
2. Décisions collégiales prises pour chaque cas.
3. Tâches assignées aux différents membres de l'équipe.""",

    "Lettre au Confrère": """Rédige une lettre de liaison médicale professionnelle adressée à un confrère à partir de ce transcript.
La lettre doit être formelle, concise et inclure tous les éléments clés de la consultation.""",

    # Placeholder entry: selecting it loads editable boilerplate into the
    # prompt editor for the user to overwrite.
    "Prompt Personnalisé": "Tapez vos propres instructions ici..."
}
|
|
|
|
class MedicalScribeGUIv2:
    """Tkinter GUI that transcribes a medical audio file (via the external
    ``medical_diarizer.py`` script) and then generates a structured summary
    with an Ollama language model.

    The heavy work (subprocess + LLM call) runs on a daemon thread; all
    widget updates from worker threads are marshalled back onto the Tk
    main loop with ``root.after`` because Tkinter is not thread-safe.
    """

    def __init__(self, root):
        """Build the UI and start loading the Ollama model list.

        Args:
            root: the ``tk.Tk`` root window to populate.
        """
        self.root = root
        self.root.title("Medical AI Scribe v2.0 - Turbo Edition")
        self.root.geometry("950x850")
        self.root.configure(bg="#f4f7f6")

        # Tk variables backing the widgets.
        self.audio_path = tk.StringVar()
        self.selected_model = tk.StringVar()
        self.selected_template = tk.StringVar(value="Consultation Standard")
        self.status_var = tk.StringVar(value="Prêt.")
        self.progress_val = tk.DoubleVar(value=0)

        # Handle of the running diarizer subprocess (None while idle).
        self.current_process = None

        self.setup_ui()
        # Populate the model combobox in the background so a slow or absent
        # Ollama daemon does not block window start-up.
        threading.Thread(target=self.load_ollama_models, daemon=True).start()

    def setup_ui(self):
        """Create and lay out every widget of the main window."""
        main_frame = tk.Frame(self.root, bg="#f4f7f6", padx=20, pady=20)
        main_frame.pack(fill=tk.BOTH, expand=True)

        # Header
        header = tk.Label(main_frame, text="Medical AI Scribe v2.0", font=("Helvetica", 18, "bold"), bg="#f4f7f6", fg="#2c3e50")
        header.pack(pady=(0, 20))

        # 1. Audio file picker
        tk.Label(main_frame, text="Fichier Audio :", font=("Helvetica", 10, "bold"), bg="#f4f7f6").pack(anchor="w")
        file_frame = tk.Frame(main_frame, bg="#f4f7f6")
        file_frame.pack(fill="x", pady=(5, 15))
        tk.Entry(file_frame, textvariable=self.audio_path, font=("Arial", 10), width=85).pack(side="left", padx=(0, 10))
        tk.Button(file_frame, text="Parcourir", command=self.browse_file, bg="#3498db", fg="white", relief=tk.FLAT).pack(side="left")

        # 2. Model & prompt-template selectors, side by side
        row2 = tk.Frame(main_frame, bg="#f4f7f6")
        row2.pack(fill="x", pady=(5, 15))

        col_model = tk.Frame(row2, bg="#f4f7f6")
        col_model.pack(side="left", fill="x", expand=True)
        tk.Label(col_model, text="Modèle Ollama :", font=("Helvetica", 10, "bold"), bg="#f4f7f6").pack(anchor="w")
        self.model_combo = ttk.Combobox(col_model, textvariable=self.selected_model, width=35)
        self.model_combo.pack(anchor="w", pady=5)
        # Sensible default until load_ollama_models replaces the values.
        self.model_combo.set("gpt-oss:120b-cloud")

        col_template = tk.Frame(row2, bg="#f4f7f6")
        col_template.pack(side="right", fill="x", expand=True)
        tk.Label(col_template, text="Dictionnaire de Prompts :", font=("Helvetica", 10, "bold"), bg="#f4f7f6").pack(anchor="w")
        self.template_combo = ttk.Combobox(col_template, textvariable=self.selected_template, width=35, state="readonly")
        self.template_combo['values'] = list(PROMPT_TEMPLATES.keys())
        self.template_combo.pack(anchor="w", pady=5)
        self.template_combo.bind("<<ComboboxSelected>>", self.on_template_change)

        # 3. Editable prompt text
        tk.Label(main_frame, text="Instructions de Synthèse (Prompt) :", font=("Helvetica", 10, "bold"), bg="#f4f7f6").pack(anchor="w")
        self.prompt_text = tk.Text(main_frame, height=8, font=("Arial", 10), padx=10, pady=10)
        self.prompt_text.pack(fill="x", pady=(5, 15))
        self.prompt_text.insert("1.0", PROMPT_TEMPLATES["Consultation Standard"])

        # 4. Progress bar + status line
        tk.Label(main_frame, text="Progression :", font=("Helvetica", 9), bg="#f4f7f6").pack(anchor="w")
        self.progress_bar = ttk.Progressbar(main_frame, variable=self.progress_val, maximum=100, mode='determinate')
        self.progress_bar.pack(fill="x", pady=(5, 2))
        self.status_label = tk.Label(main_frame, textvariable=self.status_var, bg="#f4f7f6", font=("Arial", 9, "italic"))
        self.status_label.pack(pady=(0, 10))

        # 5. Action buttons (run / stop)
        btn_frame = tk.Frame(main_frame, bg="#f4f7f6")
        btn_frame.pack(fill="x", pady=10)
        self.run_btn = tk.Button(btn_frame, text="LANCER LE TRAITEMENT", bg="#27ae60", fg="white",
                                 font=("Helvetica", 12, "bold"), relief=tk.FLAT, pady=10, command=self.start_pipeline)
        self.run_btn.pack(side="left", fill="x", expand=True, padx=(0, 10))
        self.stop_btn = tk.Button(btn_frame, text="STOP", bg="#e74c3c", fg="white",
                                  font=("Helvetica", 12, "bold"), relief=tk.FLAT, pady=10, state="disabled", width=15, command=self.stop_pipeline)
        self.stop_btn.pack(side="right")

        # 6. Live log console
        tk.Label(main_frame, text="Flux de Transcription (Live) :", font=("Helvetica", 10, "bold"), bg="#f4f7f6").pack(anchor="w")
        self.log_area = tk.Text(main_frame, height=15, bg="#1e1e1e", fg="#00ff00", font=("Consolas", 9), padx=10, pady=10)
        self.log_area.pack(fill=tk.BOTH, expand=True, pady=5)

    def on_template_change(self, event):
        """Replace the prompt editor's contents with the selected template."""
        template = self.selected_template.get()
        self.prompt_text.delete("1.0", tk.END)
        self.prompt_text.insert("1.0", PROMPT_TEMPLATES[template])

    def browse_file(self):
        """Open a file dialog and store the chosen audio path."""
        fn = filedialog.askopenfilename(filetypes=[("Audio", "*.wav *.mp3 *.m4a *.flac")])
        if fn:
            self.audio_path.set(fn)

    def load_ollama_models(self):
        """Fetch the installed Ollama models and fill the model combobox.

        Runs on a worker thread. Any failure (daemon not running, package
        missing, unexpected payload) is deliberately ignored so the GUI
        still starts -- the user can type a model name manually.
        """
        try:
            import ollama
            resp = ollama.list()
            # The client returns either an object with a .models attribute
            # or a plain dict, depending on the library version.
            models = getattr(resp, 'models', None)
            if models is None:
                models = resp.get('models', [])
            names = []
            for m in models:
                name = getattr(m, 'model', None) or (m.get('model') if isinstance(m, dict) else None)
                if name:
                    names.append(name)
            if names:
                def apply_names():
                    # Tkinter is not thread-safe: mutate widgets on the main loop.
                    self.model_combo['values'] = names
                    if "gpt-oss:120b-cloud" in names:
                        self.selected_model.set("gpt-oss:120b-cloud")
                self.root.after(0, apply_names)
        except Exception:
            # Best-effort: keep the default model string on any failure.
            pass

    def log(self, msg):
        """Append *msg* to the log console; safe to call from any thread."""
        def append():
            self.log_area.insert(tk.END, msg + "\n")
            self.log_area.see(tk.END)
        self.root.after(0, append)

    def stop_pipeline(self):
        """Terminate the diarizer subprocess (if any) and reset the buttons."""
        if self.current_process:
            self.current_process.terminate()
            self.log("\n[STOP] Arrêt demandé.")
            self.status_var.set("Interrompu.")
        self.run_btn.config(state="normal", bg="#27ae60")
        self.stop_btn.config(state="disabled")

    def start_pipeline(self):
        """Validate the input, reset the UI and launch the worker thread."""
        if not self.audio_path.get():
            # Explicit feedback instead of silently doing nothing.
            messagebox.showwarning("Fichier manquant", "Veuillez sélectionner un fichier audio.")
            return
        self.run_btn.config(state="disabled", bg="#95a5a6")
        self.stop_btn.config(state="normal")
        self.log_area.delete("1.0", tk.END)
        self.progress_val.set(0)
        threading.Thread(target=self.run_worker, daemon=True).start()

    def run_worker(self):
        """Background pipeline: diarize the audio with the external script,
        then summarize the resulting transcript with the selected model.

        Runs on a daemon thread; every widget update is scheduled through
        ``root.after`` so it executes on the Tk main loop.
        """
        audio = self.audio_path.get()
        model_name = self.selected_model.get()
        prompt = self.prompt_text.get("1.0", tk.END).strip()

        try:
            self.log("--- INITIALISATION DU MOTEUR TURBO ---")
            script_dir = os.path.dirname(os.path.abspath(__file__))
            env = os.environ.copy()
            env["HF_TOKEN"] = HF_TOKEN

            # Explicit UTF-8 decoding: the diarizer emits French text and the
            # platform default encoding may mangle it.
            self.current_process = subprocess.Popen(
                [sys.executable, os.path.join(script_dir, "medical_diarizer.py"), audio],
                env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                text=True, bufsize=1, encoding="utf-8", errors="replace",
            )

            for line in self.current_process.stdout:
                line = line.strip()
                if line.startswith("[STATUS] PROGRESS:"):
                    # Machine-readable progress line emitted by the diarizer.
                    try:
                        val = int(line.split(":")[1])
                        self.root.after(0, self.progress_val.set, val)
                        self.root.after(0, self.status_var.set, f"Analyse en cours : {val}%")
                    except (ValueError, IndexError):
                        pass  # malformed progress line: ignore, keep streaming
                else:
                    self.log(line)

            self.current_process.wait()

            # os.path.splitext (not rsplit on '.') so a dot in a *directory*
            # name cannot truncate the path.
            base, _ = os.path.splitext(audio)
            transcript_file = base + "_diarized.txt"
            if not os.path.exists(transcript_file):
                raise RuntimeError("Échec de la transcription.")

            # Summarization step.
            self.root.after(0, self.status_var.set, "Génération du compte-rendu par l'IA...")
            self.log("\n--- GÉNÉRATION DE LA SYNTHÈSE MÉDICALE ---")
            import ollama  # lazy: only required once a transcript exists
            with open(transcript_file, "r", encoding="utf-8") as f:
                content = f.read()

            resp = ollama.chat(model=model_name, messages=[
                {"role": "system", "content": prompt},
                {"role": "user", "content": content},
            ])

            out = base + "_summary_v2.md"
            with open(out, "w", encoding="utf-8") as f:
                f.write(resp['message']['content'])

            self.log(f"\n[SUCCÈS] Synthèse enregistrée : {os.path.basename(out)}")
            self.root.after(0, self.status_var.set, "Traitement terminé.")
            self.root.after(0, messagebox.showinfo, "Succès",
                            f"Compte-rendu généré :\n{os.path.basename(out)}")

        except Exception as e:
            self.log(f"\n[ERREUR] {e}")
            self.root.after(0, self.status_var.set, "Erreur de traitement.")
        finally:
            def reset_buttons():
                self.run_btn.config(state="normal", bg="#27ae60")
                self.stop_btn.config(state="disabled")
            self.root.after(0, reset_buttons)
|
|
|
|
if __name__ == "__main__":
    # Script entry point: build the Tk root, attach the application
    # and hand control to the event loop.
    main_window = tk.Tk()
    MedicalScribeGUIv2(main_window)
    main_window.mainloop()
|