#!/usr/bin/env python3
"""Check the state of learning and workflows.

Inspects the fine-tuner checkpoint, the FAISS embedding index and the
legacy user profiles, then prints recommendations for the operator.
"""

import sys
import os
import pickle

# Make the project package importable when this script runs from the repo root.
sys.path.insert(0, 'geniusia2')

print("="*60)
print("ÉTAT DE L'APPRENTISSAGE")
print("="*60)
# 1. Fine-tuner checkpoint: report example counts and training progress.
print("\n1. Checkpoint Fine-tuner:")
checkpoint_path = "data/fine_tuning/orchestrator_finetuning.pkl"
if os.path.exists(checkpoint_path):
    try:
        with open(checkpoint_path, 'rb') as f:
            # NOTE(review): pickle is only safe because this is a local,
            # self-produced artifact — never feed it untrusted data.
            data = pickle.load(f)

        print(" ✓ Checkpoint trouvé")
        print(f" - Exemples positifs: {len(data.get('positive_examples', []))}")
        print(f" - Exemples négatifs: {len(data.get('negative_examples', []))}")
        print(f" - Trainings effectués: {data.get('training_count', 0)}")

        if data.get('metrics_history'):
            print(f" - Dernière loss: {data['metrics_history'][-1].get('loss', 'N/A')}")
    except Exception as e:  # broad on purpose: any read/unpickle failure is reported, not fatal
        print(f" ❌ Erreur lecture: {e}")
else:
    print(" ⚠️ Checkpoint non trouvé")
# 2. FAISS index: report on-disk size and per-workflow embedding counts.
print("\n2. Index FAISS:")
index_path = "data/workflow_embeddings"
if os.path.exists(f"{index_path}.index"):
    size = os.path.getsize(f"{index_path}.index")
    print(f" ✓ Index trouvé ({size} bytes)")

    try:
        with open(f"{index_path}.metadata", 'rb') as f:
            # NOTE(review): trusted local sidecar file for the index.
            metadata = pickle.load(f)

        print(f" - Dimension: {metadata.get('dimension', 'N/A')}")
        print(f" - Embeddings: {len(metadata.get('metadata', []))}")

        # Tally how many embeddings each workflow contributed.
        workflows = {}
        for m in metadata.get('metadata', []):
            wid = m.get('workflow_id', 'unknown')
            workflows[wid] = workflows.get(wid, 0) + 1

        if workflows:
            print(f" - Workflows indexés: {len(workflows)}")
            # Show at most five workflows to keep the report short.
            for wid, count in list(workflows.items())[:5]:
                print(f" • {wid}: {count} embeddings")
    except Exception as e:  # metadata file may be missing or corrupt; report and continue
        print(f" ❌ Erreur lecture metadata: {e}")
else:
    print(" ⚠️ Index non trouvé")
# 3. Legacy user profiles: count profiles and their recorded tasks.
print("\n3. Profils utilisateur (ancien système):")
profiles_dir = "data/user_profiles"
if os.path.exists(profiles_dir):
    profiles = [d for d in os.listdir(profiles_dir) if os.path.isdir(os.path.join(profiles_dir, d))]
    print(f" ✓ {len(profiles)} profils trouvés")

    # Only detail the first three profiles to keep the report short.
    for profile in profiles[:3]:
        tasks_dir = os.path.join(profiles_dir, profile, "tasks")
        if os.path.exists(tasks_dir):
            tasks = [f for f in os.listdir(tasks_dir) if f.endswith('.json')]
            print(f" - {profile}: {len(tasks)} tâches")
else:
    print(" ⚠️ Pas de profils")
# 4. Recommendations derived from how many training examples were collected.
print("\n" + "="*60)
print("RECOMMANDATIONS")
print("="*60)

if os.path.exists(checkpoint_path):
    try:
        # Re-read the checkpoint: the earlier load may have failed, so `data`
        # cannot be assumed to exist here.  Guarded like section 1 so a
        # corrupt checkpoint no longer aborts the report halfway through.
        with open(checkpoint_path, 'rb') as f:
            data = pickle.load(f)
    except Exception as e:
        print(f" ❌ Erreur lecture: {e}")
    else:
        total_examples = len(data.get('positive_examples', [])) + len(data.get('negative_examples', []))

        if total_examples == 0:
            print("\n⚠️ Aucun exemple collecté")
            print(" → Le système fonctionne mais n'a pas encore d'exemples")
            print(" → Continue à utiliser l'application normalement")
        elif total_examples < 10:
            # 10 examples is the fine-tuning trigger threshold used below.
            print(f"\n📊 {total_examples} exemples collectés")
            print(f" → Encore {10 - total_examples} exemples avant le premier fine-tuning")
        else:
            print(f"\n✅ {total_examples} exemples collectés")
            print(" → Fine-tuning devrait avoir été déclenché")
            print(f" → Trainings effectués: {data.get('training_count', 0)}")

print("\nPour voir des suggestions:")
print(" 1. Répète la même action 3+ fois (ex: cliquer sur le même bouton)")
print(" 2. Attends la détection du pattern")
print(" 3. Répète cette séquence 20 fois pour l'apprendre")
print(" 4. La prochaine fois, une suggestion apparaîtra")
print("")