Files
Geniusia_v2/test_learning_manager_simple.py
2026-03-05 00:20:25 +01:00

207 lines
7.3 KiB
Python
Raw Permalink Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
#!/usr/bin/env python3
"""
Test simple du gestionnaire d'apprentissage sans pytest
"""
import sys
import tempfile
import shutil
from pathlib import Path
from datetime import datetime
import numpy as np
# Ajouter le répertoire au path
sys.path.insert(0, str(Path(__file__).parent))
from geniusia2.core.learning_manager import LearningManager
from geniusia2.core.models import Action
from geniusia2.core.embeddings_manager import EmbeddingsManager
from geniusia2.core.logger import Logger
def test_learning_manager():
    """Basic end-to-end check of the learning manager, run without pytest.

    Creates a temporary workspace, wires up EmbeddingsManager + Logger +
    LearningManager, then walks through: observation, Shadow→Assist
    transition, confidence computation, action confirmation, task
    evaluation, global stats, Autopilot eligibility, and profile
    save/reload.

    Returns:
        bool: True if every step and assertion succeeded, False if any
        exception was raised (the traceback is printed). Temporary files
        are removed in all cases.
    """
    print("=" * 60)
    print("Test du Gestionnaire d'Apprentissage")
    print("=" * 60)

    # Temporary workspace: one sub-directory per component artifact.
    temp_dir = tempfile.mkdtemp()
    profiles_dir = Path(temp_dir) / "profiles"
    logs_dir = Path(temp_dir) / "logs"
    index_dir = Path(temp_dir) / "index"
    keys_dir = Path(temp_dir) / "keys"
    for directory in (profiles_dir, logs_dir, index_dir, keys_dir):
        directory.mkdir(parents=True, exist_ok=True)

    try:
        # Test 1: build the component stack on CPU against the temp dirs.
        print("\n1. Initialisation des composants...")
        embeddings_manager = EmbeddingsManager(
            model_name="ViT-B-32",
            index_path=str(index_dir),
            device="cpu",
        )
        print(" ✓ EmbeddingsManager créé")
        logger = Logger(
            log_dir=str(logs_dir),
            key_path=str(keys_dir),
        )
        print(" ✓ Logger créé")
        config = {
            "thresholds": {
                "autopilot_observations": 20,
                "autopilot_concordance": 0.95,
                "confidence_min": 0.90,
                "rollback_confidence": 0.85,
            }
        }
        learning_manager = LearningManager(
            embeddings_manager=embeddings_manager,
            logger=logger,
            config=config,
            profiles_path=str(profiles_dir),
        )
        print(" ✓ LearningManager créé")
        print(f" Mode initial: {learning_manager.mode}")

        # Test 2: observing one action should create exactly one task.
        print("\n2. Test observation d'action...")
        action = Action(
            action_type="click",
            target_element="valider_button",
            bbox=(100, 100, 50, 30),
            confidence=0.9,
            embedding=np.random.rand(512).astype(np.float32),
            timestamp=datetime.now(),
            window_title="Test Window",
        )
        learning_manager.observe(action)
        print(" ✓ Action observée")
        print(f" Nombre de tâches: {len(learning_manager.tasks)}")
        task_id = next(iter(learning_manager.tasks))
        task = learning_manager.tasks[task_id]
        print(f" Task ID: {task_id}")
        print(f" Observations: {task.observation_count}")
        print(f" Mode: {task.mode}")

        # Test 3: 4 more observations (5 total) should flip Shadow → Assist.
        print("\n3. Test transition Shadow → Assist...")
        for _ in range(4):
            learning_manager.observe(action)
        print(" ✓ 5 observations effectuées")
        print(f" Mode actuel: {task.mode}")
        assert task.mode == "assist", "Devrait être en mode Assist"
        print(" ✓ Transition réussie vers mode Assist")

        # Test 4: confidence = 0.6*vision + 0.3*llm + 0.1*history
        # (history term is 0.0 here — no prior success record for the task).
        print("\n4. Test calcul de confiance...")
        confidence = learning_manager.calculate_confidence(
            vision_conf=0.9,
            llm_score=0.8,
            task_id=task_id,
        )
        print(f" ✓ Confiance calculée: {confidence:.3f}")
        expected = 0.6 * 0.9 + 0.3 * 0.8 + 0.1 * 0.0
        print(f" Attendu: {expected:.3f}")
        assert abs(confidence - expected) < 0.01, "Calcul de confiance incorrect"
        print(" ✓ Formule correcte (0.6×vision + 0.3×llm + 0.1×historique)")

        # Test 5: a single acceptance yields 100% concordance.
        print("\n5. Test confirmation d'action...")
        learning_manager.confirm_action({
            "type": "accept",
            "task_id": task_id,
        })
        print(" ✓ Action acceptée")
        print(f" Taux de concordance: {task.concordance_rate:.2%}")
        assert task.concordance_rate == 1.0, "Concordance devrait être 100%"

        # Test 6: per-task metrics snapshot.
        print("\n6. Test évaluation de tâche...")
        metrics = learning_manager.evaluate_task(task_id)
        print(" ✓ Métriques obtenues:")
        print(f" - Observations: {metrics['observation_count']}")
        print(f" - Concordance: {metrics['concordance_rate']:.2%}")
        print(f" - Corrections: {metrics['correction_count']}")
        print(f" - Mode: {metrics['mode']}")

        # Test 7: global per-mode task counters.
        print("\n7. Test statistiques globales...")
        stats = learning_manager.get_task_stats()
        print(" ✓ Statistiques:")
        print(f" - Total tâches: {stats['total_tasks']}")
        print(f" - Shadow: {stats['shadow_tasks']}")
        print(f" - Assist: {stats['assist_tasks']}")
        print(f" - Auto: {stats['auto_tasks']}")

        # Test 8: reach 20 observations, confirming each one so the
        # concordance stays at 100% (required for Autopilot eligibility).
        # NOTE(review): the original paste lost indentation; confirm_action
        # is assumed to be inside the loop, as implied by the 100% claim.
        print("\n8. Test critères Autopilot...")
        for _ in range(15):
            learning_manager.observe(action)
            learning_manager.confirm_action({
                "type": "accept",
                "task_id": task_id,
            })
        print(" ✓ 20 observations avec 100% concordance")
        should_auto = learning_manager.should_transition_to_auto(task_id)
        print(f" Éligible pour Autopilot: {should_auto}")
        if should_auto:
            print(" ✓ Critères Autopilot remplis (≥20 obs, ≥95% concordance)")

        # Test 9: persist the profile, then reload it via a fresh manager
        # pointed at the same profiles directory.
        print("\n9. Test sauvegarde et chargement...")
        learning_manager._save_profile(task_id)
        print(" ✓ Profil sauvegardé")
        learning_manager2 = LearningManager(
            embeddings_manager=embeddings_manager,
            logger=logger,
            config=config,
            profiles_path=str(profiles_dir),
        )
        print(" ✓ Nouveau gestionnaire créé")
        print(f" Tâches chargées: {len(learning_manager2.tasks)}")
        if task_id in learning_manager2.tasks:
            loaded_task = learning_manager2.tasks[task_id]
            print(" ✓ Tâche rechargée:")
            print(f" - Observations: {loaded_task.observation_count}")
            print(f" - Mode: {loaded_task.mode}")
            print(f" - Concordance: {loaded_task.concordance_rate:.2%}")

        print("\n" + "=" * 60)
        print("✓ TOUS LES TESTS RÉUSSIS!")
        print("=" * 60)
        return True
    except Exception as e:
        # Report failure with a traceback but keep a clean exit path.
        print(f"\n✗ ERREUR: {e}")
        import traceback
        traceback.print_exc()
        return False
    finally:
        # Always remove the temporary workspace, pass or fail.
        shutil.rmtree(temp_dir)
        print("\n✓ Fichiers temporaires nettoyés")
if __name__ == "__main__":
    # Script entry point: exit code 0 on success, 1 on failure, so the
    # result is consumable by CI / shell pipelines.
    success = test_learning_manager()
    sys.exit(0 if success else 1)