v1.0 - Stable version: multi-PC, UI-DETR-1 detection, 3 execution modes

- Frontend v4 reachable on the local network (192.168.1.40)
- Open ports: 3002 (frontend), 5001 (backend), 5004 (dashboard)
- Ollama GPU working
- Interactive self-healing
- Confidence dashboard

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
examples/test_phase7_simple.py (new file, +151 lines)
@@ -0,0 +1,151 @@
#!/usr/bin/env python3
"""Test Phase 7 - Learning System (Simplified)"""
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent))
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def test_imports():
    logger.info("\n=== Testing Imports ===")
    try:
        from core.learning.learning_manager import LearningManager, WorkflowStats
        logger.info("✓ LearningManager imported")
        from core.learning.feedback_processor import FeedbackProcessor, FeedbackType
        logger.info("✓ FeedbackProcessor imported")
        from core.models.workflow_graph import LearningState
        logger.info("✓ LearningState imported")
        return True
    except Exception as e:
        logger.error(f"✗ Import failed: {e}")
        return False

def test_learning_manager_basic():
    logger.info("\n=== Testing LearningManager Basic ===")
    try:
        from core.learning.learning_manager import LearningManager, WorkflowStats
        from core.models.workflow_graph import LearningState

        manager = LearningManager()
        logger.info("✓ LearningManager created")

        # Create stats directly
        stats = WorkflowStats(
            workflow_id="test_wf_1",
            learning_state=LearningState.OBSERVATION
        )
        manager.workflows["test_wf_1"] = stats
        logger.info("✓ Workflow stats created")

        # Test observations
        for i in range(5):
            manager.record_observation("test_wf_1")
            stats.confidence_scores.append(0.92)

        state = manager.get_workflow_state("test_wf_1")
        logger.info(f" After 5 observations: {state.value}")

        # Test executions
        for i in range(10):
            manager.record_execution("test_wf_1", success=True, confidence=0.93)

        state = manager.get_workflow_state("test_wf_1")
        logger.info(f" After 10 executions: {state.value}")

        stats_result = manager.get_workflow_stats("test_wf_1")
        logger.info(f" Stats: success_rate={stats_result.success_rate:.2f}")

        return True
    except Exception as e:
        logger.error(f"✗ Test failed: {e}", exc_info=True)
        return False

def test_feedback_processor():
    logger.info("\n=== Testing FeedbackProcessor ===")
    try:
        from core.learning.feedback_processor import FeedbackProcessor, FeedbackType

        processor = FeedbackProcessor()
        logger.info("✓ FeedbackProcessor created")

        result = processor.process_feedback(
            workflow_id="test_wf_1",
            execution_id="exec_1",
            feedback_type=FeedbackType.CORRECT,
            confidence=0.95
        )
        logger.info(f"✓ Feedback processed: {len(result['suggestions'])} suggestions")

        stats = processor.get_feedback_stats("test_wf_1")
        logger.info(f"✓ Stats: {stats['total']} total, accuracy={stats['accuracy']:.2f}")

        return True
    except Exception as e:
        logger.error(f"✗ Test failed: {e}", exc_info=True)
        return False

def test_state_transitions():
    logger.info("\n=== Testing State Transitions ===")
    try:
        from core.learning.learning_manager import LearningManager, WorkflowStats
        from core.models.workflow_graph import LearningState

        manager = LearningManager()

        # Test OBSERVATION → COACHING
        stats = WorkflowStats(workflow_id="wf_trans", learning_state=LearningState.OBSERVATION)
        manager.workflows["wf_trans"] = stats

        logger.info(f" Initial: {stats.learning_state.value}")

        # Trigger transition
        for i in range(5):
            stats.observation_count += 1
            stats.confidence_scores.append(0.92)
            manager._check_state_transition("wf_trans")

        logger.info(f" After 5 obs: {stats.learning_state.value}")

        # COACHING → AUTO_CANDIDATE
        for i in range(10):
            stats.execution_count += 1
            stats.success_count += 1
            manager._check_state_transition("wf_trans")

        logger.info(f" After 10 exec: {stats.learning_state.value}")

        logger.info("✓ State transitions working")
        return True

    except Exception as e:
        logger.error(f"✗ Test failed: {e}", exc_info=True)
        return False

def main():
    logger.info("=" * 60)
    logger.info("Phase 7 - Learning System Tests")
    logger.info("=" * 60)

    tests = [
        test_imports,
        test_learning_manager_basic,
        test_feedback_processor,
        test_state_transitions
    ]

    results = []
    for test in tests:
        try:
            result = test()
            results.append(result)
        except Exception as e:
            logger.error(f"Test crashed: {e}", exc_info=True)
            results.append(False)

    passed = sum(results)
    logger.info(f"\n{'='*60}\nResults: {passed}/{len(results)} tests passed\n{'='*60}")
    return 0 if passed == len(results) else 1

if __name__ == '__main__':
    sys.exit(main())
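Usage note: given the sys.path.insert at the top of the script, it is intended to be launched as a plain script, e.g. python examples/test_phase7_simple.py from the repository root, assuming the core.learning and core.models packages live one level above examples/ as the imports imply. It logs one ✓/✗ line per check and exits with status 0 only when all four tests pass.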