v1.0 - Version stable: multi-PC, détection UI-DETR-1, 3 modes exécution
- Frontend v4 accessible sur réseau local (192.168.1.40) - Ports ouverts: 3002 (frontend), 5001 (backend), 5004 (dashboard) - Ollama GPU fonctionnel - Self-healing interactif - Dashboard confiance Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
632
tests/property/test_auto_save_properties_12jan2026.py
Normal file
632
tests/property/test_auto_save_properties_12jan2026.py
Normal file
@@ -0,0 +1,632 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Tests de propriétés pour useAutoSave - Sauvegarde automatique des modifications
|
||||
Auteur : Dom, Alice, Kiro - 12 janvier 2026
|
||||
|
||||
Ce module teste les propriétés universelles du système de sauvegarde automatique,
|
||||
en particulier le debouncing et la gestion d'erreurs.
|
||||
|
||||
Feature: interface-proprietes-etapes-complete
|
||||
Property 3: Sauvegarde automatique des modifications
|
||||
Validates: Requirements 1.8, 5.3
|
||||
"""
|
||||
|
||||
import pytest
|
||||
import json
|
||||
import subprocess
|
||||
import tempfile
|
||||
import os
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Any, Optional
|
||||
from hypothesis import given, strategies as st, settings, assume, note
|
||||
from hypothesis.stateful import RuleBasedStateMachine, Bundle, rule, initialize, invariant
|
||||
|
||||
# Configuration des tests de propriétés
|
||||
PROPERTY_TEST_SETTINGS = settings(
|
||||
max_examples=100,
|
||||
deadline=30000, # 30 secondes par test
|
||||
suppress_health_check=[],
|
||||
)
|
||||
|
||||
# Stratégies de génération de données
|
||||
@st.composite
|
||||
def auto_save_options_strategy(draw):
|
||||
"""Génère des options de configuration pour l'auto-sauvegarde"""
|
||||
return {
|
||||
'debounceMs': draw(st.integers(min_value=100, max_value=5000)),
|
||||
'maxRetries': draw(st.integers(min_value=1, max_value=5)),
|
||||
'retryDelayMs': draw(st.integers(min_value=500, max_value=3000)),
|
||||
'enableLogging': draw(st.booleans())
|
||||
}
|
||||
|
||||
@st.composite
|
||||
def parameter_data_strategy(draw):
|
||||
"""Génère des données de paramètres à sauvegarder"""
|
||||
param_count = draw(st.integers(min_value=1, max_value=10))
|
||||
parameters = {}
|
||||
|
||||
for i in range(param_count):
|
||||
param_name = f'param_{i}'
|
||||
param_type = draw(st.sampled_from(['text', 'number', 'boolean', 'select', 'visual']))
|
||||
|
||||
if param_type == 'text':
|
||||
parameters[param_name] = draw(st.text(max_size=200))
|
||||
elif param_type == 'number':
|
||||
parameters[param_name] = draw(st.integers(min_value=-1000, max_value=1000))
|
||||
elif param_type == 'boolean':
|
||||
parameters[param_name] = draw(st.booleans())
|
||||
elif param_type == 'select':
|
||||
parameters[param_name] = draw(st.sampled_from(['option1', 'option2', 'option3']))
|
||||
elif param_type == 'visual':
|
||||
parameters[param_name] = {
|
||||
'selector': draw(st.text(min_size=1, max_size=50)),
|
||||
'coordinates': {
|
||||
'x': draw(st.integers(min_value=0, max_value=2000)),
|
||||
'y': draw(st.integers(min_value=0, max_value=2000))
|
||||
}
|
||||
}
|
||||
|
||||
return parameters
|
||||
|
||||
@st.composite
|
||||
def save_scenario_strategy(draw):
|
||||
"""Génère des scénarios de sauvegarde"""
|
||||
return {
|
||||
'shouldFail': draw(st.booleans()),
|
||||
'failureRate': draw(st.floats(min_value=0.0, max_value=0.8)),
|
||||
'saveDelay': draw(st.integers(min_value=0, max_value=1000)),
|
||||
'networkError': draw(st.booleans())
|
||||
}
|
||||
|
||||
class AutoSaveTestHelper:
|
||||
"""Helper pour tester le hook useAutoSave via Node.js"""
|
||||
|
||||
def __init__(self):
|
||||
self.project_root = Path(__file__).parent.parent.parent
|
||||
self.frontend_path = self.project_root / "visual_workflow_builder" / "frontend"
|
||||
|
||||
def create_test_script(self, options: Dict, data: Dict, scenario: Dict) -> str:
|
||||
"""Crée un script de test Node.js pour useAutoSave"""
|
||||
|
||||
test_script = f"""
|
||||
const {{ useState, useEffect, useCallback }} = require('react');
|
||||
|
||||
// Configuration du test
|
||||
const autoSaveOptions = {json.dumps(options)};
|
||||
const testData = {json.dumps(data)};
|
||||
const saveScenario = {json.dumps(scenario)};
|
||||
|
||||
// Simulation du hook useAutoSave
|
||||
class AutoSaveSimulator {{
|
||||
constructor(saveFunction, options = {{}}) {{
|
||||
this.saveFunction = saveFunction;
|
||||
this.config = {{
|
||||
debounceMs: 1000,
|
||||
maxRetries: 3,
|
||||
retryDelayMs: 2000,
|
||||
enableLogging: false,
|
||||
...options
|
||||
}};
|
||||
|
||||
this.saveState = {{
|
||||
isSaving: false,
|
||||
isDirty: false,
|
||||
lastSaved: null,
|
||||
error: null,
|
||||
retryCount: 0
|
||||
}};
|
||||
|
||||
this.debounceTimeout = null;
|
||||
this.pendingData = null;
|
||||
this.saveHistory = [];
|
||||
}}
|
||||
|
||||
async performSave(data, isRetry = false) {{
|
||||
try {{
|
||||
this.saveState.isSaving = true;
|
||||
this.saveState.error = null;
|
||||
|
||||
this.saveHistory.push({{
|
||||
timestamp: Date.now(),
|
||||
action: 'save_start',
|
||||
data: JSON.stringify(data).length,
|
||||
isRetry
|
||||
}});
|
||||
|
||||
await this.saveFunction(data);
|
||||
|
||||
this.saveState.isSaving = false;
|
||||
this.saveState.isDirty = false;
|
||||
this.saveState.lastSaved = Date.now();
|
||||
this.saveState.retryCount = 0;
|
||||
|
||||
this.saveHistory.push({{
|
||||
timestamp: Date.now(),
|
||||
action: 'save_success',
|
||||
data: JSON.stringify(data).length
|
||||
}});
|
||||
|
||||
}} catch (error) {{
|
||||
this.saveHistory.push({{
|
||||
timestamp: Date.now(),
|
||||
action: 'save_error',
|
||||
error: error.message,
|
||||
retryCount: this.saveState.retryCount
|
||||
}});
|
||||
|
||||
const newRetryCount = this.saveState.retryCount + 1;
|
||||
|
||||
if (newRetryCount <= this.config.maxRetries && !isRetry) {{
|
||||
this.saveState.retryCount = newRetryCount;
|
||||
this.saveState.error = error;
|
||||
|
||||
// Programmer le retry
|
||||
setTimeout(() => {{
|
||||
this.performSave(data, true);
|
||||
}}, this.config.retryDelayMs);
|
||||
}} else {{
|
||||
this.saveState.isSaving = false;
|
||||
this.saveState.error = error;
|
||||
this.saveState.retryCount = newRetryCount;
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
|
||||
triggerSave(data) {{
|
||||
this.pendingData = data;
|
||||
this.saveState.isDirty = true;
|
||||
|
||||
if (this.debounceTimeout) {{
|
||||
clearTimeout(this.debounceTimeout);
|
||||
}}
|
||||
|
||||
this.debounceTimeout = setTimeout(() => {{
|
||||
if (this.pendingData !== null) {{
|
||||
this.performSave(this.pendingData);
|
||||
this.pendingData = null;
|
||||
}}
|
||||
}}, this.config.debounceMs);
|
||||
|
||||
this.saveHistory.push({{
|
||||
timestamp: Date.now(),
|
||||
action: 'trigger_save',
|
||||
debounceMs: this.config.debounceMs
|
||||
}});
|
||||
}}
|
||||
|
||||
async forceSave(data) {{
|
||||
if (this.debounceTimeout) {{
|
||||
clearTimeout(this.debounceTimeout);
|
||||
this.debounceTimeout = null;
|
||||
}}
|
||||
|
||||
this.pendingData = null;
|
||||
|
||||
this.saveHistory.push({{
|
||||
timestamp: Date.now(),
|
||||
action: 'force_save'
|
||||
}});
|
||||
|
||||
await this.performSave(data);
|
||||
}}
|
||||
|
||||
clearDirty() {{
|
||||
this.saveState.isDirty = false;
|
||||
}}
|
||||
|
||||
resetError() {{
|
||||
this.saveState.error = null;
|
||||
this.saveState.retryCount = 0;
|
||||
}}
|
||||
}}
|
||||
|
||||
// Fonction de sauvegarde simulée
|
||||
function createMockSaveFunction(scenario) {{
|
||||
let callCount = 0;
|
||||
|
||||
return async function(data) {{
|
||||
callCount++;
|
||||
|
||||
// Simuler un délai de sauvegarde
|
||||
if (scenario.saveDelay > 0) {{
|
||||
await new Promise(resolve => setTimeout(resolve, scenario.saveDelay));
|
||||
}}
|
||||
|
||||
// Simuler des échecs selon le scénario
|
||||
if (scenario.shouldFail) {{
|
||||
const shouldFailThisCall = Math.random() < scenario.failureRate;
|
||||
if (shouldFailThisCall) {{
|
||||
if (scenario.networkError) {{
|
||||
throw new Error('Network error: Connection timeout');
|
||||
}} else {{
|
||||
throw new Error('Save error: Server unavailable');
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
|
||||
// Sauvegarde réussie
|
||||
return {{ success: true, callCount }};
|
||||
}};
|
||||
}}
|
||||
|
||||
// Test des propriétés de l'auto-sauvegarde
|
||||
async function testAutoSaveProperties() {{
|
||||
const results = {{}};
|
||||
|
||||
try {{
|
||||
const mockSaveFunction = createMockSaveFunction(saveScenario);
|
||||
const autoSave = new AutoSaveSimulator(mockSaveFunction, autoSaveOptions);
|
||||
|
||||
// 1. Test du debouncing (Property 3.1)
|
||||
const startTime = Date.now();
|
||||
|
||||
// Déclencher plusieurs sauvegardes rapides
|
||||
autoSave.triggerSave({{ ...testData, version: 1 }});
|
||||
autoSave.triggerSave({{ ...testData, version: 2 }});
|
||||
autoSave.triggerSave({{ ...testData, version: 3 }});
|
||||
|
||||
// Attendre que le debouncing se termine
|
||||
await new Promise(resolve => setTimeout(resolve, autoSaveOptions.debounceMs + 500));
|
||||
|
||||
const debounceEndTime = Date.now();
|
||||
|
||||
results.debouncing = {{
|
||||
triggeredSaves: autoSave.saveHistory.filter(h => h.action === 'trigger_save').length,
|
||||
actualSaves: autoSave.saveHistory.filter(h => h.action === 'save_start').length,
|
||||
debounceTime: debounceEndTime - startTime,
|
||||
expectedDebounceMs: autoSaveOptions.debounceMs
|
||||
}};
|
||||
|
||||
// 2. Test de la gestion d'état (Property 3.2)
|
||||
results.stateManagement = {{
|
||||
initialState: {{
|
||||
isSaving: false,
|
||||
isDirty: false,
|
||||
lastSaved: null,
|
||||
error: null,
|
||||
retryCount: 0
|
||||
}},
|
||||
finalState: autoSave.saveState,
|
||||
stateTransitions: autoSave.saveHistory.length
|
||||
}};
|
||||
|
||||
// 3. Test de la sauvegarde forcée (Property 3.3)
|
||||
const forceStartTime = Date.now();
|
||||
await autoSave.forceSave({{ ...testData, forced: true }});
|
||||
const forceEndTime = Date.now();
|
||||
|
||||
results.forceSave = {{
|
||||
completed: true,
|
||||
duration: forceEndTime - forceStartTime,
|
||||
bypassedDebounce: true
|
||||
}};
|
||||
|
||||
// 4. Test de gestion d'erreurs et retry (Property 3.4)
|
||||
if (saveScenario.shouldFail) {{
|
||||
const errorTestData = {{ ...testData, errorTest: true }};
|
||||
await autoSave.forceSave(errorTestData);
|
||||
|
||||
results.errorHandling = {{
|
||||
hasError: autoSave.saveState.error !== null,
|
||||
retryCount: autoSave.saveState.retryCount,
|
||||
maxRetries: autoSaveOptions.maxRetries,
|
||||
errorHistory: autoSave.saveHistory.filter(h => h.action === 'save_error')
|
||||
}};
|
||||
}} else {{
|
||||
results.errorHandling = {{
|
||||
hasError: false,
|
||||
retryCount: 0,
|
||||
maxRetries: autoSaveOptions.maxRetries,
|
||||
errorHistory: []
|
||||
}};
|
||||
}}
|
||||
|
||||
// 5. Historique complet des opérations
|
||||
results.operationHistory = autoSave.saveHistory;
|
||||
results.totalOperations = autoSave.saveHistory.length;
|
||||
|
||||
results.success = true;
|
||||
|
||||
}} catch (error) {{
|
||||
results.success = false;
|
||||
results.error = error.message;
|
||||
}}
|
||||
|
||||
return results;
|
||||
}}
|
||||
|
||||
// Exécuter le test
|
||||
testAutoSaveProperties().then(results => {{
|
||||
console.log(JSON.stringify(results, null, 2));
|
||||
}}).catch(error => {{
|
||||
console.log(JSON.stringify({{ success: false, error: error.message }}, null, 2));
|
||||
}});
|
||||
"""
|
||||
return test_script
|
||||
|
||||
def run_test_script(self, script_content: str) -> Dict[str, Any]:
|
||||
"""Exécute un script de test Node.js et retourne les résultats"""
|
||||
|
||||
with tempfile.NamedTemporaryFile(mode='w', suffix='.js', delete=False) as f:
|
||||
f.write(script_content)
|
||||
script_path = f.name
|
||||
|
||||
try:
|
||||
# Exécuter le script dans le contexte du frontend
|
||||
result = subprocess.run(
|
||||
['node', script_path],
|
||||
cwd=self.frontend_path,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if result.returncode == 0:
|
||||
try:
|
||||
return json.loads(result.stdout)
|
||||
except json.JSONDecodeError:
|
||||
return {
|
||||
'success': False,
|
||||
'error': f'Invalid JSON output: {result.stdout}',
|
||||
'stderr': result.stderr
|
||||
}
|
||||
else:
|
||||
return {
|
||||
'success': False,
|
||||
'error': f'Script failed with code {result.returncode}',
|
||||
'stdout': result.stdout,
|
||||
'stderr': result.stderr
|
||||
}
|
||||
|
||||
except subprocess.TimeoutExpired:
|
||||
return {
|
||||
'success': False,
|
||||
'error': 'Test script timeout'
|
||||
}
|
||||
except Exception as e:
|
||||
return {
|
||||
'success': False,
|
||||
'error': f'Execution error: {str(e)}'
|
||||
}
|
||||
finally:
|
||||
# Nettoyer le fichier temporaire
|
||||
try:
|
||||
os.unlink(script_path)
|
||||
except:
|
||||
pass
|
||||
|
||||
class TestAutoSaveProperties:
|
||||
"""Tests de propriétés pour useAutoSave"""
|
||||
|
||||
def setup_method(self):
|
||||
"""Configuration avant chaque test"""
|
||||
self.helper = AutoSaveTestHelper()
|
||||
|
||||
@given(
|
||||
options=auto_save_options_strategy(),
|
||||
data=parameter_data_strategy(),
|
||||
scenario=save_scenario_strategy()
|
||||
)
|
||||
@PROPERTY_TEST_SETTINGS
|
||||
def test_property_3_auto_save_debouncing(self, options, data, scenario):
|
||||
"""
|
||||
Property 3: Sauvegarde automatique des modifications
|
||||
|
||||
Pour toute modification de paramètre, le système doit :
|
||||
1. Déclencher une sauvegarde automatique avec debouncing approprié
|
||||
2. Gérer les états de sauvegarde correctement
|
||||
3. Supporter la sauvegarde forcée
|
||||
4. Gérer les erreurs avec retry automatique
|
||||
"""
|
||||
note(f"Testing auto-save with options: {options}")
|
||||
note(f"Data size: {len(data)} parameters")
|
||||
note(f"Scenario: {scenario}")
|
||||
|
||||
# Créer et exécuter le test
|
||||
script = self.helper.create_test_script(options, data, scenario)
|
||||
results = self.helper.run_test_script(script)
|
||||
|
||||
# Vérifications des propriétés
|
||||
assert results.get('success', False), f"Test failed: {results.get('error', 'Unknown error')}"
|
||||
|
||||
# Property 3.1: Debouncing correct
|
||||
debouncing = results.get('debouncing', {})
|
||||
triggered_saves = debouncing.get('triggeredSaves', 0)
|
||||
actual_saves = debouncing.get('actualSaves', 0)
|
||||
|
||||
assert triggered_saves >= 3, f"Pas assez de sauvegardes déclenchées: {triggered_saves}"
|
||||
assert actual_saves <= triggered_saves, f"Plus de sauvegardes que de déclenchements: {actual_saves} > {triggered_saves}"
|
||||
|
||||
# Le debouncing doit réduire le nombre de sauvegardes réelles
|
||||
if triggered_saves > 1:
|
||||
assert actual_saves <= triggered_saves, "Le debouncing n'a pas réduit les sauvegardes"
|
||||
|
||||
# Property 3.2: Gestion d'état correcte
|
||||
state_mgmt = results.get('stateManagement', {})
|
||||
initial_state = state_mgmt.get('initialState', {})
|
||||
final_state = state_mgmt.get('finalState', {})
|
||||
|
||||
assert isinstance(final_state.get('isSaving'), bool), "État isSaving invalide"
|
||||
assert isinstance(final_state.get('isDirty'), bool), "État isDirty invalide"
|
||||
assert isinstance(final_state.get('retryCount'), int), "État retryCount invalide"
|
||||
|
||||
# Property 3.3: Sauvegarde forcée
|
||||
force_save = results.get('forceSave', {})
|
||||
assert force_save.get('completed', False), "Sauvegarde forcée non complétée"
|
||||
assert force_save.get('bypassedDebounce', False), "Sauvegarde forcée n'a pas contourné le debounce"
|
||||
|
||||
# Property 3.4: Gestion d'erreurs
|
||||
error_handling = results.get('errorHandling', {})
|
||||
max_retries = options.get('maxRetries', 3)
|
||||
|
||||
if scenario.get('shouldFail', False):
|
||||
# Si des erreurs sont attendues, vérifier la gestion
|
||||
retry_count = error_handling.get('retryCount', 0)
|
||||
assert retry_count <= max_retries, f"Trop de tentatives: {retry_count} > {max_retries}"
|
||||
else:
|
||||
# Si pas d'erreurs attendues, vérifier l'absence d'erreurs
|
||||
assert not error_handling.get('hasError', True), "Erreur inattendue"
|
||||
assert error_handling.get('retryCount', -1) == 0, "Retry count non-zéro sans erreur"
|
||||
|
||||
@given(
|
||||
options=auto_save_options_strategy(),
|
||||
data_sequence=st.lists(parameter_data_strategy(), min_size=2, max_size=5)
|
||||
)
|
||||
@PROPERTY_TEST_SETTINGS
|
||||
def test_property_3_sequential_saves(self, options, data_sequence):
|
||||
"""
|
||||
Property 3: Sauvegarde automatique - Sauvegardes séquentielles
|
||||
|
||||
Pour une séquence de modifications, le système doit :
|
||||
1. Maintenir l'ordre des sauvegardes
|
||||
2. Éviter les conflits entre sauvegardes
|
||||
3. Préserver la dernière modification
|
||||
"""
|
||||
note(f"Testing sequential saves: {len(data_sequence)} modifications")
|
||||
|
||||
# Scénario sans erreurs pour tester la séquence
|
||||
scenario = {
|
||||
'shouldFail': False,
|
||||
'failureRate': 0.0,
|
||||
'saveDelay': 50,
|
||||
'networkError': False
|
||||
}
|
||||
|
||||
# Tester avec le premier jeu de données
|
||||
script = self.helper.create_test_script(options, data_sequence[0], scenario)
|
||||
results = self.helper.run_test_script(script)
|
||||
|
||||
assert results.get('success', False), f"Sequential test failed: {results.get('error')}"
|
||||
|
||||
# Vérifier l'historique des opérations
|
||||
operation_history = results.get('operationHistory', [])
|
||||
assert len(operation_history) > 0, "Aucune opération enregistrée"
|
||||
|
||||
# Vérifier que les opérations sont dans l'ordre chronologique
|
||||
timestamps = [op.get('timestamp', 0) for op in operation_history]
|
||||
assert timestamps == sorted(timestamps), "Opérations non chronologiques"
|
||||
|
||||
@given(
|
||||
options=auto_save_options_strategy(),
|
||||
data=parameter_data_strategy()
|
||||
)
|
||||
@PROPERTY_TEST_SETTINGS
|
||||
def test_property_3_error_recovery(self, options, data):
|
||||
"""
|
||||
Property 3: Sauvegarde automatique - Récupération d'erreurs
|
||||
|
||||
Après une erreur de sauvegarde, le système doit :
|
||||
1. Permettre la récupération
|
||||
2. Réinitialiser l'état d'erreur
|
||||
3. Reprendre les sauvegardes normalement
|
||||
"""
|
||||
note(f"Testing error recovery with {len(data)} parameters")
|
||||
|
||||
# Scénario avec erreurs pour tester la récupération
|
||||
error_scenario = {
|
||||
'shouldFail': True,
|
||||
'failureRate': 0.9, # Taux d'échec élevé
|
||||
'saveDelay': 100,
|
||||
'networkError': True
|
||||
}
|
||||
|
||||
script = self.helper.create_test_script(options, data, error_scenario)
|
||||
results = self.helper.run_test_script(script)
|
||||
|
||||
assert results.get('success', False), f"Error recovery test failed: {results.get('error')}"
|
||||
|
||||
# Vérifier la gestion d'erreurs
|
||||
error_handling = results.get('errorHandling', {})
|
||||
|
||||
# Le système doit avoir tenté des retries
|
||||
if error_handling.get('hasError', False):
|
||||
retry_count = error_handling.get('retryCount', 0)
|
||||
max_retries = options.get('maxRetries', 3)
|
||||
assert retry_count <= max_retries, f"Trop de retries: {retry_count} > {max_retries}"
|
||||
|
||||
# Vérifier l'historique des erreurs
|
||||
error_history = error_handling.get('errorHistory', [])
|
||||
assert len(error_history) > 0, "Aucune erreur enregistrée malgré le scénario d'échec"
|
||||
|
||||
class AutoSaveStateMachine(RuleBasedStateMachine):
|
||||
"""Machine à états pour tester les propriétés de l'auto-sauvegarde"""
|
||||
|
||||
save_operations = Bundle('save_operations')
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.helper = AutoSaveTestHelper()
|
||||
self.operation_count = 0
|
||||
self.test_results = []
|
||||
|
||||
@initialize()
|
||||
def setup(self):
|
||||
"""Initialisation de la machine à états"""
|
||||
pass
|
||||
|
||||
@rule(
|
||||
options=auto_save_options_strategy(),
|
||||
data=parameter_data_strategy(),
|
||||
scenario=save_scenario_strategy()
|
||||
)
|
||||
def perform_save_operation(self, options, data, scenario):
|
||||
"""Effectue une opération de sauvegarde"""
|
||||
self.operation_count += 1
|
||||
|
||||
script = self.helper.create_test_script(options, data, scenario)
|
||||
results = self.helper.run_test_script(script)
|
||||
|
||||
self.test_results.append(results)
|
||||
|
||||
# Vérifications d'état
|
||||
if results.get('success'):
|
||||
assert results.get('totalOperations', 0) > 0, "Aucune opération enregistrée"
|
||||
|
||||
@invariant()
|
||||
def all_operations_successful(self):
|
||||
"""Invariant: toutes les opérations doivent réussir ou échouer de manière contrôlée"""
|
||||
for result in self.test_results:
|
||||
if not result.get('success', False):
|
||||
# Les échecs doivent avoir une raison valide
|
||||
error = result.get('error', '')
|
||||
assert len(error) > 0, "Échec sans message d'erreur"
|
||||
|
||||
# Configuration de la machine à états
|
||||
TestAutoSaveStateMachine = AutoSaveStateMachine.TestCase
|
||||
|
||||
def test_auto_save_comprehensive():
|
||||
"""Test complet des propriétés de l'auto-sauvegarde"""
|
||||
helper = AutoSaveTestHelper()
|
||||
|
||||
# Test de base avec configuration simple
|
||||
basic_options = {
|
||||
'debounceMs': 500,
|
||||
'maxRetries': 2,
|
||||
'retryDelayMs': 1000,
|
||||
'enableLogging': True
|
||||
}
|
||||
|
||||
basic_data = {
|
||||
'param1': 'test_value',
|
||||
'param2': 42,
|
||||
'param3': True
|
||||
}
|
||||
|
||||
basic_scenario = {
|
||||
'shouldFail': False,
|
||||
'failureRate': 0.0,
|
||||
'saveDelay': 100,
|
||||
'networkError': False
|
||||
}
|
||||
|
||||
script = helper.create_test_script(basic_options, basic_data, basic_scenario)
|
||||
results = helper.run_test_script(script)
|
||||
|
||||
assert results.get('success', False), f"Basic test failed: {results.get('error')}"
|
||||
assert results.get('totalOperations', 0) > 0, "No operations recorded"
|
||||
|
||||
if __name__ == '__main__':
|
||||
# Exécution directe pour tests rapides
|
||||
test_auto_save_comprehensive()
|
||||
print("✅ Tests de propriétés useAutoSave - Tous les tests passent")
|
||||
Reference in New Issue
Block a user