#!/usr/bin/env python3
|
|
"""
|
|
Diagnostic complet du système GeniusIA v2.
|
|
Vérifie toutes les dépendances et composants critiques.
|
|
"""
|
|
|
|
import importlib
import os
import shutil
import sys
from pathlib import Path
|
|
|
|
def check_python_version():
    """Check the running Python interpreter version.

    Prints the detected version and whether it meets the minimum
    requirement (>= 3.8).

    Returns:
        bool: True if the interpreter is at least Python 3.8.
    """
    print("\n1. Version Python")
    print("=" * 60)

    version = sys.version_info
    print(f"   Python {version.major}.{version.minor}.{version.micro}")

    # Tuple comparison: unlike `major == 3 and minor >= 8`, this also
    # accepts any hypothetical future major version above 3.
    if version >= (3, 8):
        print("   ✓ Version compatible (>= 3.8)")
        return True
    else:
        print("   ✗ Version incompatible (nécessite >= 3.8)")
        return False
|
|
|
|
|
|
def check_virtual_env():
    """Check that the project's virtual environment exists and is activated.

    Returns:
        bool: True only when the venv directory exists AND is active.
    """
    print("\n2. Environnement virtuel")
    print("=" * 60)

    venv_path = Path("geniusia2/venv")

    # Guard clause: nothing else to check when the venv directory is absent.
    if not venv_path.exists():
        print("   ✗ Environnement virtuel non trouvé")
        print("      Exécute: python3 -m venv geniusia2/venv")
        return False

    print(f"   ✓ Environnement virtuel trouvé: {venv_path}")

    # Activation detection: legacy virtualenv sets `sys.real_prefix`;
    # the stdlib venv makes `sys.base_prefix` differ from `sys.prefix`.
    is_active = hasattr(sys, 'real_prefix')
    if not is_active and hasattr(sys, 'base_prefix'):
        is_active = sys.base_prefix != sys.prefix

    if is_active:
        print("   ✓ Environnement virtuel ACTIVÉ")
        return True

    print("   ⚠ Environnement virtuel NON ACTIVÉ")
    print("      Exécute: source geniusia2/venv/bin/activate")
    return False
|
|
|
|
|
|
def check_dependencies():
    """Check that the critical Python dependencies are importable.

    Prints one status line per dependency with its version when the module
    exposes ``__version__``.

    Returns:
        dict: mapping of module name -> bool (True if the import succeeded).
    """
    print("\n3. Dépendances Python")
    print("=" * 60)

    dependencies = {
        'torch': 'PyTorch (deep learning)',
        'faiss': 'FAISS (recherche de similarité)',
        'open_clip': 'OpenCLIP (embeddings visuels)',
        'PIL': 'Pillow (traitement d\'images)',
        'numpy': 'NumPy (calcul numérique)',
        'pynput': 'PyInput (capture événements)',
        'transformers': 'Transformers (modèles Qwen)',
    }

    results = {}

    for module, description in dependencies.items():
        try:
            # importlib.import_module is the documented replacement for
            # calling __import__ directly; it also handles PIL uniformly,
            # removing the previous duplicated special case.
            mod = importlib.import_module(module)
            if module == 'open_clip':
                # open_clip does not reliably expose __version__.
                version = 'installé'
            else:
                version = getattr(mod, '__version__', 'inconnue')

            print(f"   ✓ {module:15} {version:15} - {description}")
            results[module] = True
        except ImportError:
            print(f"   ✗ {module:15} {'NON INSTALLÉ':15} - {description}")
            results[module] = False

    return results
|
|
|
|
|
|
def check_system_dependencies():
    """Check that the required command-line tools are available on PATH.

    Returns:
        dict: mapping of command name -> bool (True if found on PATH).
    """
    print("\n4. Dépendances système")
    print("=" * 60)

    commands = {
        'xdotool': 'Contrôle de la souris/clavier',
        'scrot': 'Capture d\'écran',
        'wmctrl': 'Gestion des fenêtres',
    }

    results = {}

    for cmd, description in commands.items():
        # shutil.which performs the PATH lookup in-process: portable and
        # avoids spawning a shell per command like `os.system("which ...")`.
        if shutil.which(cmd) is not None:
            print(f"   ✓ {cmd:15} - {description}")
            results[cmd] = True
        else:
            print(f"   ✗ {cmd:15} - {description}")
            results[cmd] = False

    return results
|
|
|
|
|
|
def check_data_directories():
    """Check that the expected data directories exist, reporting file counts.

    Returns:
        dict: mapping of directory path -> bool (True if it exists).
    """
    print("\n5. Répertoires de données")
    print("=" * 60)

    directories = {
        'geniusia2/data/faiss_index': 'Index FAISS',
        'geniusia2/data/user_profiles': 'Profils utilisateur',
        'geniusia2/data/user_profiles/workflows': 'Workflows sauvegardés',
        'geniusia2/data/logs': 'Logs système',
        'geniusia2/models': 'Modèles ML',
    }

    results = {}

    for dir_path, description in directories.items():
        location = Path(dir_path)
        exists = location.exists()
        results[dir_path] = exists
        if exists:
            # Count direct children to give a rough idea of the content.
            entry_count = sum(1 for _ in location.glob('*'))
            print(f"   ✓ {description:25} - {entry_count} fichiers")
        else:
            print(f"   ✗ {description:25} - MANQUANT")

    return results
|
|
|
|
|
|
def check_models():
    """Check which ML model directories are present and report their sizes.

    Returns:
        dict: mapping of model directory path -> bool (True if present).
    """
    print("\n6. Modèles ML")
    print("=" * 60)

    models = {
        'geniusia2/models/openclip': 'OpenCLIP (embeddings)',
        'geniusia2/models/qwen2.5_vl': 'Qwen3-VL (vision)',
        'geniusia2/models/owl_v2': 'OWL-ViT v2 (détection)',
    }

    results = {}

    for model_path, description in models.items():
        location = Path(model_path)
        if not location.exists():
            print(f"   ✗ {description:25} - NON TÉLÉCHARGÉ")
            results[model_path] = False
            continue

        # Sum the sizes of all regular files under the model directory.
        total_bytes = sum(
            entry.stat().st_size
            for entry in location.glob('**/*')
            if entry.is_file()
        )
        size_mb = total_bytes / (1024 * 1024)
        print(f"   ✓ {description:25} - {size_mb:.1f} MB")
        results[model_path] = True

    return results
|
|
|
|
|
|
def check_core_modules():
    """Check that the system's core Python source files are present on disk.

    Returns:
        dict: mapping of module file path -> bool (True if present).
    """
    print("\n7. Modules Core")
    print("=" * 60)

    modules = [
        'geniusia2/core/orchestrator.py',
        'geniusia2/core/suggestion_manager.py',
        'geniusia2/core/workflow_matcher.py',
        'geniusia2/core/workflow_detector.py',
        'geniusia2/core/embeddings_manager.py',
        'geniusia2/core/learning_manager.py',
        'geniusia2/core/task_replay.py',
        'geniusia2/core/event_capture.py',
    ]

    results = {}

    for module_path in modules:
        source = Path(module_path)
        found = source.exists()
        results[module_path] = found
        if found:
            print(f"   ✓ {source.name:30} - {source.stat().st_size / 1024:.1f} KB")
        else:
            print(f"   ✗ {source.name:30} - MANQUANT")

    return results
|
|
|
|
|
|
def check_faiss_index():
    """Inspect the FAISS index files and attempt to load their metadata.

    Returns:
        bool: True when both files exist and the metadata unpickles cleanly.
    """
    print("\n8. Index FAISS")
    print("=" * 60)

    index_file = Path("geniusia2/data/faiss_index/embeddings.index")
    metadata_file = Path("geniusia2/data/faiss_index/metadata.pkl")

    # Guard clauses: both files must be present before going further.
    if not index_file.exists():
        print("   ✗ Fichier d'index manquant")
        return False

    if not metadata_file.exists():
        print("   ✗ Fichier de métadonnées manquant")
        return False

    # Report on-disk sizes in KiB.
    print(f"   ✓ Index: {index_file.stat().st_size / 1024:.1f} KB")
    print(f"   ✓ Métadonnées: {metadata_file.stat().st_size / 1024:.1f} KB")

    # Try to deserialize the metadata to count the stored embeddings.
    try:
        import pickle
        with open(metadata_file, 'rb') as fh:
            metadata = pickle.load(fh)

        # Both list- and dict-shaped metadata expose a length.
        if isinstance(metadata, (list, dict)):
            print(f"   ✓ {len(metadata)} embeddings stockés")
        else:
            print(f"   ⚠ Format de métadonnées inattendu: {type(metadata)}")

        return True
    except Exception as exc:
        print(f"   ✗ Erreur de chargement: {exc}")
        return False
|
|
|
|
|
|
def check_workflows():
    """Count saved workflow JSON files across the known workflow directories.

    Returns:
        bool: always True — an empty workflow store is not an error.
    """
    print("\n9. Workflows sauvegardés")
    print("=" * 60)

    candidate_dirs = (
        Path("geniusia2/data/user_profiles/workflows"),
        Path("data/user_profiles/workflows"),
    )

    total_workflows = 0
    for directory in candidate_dirs:
        if not directory.exists():
            continue
        count = len(list(directory.glob('workflow_*.json')))
        if count:
            print(f"   ✓ {directory}: {count} workflows")
            total_workflows += count

    if total_workflows > 0:
        print(f"\n   Total: {total_workflows} workflows disponibles")
    else:
        # Expected state on a fresh install: informational warning only.
        print("   ⚠ Aucun workflow sauvegardé")
        print("      C'est normal si tu n'as pas encore enregistré de workflows")

    return True
|
|
|
|
|
|
def generate_recommendations(results):
    """Print prioritized setup recommendations derived from check results.

    Args:
        results: dict of check-name -> result as assembled by main().
            When present, 'dependencies', 'system' and 'models' map to
            per-item bool dicts.

    Returns:
        None. Output is printed to stdout.
    """
    print("\n" + "=" * 60)
    print("RECOMMANDATIONS")
    print("=" * 60)

    recommendations = []

    # Use .get() consistently (the 'system'/'models' lookups already did);
    # previously results['dependencies'] raised KeyError on a partial dict.
    deps = results.get('dependencies', {})

    # FAISS is required for similarity search.
    if not deps.get('faiss', True):
        recommendations.append({
            'priority': 'CRITIQUE',
            'action': 'Installer FAISS',
            'command': './geniusia2/install_faiss.sh'
        })

    # PyTorch backs every ML component.
    if not deps.get('torch', True):
        recommendations.append({
            'priority': 'CRITIQUE',
            'action': 'Installer PyTorch',
            'command': 'pip install torch torchvision'
        })

    # OpenCLIP provides the visual embeddings.
    if not deps.get('open_clip', True):
        recommendations.append({
            'priority': 'IMPORTANTE',
            'action': 'Installer OpenCLIP',
            'command': 'pip install open-clip-torch'
        })

    # Missing system tools (xdotool, scrot, wmctrl).
    missing_sys = [k for k, v in results.get('system', {}).items() if not v]
    if missing_sys:
        recommendations.append({
            'priority': 'IMPORTANTE',
            'action': f'Installer dépendances système: {", ".join(missing_sys)}',
            'command': f'sudo apt-get install {" ".join(missing_sys)}'
        })

    # Missing model downloads are optional at this stage.
    missing_models = [k for k, v in results.get('models', {}).items() if not v]
    if missing_models:
        recommendations.append({
            'priority': 'OPTIONNELLE',
            'action': 'Télécharger les modèles ML',
            'command': 'python3 geniusia2/download_models.py'
        })

    if not recommendations:
        print("\n✓ Aucune action requise - Le système est prêt!")
        return

    # Display grouped by decreasing priority.
    for priority in ['CRITIQUE', 'IMPORTANTE', 'OPTIONNELLE']:
        priority_recs = [r for r in recommendations if r['priority'] == priority]
        if priority_recs:
            print(f"\n{priority}:")
            for i, rec in enumerate(priority_recs, 1):
                print(f"  {i}. {rec['action']}")
                print(f"     → {rec['command']}")
|
|
|
|
|
|
def main():
    """Run every diagnostic check, print recommendations, then a summary.

    Returns:
        int: 0 when at least 70% of the checks pass, 1 otherwise
        (suitable as a process exit code).
    """
    print("\n" + "=" * 60)
    print("DIAGNOSTIC COMPLET DU SYSTÈME GENIUSIA V2")
    print("=" * 60)

    results = {
        'python': check_python_version(),
        'venv': check_virtual_env(),
        'dependencies': check_dependencies(),
        'system': check_system_dependencies(),
        'directories': check_data_directories(),
        'models': check_models(),
        'core': check_core_modules(),
        'faiss': check_faiss_index(),
        'workflows': check_workflows(),
    }

    generate_recommendations(results)

    print("\n" + "=" * 60)
    print("RÉSUMÉ GLOBAL")
    print("=" * 60)

    # Flatten the results: a dict value contributes one check per entry,
    # a scalar value contributes a single check.
    total_checks = 0
    passed_checks = 0
    for value in results.values():
        if isinstance(value, dict):
            total_checks += len(value)
            passed_checks += sum(1 for ok in value.values() if ok)
        else:
            total_checks += 1
            if value:
                passed_checks += 1

    percentage = (passed_checks / total_checks * 100) if total_checks > 0 else 0

    print(f"\nVérifications réussies: {passed_checks}/{total_checks} ({percentage:.1f}%)")

    if percentage >= 90:
        print("\n✓ Système en excellent état!")
    elif percentage >= 70:
        print("\n⚠ Système fonctionnel mais quelques améliorations possibles")
    else:
        print("\n✗ Plusieurs composants nécessitent attention")

    # The exit code mirrors the 70% threshold used just above.
    return 0 if percentage >= 70 else 1
|
|
|
|
|
|
# Script entry point: the process exit status reflects overall system health
# (0 when >= 70% of checks pass, 1 otherwise — see main()).
if __name__ == "__main__":
    sys.exit(main())
|