v1.0 - Version stable: multi-PC, détection UI-DETR-1, 3 modes exécution

- Frontend v4 accessible sur réseau local (192.168.1.40)
- Ports ouverts: 3002 (frontend), 5001 (backend), 5004 (dashboard)
- Ollama GPU fonctionnel
- Self-healing interactif
- Dashboard confiance

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Dom
2026-01-29 11:23:51 +01:00
parent 21bfa3b337
commit a27b74cf22
1595 changed files with 412691 additions and 400 deletions

540
server/api_core.py Normal file
View File

@@ -0,0 +1,540 @@
"""
API REST Complète - Expose toutes les fonctionnalités du core RPA Vision V3
Endpoints:
- /api/core/capture - Capture d'écran
- /api/core/detect - Détection UI
- /api/core/embed - Génération d'embeddings
- /api/core/match - Matching d'écran
- /api/core/faiss - Gestion index FAISS
- /api/core/pipeline - Pipeline complet
"""
import os
import sys
import json
import base64
import tempfile
from pathlib import Path
from datetime import datetime
from flask import Blueprint, jsonify, request, send_file
from io import BytesIO
# Add parent to path so that `core.*` packages resolve when run from server/.
sys.path.insert(0, str(Path(__file__).parent.parent))
# Flask blueprint grouping every core endpoint under the /api/core prefix.
api_core = Blueprint('api_core', __name__, url_prefix='/api/core')
# Lazy-loaded components: heavy objects created on first use via get_*().
_pipeline = None  # WorkflowPipeline singleton (see get_pipeline)
_capturer = None  # ScreenCapturer singleton (see get_capturer)
_detector = None  # NOTE(review): never assigned in this file — possibly vestigial
def get_pipeline():
    """Return the process-wide WorkflowPipeline, building it lazily."""
    global _pipeline
    if _pipeline is not None:
        return _pipeline
    # Imported here so the heavy pipeline stack loads only on first use.
    from core.pipeline.workflow_pipeline import WorkflowPipeline
    _pipeline = WorkflowPipeline()
    return _pipeline
def get_capturer():
    """Return the process-wide ScreenCapturer, building it lazily."""
    global _capturer
    if _capturer is not None:
        return _capturer
    # Imported here so capture dependencies load only when needed.
    from core.capture.screen_capturer import ScreenCapturer
    _capturer = ScreenCapturer()
    return _capturer
# =============================================================================
# Capture API
# =============================================================================
@api_core.route('/capture', methods=['GET'])
def capture_screen():
    """Capture the current screen and return it as base64 PNG plus metadata."""
    try:
        frame = get_capturer().capture_frame()
        if frame is None:
            return jsonify({'error': 'Capture failed'}), 500
        # Encode the raw frame as PNG, then base64 for JSON transport.
        from PIL import Image
        png_buffer = BytesIO()
        Image.fromarray(frame.image).save(png_buffer, format='PNG')
        encoded = base64.b64encode(png_buffer.getvalue()).decode()
        height, width = frame.image.shape[0], frame.image.shape[1]
        payload = {
            'success': True,
            'frame_id': frame.frame_id,
            'timestamp': frame.timestamp.isoformat(),
            'resolution': [width, height],
            'hash': frame.hash,
            'changed': frame.changed_from_previous,
            'window_info': frame.window_info,
            'image_base64': encoded,
        }
        return jsonify(payload)
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@api_core.route('/capture/stats', methods=['GET'])
def capture_stats():
    """Return screen-capture statistics (counts, rate, timing, memory)."""
    try:
        snapshot = get_capturer().get_stats()
        payload = {
            'total_captures': snapshot.total_captures,
            'captures_per_second': snapshot.captures_per_second,
            'unchanged_skipped': snapshot.unchanged_frames_skipped,
            'avg_capture_time_ms': snapshot.average_capture_time_ms,
            'buffer_size': snapshot.buffer_size,
            'memory_mb': snapshot.memory_usage_mb,
        }
        return jsonify(payload)
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@api_core.route('/capture/window', methods=['GET'])
def get_active_window():
    """Return information about the currently active window."""
    try:
        return jsonify({'window': get_capturer().get_active_window()})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
# =============================================================================
# Detection API
# =============================================================================
@api_core.route('/detect', methods=['POST'])
def detect_ui():
    """Detect UI elements in an image.

    Request JSON must contain either:
      - image_base64: image bytes encoded as base64, or
      - image_path: a path to an image readable by the server.

    Returns the detected elements (id, type, role, label, bbox, confidence),
    400 when no image is supplied and 503 when the detector is unavailable.
    """
    temp_path = None  # temp file created only for the base64 input path
    try:
        data = request.get_json()
        if 'image_base64' in data:
            # base64 is already imported at module level (fixed redundant local import).
            from PIL import Image
            img_data = base64.b64decode(data['image_base64'])
            img = Image.open(BytesIO(img_data))
            # The detector expects a file path, so persist to a temp file.
            with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as f:
                img.save(f.name)
                temp_path = f.name
            image_path = temp_path
        elif 'image_path' in data:
            image_path = data['image_path']
        else:
            return jsonify({'error': 'image_base64 or image_path required'}), 400
        pipeline = get_pipeline()
        if pipeline.ui_detector is None:
            return jsonify({'error': 'UI detector not available'}), 503
        elements = pipeline.ui_detector.detect(image_path)
        return jsonify({
            'success': True,
            'elements_count': len(elements),
            'elements': [
                {
                    'element_id': el.element_id,
                    'type': el.type,
                    'role': el.role,
                    'label': el.label,
                    'bbox': list(el.bbox),
                    'confidence': el.confidence
                }
                for el in elements
            ]
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
    finally:
        # Fix: the temp file used to be leaked (delete=False, never removed).
        if temp_path is not None:
            try:
                os.unlink(temp_path)
            except OSError:
                pass
# =============================================================================
# Embedding API
# =============================================================================
@api_core.route('/embed/image', methods=['POST'])
def embed_image():
    """Generate a CLIP embedding for a base64-encoded image.

    Request JSON must contain 'image_base64'. Returns the embedding vector
    and its dimensionality, or 400 when the key is missing.
    """
    try:
        data = request.get_json()
        if 'image_base64' not in data:
            return jsonify({'error': 'image_base64 required'}), 400
        # Fix: dropped redundant local `import base64` (module-level already)
        # and unused `import numpy as np`. PIL stays local: only needed here.
        from PIL import Image
        img_data = base64.b64decode(data['image_base64'])
        img = Image.open(BytesIO(img_data))
        pipeline = get_pipeline()
        embedding = pipeline.clip_embedder.embed_image(img)
        return jsonify({
            'success': True,
            'dimensions': len(embedding),
            'embedding': embedding.tolist()
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@api_core.route('/embed/text', methods=['POST'])
def embed_text():
    """Generate a CLIP embedding for a text string."""
    try:
        payload = request.get_json()
        text = payload.get('text')
        if not text:
            return jsonify({'error': 'text required'}), 400
        vector = get_pipeline().clip_embedder.embed_text(text)
        return jsonify({
            'success': True,
            'dimensions': len(vector),
            'embedding': vector.tolist(),
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@api_core.route('/embed/similarity', methods=['POST'])
def compute_similarity():
    """Compute the cosine similarity between two embeddings.

    Request JSON must contain 'embedding1' and 'embedding2' (lists of
    numbers). Returns the similarity in [-1, 1]; zero vectors yield 0.0.
    """
    try:
        data = request.get_json()
        emb1 = data.get('embedding1')
        emb2 = data.get('embedding2')
        if not emb1 or not emb2:
            return jsonify({'error': 'embedding1 and embedding2 required'}), 400
        import numpy as np
        v1 = np.array(emb1)
        v2 = np.array(emb2)
        # Fix: guard against zero-norm vectors. Previously a zero vector
        # produced a division by zero -> NaN, which is not JSON-serializable.
        norm_product = float(np.linalg.norm(v1) * np.linalg.norm(v2))
        if norm_product == 0.0:
            similarity = 0.0
        else:
            similarity = float(np.dot(v1, v2) / norm_product)
        return jsonify({
            'success': True,
            'similarity': similarity
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
# =============================================================================
# FAISS API
# =============================================================================
@api_core.route('/faiss/stats', methods=['GET'])
def faiss_stats():
    """Return statistics about the FAISS index."""
    try:
        return jsonify(get_pipeline().faiss_manager.get_stats())
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@api_core.route('/faiss/add', methods=['POST'])
def faiss_add():
    """Add an embedding (with optional metadata) to the FAISS index."""
    try:
        payload = request.get_json()
        embedding_id = payload.get('embedding_id')
        embedding = payload.get('embedding')
        metadata = payload.get('metadata', {})
        if not embedding_id or not embedding:
            return jsonify({'error': 'embedding_id and embedding required'}), 400
        import numpy as np
        pipeline = get_pipeline()
        pipeline.faiss_manager.add_embedding(
            embedding_id, np.array(embedding, dtype=np.float32), metadata
        )
        total = pipeline.faiss_manager.get_stats().get('total_embeddings', 0)
        return jsonify({
            'success': True,
            'embedding_id': embedding_id,
            'total_embeddings': total,
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@api_core.route('/faiss/search', methods=['POST'])
def faiss_search():
    """Search the FAISS index for the k most similar embeddings."""
    try:
        payload = request.get_json()
        query = payload.get('query')
        top_k = payload.get('k', 5)
        if not query:
            return jsonify({'error': 'query embedding required'}), 400
        import numpy as np
        matches = get_pipeline().faiss_manager.search(
            np.array(query, dtype=np.float32), k=top_k
        )
        return jsonify({
            'success': True,
            'results': matches,
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
# =============================================================================
# Pipeline API
# =============================================================================
@api_core.route('/pipeline/match', methods=['POST'])
def pipeline_match():
    """Capture the current screen and match it against known workflows.

    Optional request JSON may contain 'workflow_id' to restrict matching.
    Returns the match result plus active-window info, or 500 on capture failure.
    """
    screenshot_path = None  # temp screenshot, cleaned up in `finally`
    try:
        data = request.get_json() or {}
        workflow_id = data.get('workflow_id')
        # Capture current screen
        capturer = get_capturer()
        frame = capturer.capture_frame()
        if frame is None:
            return jsonify({'error': 'Capture failed'}), 500
        # The matcher expects a file path, so persist the frame to a temp file.
        from PIL import Image
        with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as f:
            Image.fromarray(frame.image).save(f.name)
            screenshot_path = f.name
        pipeline = get_pipeline()
        match = pipeline.match_current_state(
            screenshot_path,
            workflow_id=workflow_id,
            window_title=frame.window_info.get('title') if frame.window_info else None
        )
        return jsonify({
            'success': True,
            'match': match,
            'window_info': frame.window_info
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
    finally:
        # Fix: the temp screenshot used to be leaked (delete=False, never removed).
        if screenshot_path is not None:
            try:
                os.unlink(screenshot_path)
            except OSError:
                pass
@api_core.route('/pipeline/process', methods=['POST'])
def pipeline_process():
    """Acknowledge a full-session processing request (processing is async)."""
    try:
        payload = request.get_json()
        session_id = payload.get('session_id')
        if not session_id:
            return jsonify({'error': 'session_id required'}), 400
        # Actual processing happens asynchronously; this only queues the job.
        return jsonify({
            'success': True,
            'message': f'Processing started for {session_id}',
            'status': 'queued',
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
# =============================================================================
# Workflow Export/Import API
# =============================================================================
@api_core.route('/workflows/export/<workflow_id>', methods=['GET'])
def export_workflow(workflow_id):
    """Export a workflow as JSON embedded in the response body."""
    try:
        workflow = get_pipeline().load_workflow(workflow_id)
        if not workflow:
            return jsonify({'error': 'Workflow not found'}), 404
        # Round-trip through the workflow's own JSON serializer.
        return jsonify({
            'success': True,
            'workflow_id': workflow_id,
            'workflow': json.loads(workflow.to_json()),
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@api_core.route('/workflows/export/<workflow_id>/download', methods=['GET'])
def download_workflow(workflow_id):
    """Download a workflow as an attached JSON file."""
    try:
        workflow = get_pipeline().load_workflow(workflow_id)
        if not workflow:
            return jsonify({'error': 'Workflow not found'}), 404
        attachment = BytesIO(workflow.to_json().encode('utf-8'))
        attachment.seek(0)
        return send_file(
            attachment,
            mimetype='application/json',
            as_attachment=True,
            download_name=f'{workflow_id}.json',
        )
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@api_core.route('/workflows/import', methods=['POST'])
def import_workflow():
    """Import a workflow from a JSON payload, refusing silent overwrites."""
    try:
        payload = request.get_json()
        workflow_data = payload.get('workflow')
        overwrite = payload.get('overwrite', False)
        if not workflow_data:
            return jsonify({'error': 'workflow data required'}), 400
        from core.models.workflow_graph import Workflow
        workflow = Workflow.from_dict(workflow_data)
        workflow_id = workflow.workflow_id
        workflows_dir = Path('data/training/workflows')
        workflows_dir.mkdir(parents=True, exist_ok=True)
        target = workflows_dir / f'{workflow_id}.json'
        # Refuse to clobber an existing workflow unless explicitly allowed.
        if target.exists() and not overwrite:
            return jsonify({
                'error': f'Workflow {workflow_id} already exists. Set overwrite=true to replace.'
            }), 409
        workflow.save_to_file(target)
        return jsonify({
            'success': True,
            'workflow_id': workflow_id,
            'message': 'Workflow imported successfully',
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@api_core.route('/workflows/clone/<workflow_id>', methods=['POST'])
def clone_workflow(workflow_id):
    """Clone a workflow under a new id (and optionally a new name).

    Optional request JSON: 'new_id' (default '<id>_copy') and 'new_name'.
    Returns 404 when the source workflow does not exist.
    """
    try:
        data = request.get_json() or {}
        new_id = data.get('new_id', f'{workflow_id}_copy')
        new_name = data.get('new_name')
        pipeline = get_pipeline()
        workflow = pipeline.load_workflow(workflow_id)
        if not workflow:
            return jsonify({'error': 'Workflow not found'}), 404
        # Clone via dict round-trip, stamping fresh timestamps.
        workflow_dict = workflow.to_dict()
        workflow_dict['workflow_id'] = new_id
        if new_name:
            workflow_dict['name'] = new_name
        now = datetime.now().isoformat()
        workflow_dict['created_at'] = now
        workflow_dict['updated_at'] = now
        from core.models.workflow_graph import Workflow
        new_workflow = Workflow.from_dict(workflow_dict)
        # Save
        workflows_dir = Path('data/training/workflows')
        # Fix: ensure the target directory exists — import_workflow creates it,
        # but cloning first on a fresh install previously failed here.
        workflows_dir.mkdir(parents=True, exist_ok=True)
        new_workflow.save_to_file(workflows_dir / f'{new_id}.json')
        return jsonify({
            'success': True,
            'original_id': workflow_id,
            'new_id': new_id
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
# =============================================================================
# Health & Info
# =============================================================================
@api_core.route('/health', methods=['GET'])
def health():
    """Liveness probe: report health status and current server time."""
    payload = {'status': 'healthy', 'timestamp': datetime.now().isoformat()}
    return jsonify(payload)
@api_core.route('/info', methods=['GET'])
def info():
    """Describe this API and enumerate its endpoints."""
    endpoints = [
        '/api/core/capture',
        '/api/core/capture/stats',
        '/api/core/capture/window',
        '/api/core/detect',
        '/api/core/embed/image',
        '/api/core/embed/text',
        '/api/core/embed/similarity',
        '/api/core/faiss/stats',
        '/api/core/faiss/add',
        '/api/core/faiss/search',
        '/api/core/pipeline/match',
        '/api/core/pipeline/process',
        '/api/core/workflows/export/<id>',
        '/api/core/workflows/export/<id>/download',
        '/api/core/workflows/import',
        '/api/core/workflows/clone/<id>',
        '/api/core/health',
        '/api/core/info'
    ]
    return jsonify({
        'name': 'RPA Vision V3 Core API',
        'version': '1.0.0',
        'endpoints': endpoints,
    })

View File

@@ -0,0 +1,458 @@
#!/usr/bin/env python3
"""
API Serveur pour recevoir les uploads de l'agent V0
Endpoints:
- POST /api/traces/upload - Upload fichier .enc chiffré
- GET /api/traces/status - Status du serveur
- GET /api/traces/sessions - Liste des sessions reçues
Usage:
python api_upload.py
# Ou avec uvicorn directement:
uvicorn api_upload:app --host 0.0.0.0 --port 8000
"""
import os
import sys
import logging
import zipfile
from pathlib import Path
from datetime import datetime
from typing import Optional
from fastapi import FastAPI, UploadFile, File, Form, HTTPException
from fastapi.responses import JSONResponse, Response
import uvicorn
from prometheus_client import generate_latest, CONTENT_TYPE_LATEST
# Ajouter le répertoire parent au path pour importer les modules RPA Vision V3
sys.path.insert(0, str(Path(__file__).parent.parent))
from core.persistence import StorageManager
from core.models import RawSession
# Fiche #23 - Sécurité/gouvernance API (middleware)
from core.security.fastapi_security import install_security_middlewares
# Fiche #22 - AutoHeal admin API (optionnel)
try:
    from core.system.api_admin_autoheal import router as autoheal_admin_router
    AUTOHEAL_API_AVAILABLE = True
except Exception as _e:
    AUTOHEAL_API_AVAILABLE = False
    autoheal_admin_router = None
# Fiche #23 - Security admin API (kill-switch status); optional, like AutoHeal.
try:
    from core.system.api_admin_security import router as security_admin_router
    SECURITY_ADMIN_API_AVAILABLE = True
except Exception as _e:
    SECURITY_ADMIN_API_AVAILABLE = False
    security_admin_router = None
# Configuration: where encrypted uploads land and where sessions are extracted.
UPLOAD_DIR = Path("data/training/uploads")
SESSIONS_DIR = Path("data/training/sessions")
ENVIRONMENT = os.getenv("ENVIRONMENT", "development")
# Worker mode (prod):
# - thread   : processing worker in the same process as the API (default / simple)
# - external : separate worker (systemd rpa-vision-v3-worker.service)
# - disabled : no processing at all (upload-only API)
PROCESSING_WORKER_MODE = os.getenv("RPA_PROCESSING_WORKER", "thread").strip().lower()
# Secure password handling: production refuses to start without a real key;
# development falls back to a known default (flagged at startup).
ENCRYPTION_PASSWORD = os.getenv("ENCRYPTION_PASSWORD")
if not ENCRYPTION_PASSWORD:
    if ENVIRONMENT == "production":
        raise ValueError(
            "ENCRYPTION_PASSWORD must be set in production! "
            "Set it with: export ENCRYPTION_PASSWORD='your_secure_password'"
        )
    ENCRYPTION_PASSWORD = "rpa_vision_v3_default_key"
    # A warning is shown at startup (see __main__ block).
# Create the storage directories up front so the first upload cannot fail on them.
UPLOAD_DIR.mkdir(parents=True, exist_ok=True)
SESSIONS_DIR.mkdir(parents=True, exist_ok=True)
# Logging
logging.basicConfig(
    level=logging.INFO,
    format='[%(asctime)s] [%(levelname)s] %(name)s: %(message)s'
)
logger = logging.getLogger("api_upload")
# FastAPI app
app = FastAPI(
    title="RPA Vision V3 - Agent Upload API",
    description="API pour recevoir les sessions enregistrées par l'agent V0",
    version="1.0.0"
)
# Install security (auth + allowlist + rate-limit + audit + kill-switch).
install_security_middlewares(app)
# Mount the AutoHeal admin API (if available).
if AUTOHEAL_API_AVAILABLE and autoheal_admin_router is not None:
    app.include_router(autoheal_admin_router, prefix="/admin/autoheal", tags=["autoheal"])
# Mount the security admin API (if available).
if SECURITY_ADMIN_API_AVAILABLE and security_admin_router is not None:
    app.include_router(security_admin_router, prefix="/admin/security", tags=["security"])
# StorageManager: persistence layer for received sessions.
storage = StorageManager(base_path="data/training")
# Import the decryption module; fall back to a no-op path rewrite if missing.
try:
    from storage_encrypted import decrypt_session_file as decrypt_file
    DECRYPTION_AVAILABLE = True
except ImportError:
    logger.warning("Module storage_encrypted non trouvé, déchiffrement désactivé!")
    DECRYPTION_AVAILABLE = False
    def decrypt_file(encrypted_path: str, password: str) -> str:
        """Fallback used when the decryption module is unavailable.

        Does not decrypt; only maps the .enc path to the expected .zip path.
        """
        logger.error("Déchiffrement non disponible!")
        return encrypted_path.replace('.enc', '.zip')
@app.post("/api/traces/upload")
async def upload_session(
    file: UploadFile = File(...),
    session_id: str = Form(...)
):
    """
    Upload of a session recorded by the V0 agent.

    Steps: save the upload, decrypt if it is a .enc file, extract the ZIP,
    load/validate the RawSession JSON, persist it via StorageManager, then
    queue asynchronous processing (with a direct-thread fallback).

    Args:
        file: .enc (encrypted) or .zip file
        session_id: session identifier (also used as the on-disk name)
    Returns:
        JSON with status and session info
    Raises:
        HTTPException: 400 on decryption/extraction/validation errors,
        500 on missing decryption support or unexpected server errors.
    """
    try:
        logger.info(f"Réception upload session: {session_id}")
        logger.info(f"Fichier: {file.filename}, taille: {file.size} bytes")
        # Save the uploaded file; extension decides the decryption path below.
        file_ext = '.enc' if file.filename.endswith('.enc') else '.zip'
        upload_path = UPLOAD_DIR / f"{session_id}{file_ext}"
        with open(upload_path, "wb") as f:
            content = await file.read()
            f.write(content)
        logger.info(f"Fichier sauvegardé: {upload_path}")
        # Decrypt if necessary (.enc uploads only).
        if file_ext == '.enc':
            if not DECRYPTION_AVAILABLE:
                raise HTTPException(
                    status_code=500,
                    detail="Déchiffrement non disponible sur le serveur"
                )
            try:
                zip_path = decrypt_file(str(upload_path), ENCRYPTION_PASSWORD)
                logger.info(f"Fichier déchiffré: {zip_path}")
            except Exception as e:
                logger.error(f"Erreur déchiffrement: {e}")
                raise HTTPException(
                    status_code=400,
                    detail=f"Erreur déchiffrement: {str(e)}"
                )
        else:
            zip_path = str(upload_path)
        # Extract the ZIP into the per-session directory.
        # NOTE(review): extractall() on an untrusted archive is vulnerable to
        # zip-slip (entries with ../ escaping extract_dir) — consider
        # validating member paths before extraction.
        extract_dir = SESSIONS_DIR / session_id
        extract_dir.mkdir(parents=True, exist_ok=True)
        try:
            with zipfile.ZipFile(zip_path, 'r') as zf:
                zf.extractall(extract_dir)
            logger.info(f"ZIP extrait dans: {extract_dir}")
        except Exception as e:
            logger.error(f"Erreur extraction ZIP: {e}")
            raise HTTPException(
                status_code=400,
                detail=f"Erreur extraction ZIP: {str(e)}"
            )
        # Load and validate the RawSession (expects <session_id>/<session_id>.json).
        json_path = extract_dir / session_id / f"{session_id}.json"
        if not json_path.exists():
            logger.error(f"Fichier JSON introuvable: {json_path}")
            raise HTTPException(
                status_code=400,
                detail=f"Fichier JSON introuvable dans le ZIP"
            )
        try:
            session = RawSession.load_from_file(json_path)
            logger.info(f"RawSession chargée: {session.session_id}")
            logger.info(f"  - Événements: {len(session.events)}")
            logger.info(f"  - Screenshots: {len(session.screenshots)}")
            logger.info(f"  - Utilisateur: {session.user}")
        except Exception as e:
            logger.error(f"Erreur chargement RawSession: {e}")
            raise HTTPException(
                status_code=400,
                detail=f"Erreur chargement RawSession: {str(e)}"
            )
        # Persist via StorageManager (best-effort: a failure is not fatal).
        try:
            storage.save_raw_session(session, session_id)
            logger.info(f"Session sauvegardée dans StorageManager")
        except Exception as e:
            logger.warning(f"Erreur sauvegarde StorageManager: {e}")
            # Non-blocking: continue anyway.
        # Enqueue for processing (robust asynchronous pipeline).
        try:
            from processing_queue import add_to_queue
            add_to_queue(session_id, "data/training")
            logger.info(f"Session {session_id} ajoutée à la queue de processing")
        except Exception as e:
            logger.warning(f"Impossible d'ajouter à la queue: {e}")
            # Fallback: process directly in a daemon thread.
            try:
                from processing_pipeline import process_session_async
                import threading
                def process_in_background():
                    try:
                        logger.info(f"Démarrage pipeline processing pour {session_id}")
                        stats = process_session_async(session_id, "data/training")
                        logger.info(f"Pipeline terminé: {stats}")
                    except Exception as e:
                        logger.error(f"Erreur pipeline: {e}")
                thread = threading.Thread(target=process_in_background, daemon=True)
                thread.start()
                logger.info("Pipeline lancé en arrière-plan (fallback)")
            except Exception as e2:
                logger.warning(f"Impossible de lancer le pipeline: {e2}")
        return JSONResponse({
            "status": "success",
            "session_id": session_id,
            "events_count": len(session.events),
            "screenshots_count": len(session.screenshots),
            "user": session.user,
            "received_at": datetime.now().isoformat()
        })
    except HTTPException:
        # Re-raise HTTP errors untouched so FastAPI preserves their status.
        raise
    except Exception as e:
        logger.exception(f"Erreur inattendue: {e}")
        raise HTTPException(
            status_code=500,
            detail=f"Erreur serveur: {str(e)}"
        )
@app.get("/api/traces/debug-env")
async def debug_env():
    """Debug endpoint reporting whether auth-related env vars are set.

    Security fix: the previous version returned the RAW token values,
    leaking credentials to any caller. Only SET/NOT_SET is exposed now.
    """
    import os
    def presence(name):
        # Report presence only — never the secret itself.
        return "SET" if os.getenv(name) else "NOT_SET"
    return {
        "RPA_TOKEN_ADMIN": presence("RPA_TOKEN_ADMIN"),
        "RPA_TOKEN_READONLY": presence("RPA_TOKEN_READONLY"),
        "ADMIN_TOKENS": presence("ADMIN_TOKENS"),
        "READ_ONLY_TOKENS": presence("READ_ONLY_TOKENS"),
    }
@app.get("/api/traces/debug-auth")
async def debug_auth():
    """Debug endpoint to check token loading.

    Reports which env vars are set (booleans only), how many tokens the
    token manager loaded, and 8-character previews of up to 3 tokens each.

    NOTE(review): even truncated previews leak partial secrets — confirm
    this endpoint is restricted to admins or removed in production.
    """
    import os
    from core.security.api_tokens import get_token_manager
    token_manager = get_token_manager()
    return {
        "environment_vars": {
            "RPA_TOKEN_ADMIN": bool(os.getenv("RPA_TOKEN_ADMIN")),
            "RPA_TOKEN_READONLY": bool(os.getenv("RPA_TOKEN_READONLY")),
            "ADMIN_TOKENS": bool(os.getenv("ADMIN_TOKENS")),
            "READ_ONLY_TOKENS": bool(os.getenv("READ_ONLY_TOKENS")),
        },
        "token_counts": {
            "admin_tokens": len(token_manager.admin_tokens),
            "read_only_tokens": len(token_manager.read_only_tokens),
        },
        "admin_tokens_preview": [t[:8] + "..." for t in list(token_manager.admin_tokens)[:3]],
        "read_only_tokens_preview": [t[:8] + "..." for t in list(token_manager.read_only_tokens)[:3]],
    }
@app.get("/api/traces/status")
async def get_status():
    """Report server status and storage configuration."""
    using_default_key = ENCRYPTION_PASSWORD == "rpa_vision_v3_default_key"
    return {
        "status": "online",
        "version": "1.0.0",
        "upload_dir": str(UPLOAD_DIR),
        "sessions_dir": str(SESSIONS_DIR),
        "encryption_enabled": not using_default_key,
    }
@app.get("/api/traces/sessions")
async def list_sessions():
    """List every received session with basic metadata (unreadable ones are skipped)."""
    sessions = []
    for session_dir in SESSIONS_DIR.iterdir():
        if not session_dir.is_dir():
            continue
        json_files = list(session_dir.glob("*/*.json"))
        if not json_files:
            continue
        try:
            session = RawSession.load_from_file(json_files[0])
        except Exception as e:
            logger.warning(f"Erreur lecture session {session_dir.name}: {e}")
            continue
        ended = session.ended_at.isoformat() if session.ended_at else None
        sessions.append({
            "session_id": session.session_id,
            "started_at": session.started_at.isoformat(),
            "ended_at": ended,
            "events_count": len(session.events),
            "screenshots_count": len(session.screenshots),
            "user": session.user,
        })
    return {
        "sessions": sessions,
        "total": len(sessions),
    }
@app.get("/")
async def root():
    """Landing page listing the main endpoints."""
    endpoints = {
        "upload": "POST /api/traces/upload",
        "status": "GET /api/traces/status",
        "sessions": "GET /api/traces/sessions",
    }
    return {
        "message": "RPA Vision V3 - Agent Upload API",
        "endpoints": endpoints,
    }
@app.get("/healthz")
async def healthz():
    """Ultra-simple healthcheck (k8s/systemd timers) — a ping with no heavy deps."""
    payload = {
        "status": "ok",
        "service": "rpa-vision-v3-api",
        "environment": ENVIRONMENT,
        "worker_mode": PROCESSING_WORKER_MODE,
    }
    return payload
@app.get("/metrics")
async def metrics():
    """Prometheus metrics endpoint (public)."""
    try:
        body = generate_latest()
    except Exception as e:
        # A Prometheus failure must never take the API down.
        return Response(content=f"# error: {e}\n", media_type="text/plain", status_code=500)
    return Response(content=body, media_type=CONTENT_TYPE_LATEST)
@app.on_event("startup")
async def startup_event():
    """Server startup: launch the in-process processing worker.

    Only active when RPA_PROCESSING_WORKER == "thread"; in "external" or
    "disabled" mode the API does no processing itself.
    """
    if PROCESSING_WORKER_MODE != "thread":
        logger.info(f"Processing worker disabled for API (mode={PROCESSING_WORKER_MODE})")
        return
    try:
        from processing_queue import start_processing_worker
        from processing_pipeline import process_session_async
        start_processing_worker(process_session_async)
        logger.info("Worker de processing démarré (thread mode)")
    except Exception as e:
        # Non-fatal: uploads still work, processing just won't run in-process.
        logger.warning(f"Impossible de démarrer le worker: {e}")
@app.on_event("shutdown")
async def shutdown_event():
    """Server shutdown: stop the in-process worker cleanly (thread mode only)."""
    if PROCESSING_WORKER_MODE != "thread":
        return
    try:
        from processing_queue import stop_processing_worker
        stop_processing_worker()
        logger.info("Worker de processing arrêté")
    except Exception as e:
        # Best-effort shutdown; log and move on.
        logger.warning(f"Erreur arrêt worker: {e}")
@app.get("/api/traces/queue")
async def get_queue_status():
    """Summarize the processing queue: counts per status plus the last 20 items."""
    try:
        from processing_queue import get_queue
        items = get_queue().get_all()
        def count(status):
            return sum(1 for entry in items if entry["status"] == status)
        return {
            "total": len(items),
            "pending": count("pending"),
            "processing": count("processing"),
            "completed": count("completed"),
            "failed": count("failed"),
            "items": items[-20:],  # last 20 entries
        }
    except Exception as e:
        return {"error": str(e)}
if __name__ == "__main__":
    # Validate security settings before serving (hard-fails on bad config).
    from core.security import validate_production_security, get_security_config
    try:
        config = get_security_config()
        validate_production_security(config)
    except Exception as e:
        logger.error(f"Security validation failed: {e}")
        sys.exit(1)
    # Initialize the cleanup subsystem.
    # NOTE(review): exact cleanup scope is defined in core.system — confirm.
    from core.system import initialize_system_cleanup, shutdown_system
    initialize_system_cleanup()
    logger.info("Démarrage du serveur API...")
    logger.info(f"Upload dir: {UPLOAD_DIR.absolute()}")
    logger.info(f"Sessions dir: {SESSIONS_DIR.absolute()}")
    logger.info(f"Encryption password: {'***' if ENCRYPTION_PASSWORD != 'rpa_vision_v3_default_key' else 'DEFAULT (changer!)'}")
    try:
        # Binds all interfaces on port 8001 (the docstring's uvicorn example uses 8000).
        uvicorn.run(
            app,
            host="0.0.0.0",
            port=8001,
            log_level="info"
        )
    except KeyboardInterrupt:
        logger.info("Received keyboard interrupt, shutting down...")
        shutdown_system()
    except Exception as e:
        logger.error(f"Server error: {e}")
        shutdown_system()
        raise

View File

@@ -0,0 +1,459 @@
#!/usr/bin/env python3
"""
API Serveur pour recevoir les uploads de l'agent V0
Endpoints:
- POST /api/traces/upload - Upload fichier .enc chiffré
- GET /api/traces/status - Status du serveur
- GET /api/traces/sessions - Liste des sessions reçues
Usage:
python api_upload.py
# Ou avec uvicorn directement:
uvicorn api_upload:app --host 0.0.0.0 --port 8000
"""
import os
import sys
import logging
import zipfile
from pathlib import Path
from datetime import datetime
from typing import Optional
from fastapi import FastAPI, UploadFile, File, Form, HTTPException
from fastapi.responses import JSONResponse, Response
import uvicorn
from prometheus_client import generate_latest, CONTENT_TYPE_LATEST
# Ajouter le répertoire parent au path pour importer les modules RPA Vision V3
sys.path.insert(0, str(Path(__file__).parent.parent))
from core.persistence import StorageManager
from core.models import RawSession
# Fiche #23 - Sécurité/gouvernance API (middleware)
from core.security.fastapi_security import install_security_middlewares
# Fiche #22 - AutoHeal admin API (optionnel)
try:
    from core.system.api_admin_autoheal import router as autoheal_admin_router
    AUTOHEAL_API_AVAILABLE = True
except Exception as _e:
    AUTOHEAL_API_AVAILABLE = False
    autoheal_admin_router = None
# Fiche #23 - Security admin API (kill-switch status); optional, like AutoHeal.
try:
    from core.system.api_admin_security import router as security_admin_router
    SECURITY_ADMIN_API_AVAILABLE = True
except Exception as _e:
    SECURITY_ADMIN_API_AVAILABLE = False
    security_admin_router = None
# Configuration: where encrypted uploads land and where sessions are extracted.
UPLOAD_DIR = Path("data/training/uploads")
SESSIONS_DIR = Path("data/training/sessions")
ENVIRONMENT = os.getenv("ENVIRONMENT", "development")
# Worker mode (prod):
# - thread   : processing worker in the same process as the API (default / simple)
# - external : separate worker (systemd rpa-vision-v3-worker.service)
# - disabled : no processing at all (upload-only API)
PROCESSING_WORKER_MODE = os.getenv("RPA_PROCESSING_WORKER", "thread").strip().lower()
# Secure password handling: production refuses to start without a real key;
# development falls back to a known default (flagged at startup).
ENCRYPTION_PASSWORD = os.getenv("ENCRYPTION_PASSWORD")
if not ENCRYPTION_PASSWORD:
    if ENVIRONMENT == "production":
        raise ValueError(
            "ENCRYPTION_PASSWORD must be set in production! "
            "Set it with: export ENCRYPTION_PASSWORD='your_secure_password'"
        )
    ENCRYPTION_PASSWORD = "rpa_vision_v3_default_key"
    # A warning is shown at startup (see __main__ block).
# Create the storage directories up front so the first upload cannot fail on them.
UPLOAD_DIR.mkdir(parents=True, exist_ok=True)
SESSIONS_DIR.mkdir(parents=True, exist_ok=True)
# Logging
logging.basicConfig(
    level=logging.INFO,
    format='[%(asctime)s] [%(levelname)s] %(name)s: %(message)s'
)
logger = logging.getLogger("api_upload")
# FastAPI app
app = FastAPI(
    title="RPA Vision V3 - Agent Upload API",
    description="API pour recevoir les sessions enregistrées par l'agent V0",
    version="1.0.0"
)
# Install security (auth + allowlist + rate-limit + audit + kill-switch).
install_security_middlewares(app)
# Mount the AutoHeal admin API (if available).
if AUTOHEAL_API_AVAILABLE and autoheal_admin_router is not None:
    app.include_router(autoheal_admin_router, prefix="/admin/autoheal", tags=["autoheal"])
# Mount the security admin API (if available).
if SECURITY_ADMIN_API_AVAILABLE and security_admin_router is not None:
    app.include_router(security_admin_router, prefix="/admin/security", tags=["security"])
# StorageManager: persistence layer for received sessions.
storage = StorageManager(base_path="data/training")
# Import the decryption module; fall back to a no-op path rewrite if missing.
try:
    from storage_encrypted import decrypt_session_file as decrypt_file
    DECRYPTION_AVAILABLE = True
    logger.info("Module de déchiffrement server.storage_encrypted chargé")
except ImportError:
    logger.warning("Module storage_encrypted non trouvé, déchiffrement désactivé!")
    DECRYPTION_AVAILABLE = False
    def decrypt_file(encrypted_path: str, password: str) -> str:
        """Fallback used when the decryption module is unavailable.

        Does not decrypt; only maps the .enc path to the expected .zip path.
        """
        logger.error("Déchiffrement non disponible!")
        return encrypted_path.replace('.enc', '.zip')
@app.post("/api/traces/upload")
async def upload_session(
    file: UploadFile = File(...),
    session_id: str = Form(...)
):
    """
    Upload of a session recorded by the V0 agent.

    Args:
        file: .enc (encrypted) or .zip archive.
        session_id: Session identifier; must be a plain file-system-safe
            name (it is used to build on-disk paths).

    Returns:
        JSONResponse with status and basic session info.

    Raises:
        HTTPException: 400 for invalid session_id, decryption, extraction or
            parsing errors; 500 when decryption is unavailable or on
            unexpected server errors.
    """
    try:
        # Security: session_id is interpolated into filesystem paths below.
        # Reject anything that could escape UPLOAD_DIR / SESSIONS_DIR
        # (path traversal via "..", "/" or "\\").
        if (not session_id
                or session_id in (".", "..")
                or "/" in session_id
                or "\\" in session_id
                or session_id != os.path.basename(session_id)):
            raise HTTPException(status_code=400, detail="session_id invalide")

        logger.info(f"Réception upload session: {session_id}")
        logger.info(f"Fichier: {file.filename}, taille: {file.size} bytes")
        # Persist the uploaded payload. file.filename may be None depending
        # on the client; in that case we default to ".zip".
        file_ext = '.enc' if (file.filename or '').endswith('.enc') else '.zip'
        upload_path = UPLOAD_DIR / f"{session_id}{file_ext}"
        with open(upload_path, "wb") as f:
            content = await file.read()
            f.write(content)
        logger.info(f"Fichier sauvegardé: {upload_path}")
        # Decrypt if the agent sent an encrypted archive
        if file_ext == '.enc':
            if not DECRYPTION_AVAILABLE:
                raise HTTPException(
                    status_code=500,
                    detail="Déchiffrement non disponible sur le serveur"
                )
            try:
                zip_path = decrypt_file(str(upload_path), ENCRYPTION_PASSWORD)
                logger.info(f"Fichier déchiffré: {zip_path}")
            except Exception as e:
                logger.error(f"Erreur déchiffrement: {e}")
                raise HTTPException(
                    status_code=400,
                    detail=f"Erreur déchiffrement: {str(e)}"
                )
        else:
            zip_path = str(upload_path)
        # Extract the ZIP archive
        extract_dir = SESSIONS_DIR / session_id
        extract_dir.mkdir(parents=True, exist_ok=True)
        try:
            with zipfile.ZipFile(zip_path, 'r') as zf:
                # Security (zip slip): verify every member stays confined to
                # extract_dir before extracting anything.
                base_dir = extract_dir.resolve()
                for member in zf.namelist():
                    target = (extract_dir / member).resolve()
                    if target != base_dir and base_dir not in target.parents:
                        raise HTTPException(
                            status_code=400,
                            detail=f"Erreur extraction ZIP: chemin invalide ({member})"
                        )
                zf.extractall(extract_dir)
            logger.info(f"ZIP extrait dans: {extract_dir}")
        except HTTPException:
            raise
        except Exception as e:
            logger.error(f"Erreur extraction ZIP: {e}")
            raise HTTPException(
                status_code=400,
                detail=f"Erreur extraction ZIP: {str(e)}"
            )
        # Load and validate the RawSession payload
        json_path = extract_dir / session_id / f"{session_id}.json"
        if not json_path.exists():
            logger.error(f"Fichier JSON introuvable: {json_path}")
            raise HTTPException(
                status_code=400,
                detail=f"Fichier JSON introuvable dans le ZIP"
            )
        try:
            session = RawSession.load_from_file(json_path)
            logger.info(f"RawSession chargée: {session.session_id}")
            logger.info(f" - Événements: {len(session.events)}")
            logger.info(f" - Screenshots: {len(session.screenshots)}")
            logger.info(f" - Utilisateur: {session.user}")
        except Exception as e:
            logger.error(f"Erreur chargement RawSession: {e}")
            raise HTTPException(
                status_code=400,
                detail=f"Erreur chargement RawSession: {str(e)}"
            )
        # Persist via StorageManager (best effort, non-blocking)
        try:
            storage.save_raw_session(session, session_id)
            logger.info(f"Session sauvegardée dans StorageManager")
        except Exception as e:
            logger.warning(f"Erreur sauvegarde StorageManager: {e}")
            # Not blocking, keep going
        # Queue the session for robust asynchronous processing
        try:
            from processing_queue import add_to_queue
            add_to_queue(session_id, "data/training")
            logger.info(f"Session {session_id} ajoutée à la queue de processing")
        except Exception as e:
            logger.warning(f"Impossible d'ajouter à la queue: {e}")
            # Fallback: run the pipeline directly in a background thread
            try:
                from processing_pipeline import process_session_async
                import threading
                def process_in_background():
                    try:
                        logger.info(f"Démarrage pipeline processing pour {session_id}")
                        stats = process_session_async(session_id, "data/training")
                        logger.info(f"Pipeline terminé: {stats}")
                    except Exception as e:
                        logger.error(f"Erreur pipeline: {e}")
                thread = threading.Thread(target=process_in_background, daemon=True)
                thread.start()
                logger.info("Pipeline lancé en arrière-plan (fallback)")
            except Exception as e2:
                logger.warning(f"Impossible de lancer le pipeline: {e2}")
        return JSONResponse({
            "status": "success",
            "session_id": session_id,
            "events_count": len(session.events),
            "screenshots_count": len(session.screenshots),
            "user": session.user,
            "received_at": datetime.now().isoformat()
        })
    except HTTPException:
        raise
    except Exception as e:
        logger.exception(f"Erreur inattendue: {e}")
        raise HTTPException(
            status_code=500,
            detail=f"Erreur serveur: {str(e)}"
        )
@app.get("/api/traces/debug-env")
async def debug_env():
    """Debug endpoint to check environment variables.

    Security fix: the previous version returned the raw token values, which
    leaks secrets to anyone able to reach this endpoint. Only the presence
    of each variable is reported now ("SET" / "NOT_SET").
    """
    import os

    def _presence(name: str) -> str:
        # Never echo the actual value of a secret.
        return "SET" if os.getenv(name) else "NOT_SET"

    return {
        "RPA_TOKEN_ADMIN": _presence("RPA_TOKEN_ADMIN"),
        "RPA_TOKEN_READONLY": _presence("RPA_TOKEN_READONLY"),
        "ADMIN_TOKENS": _presence("ADMIN_TOKENS"),
        "READ_ONLY_TOKENS": _presence("READ_ONLY_TOKENS"),
    }
@app.get("/api/traces/debug-auth")
async def debug_auth():
    """Debug endpoint to check token loading.

    Security: token material is never returned verbatim. The previews expose
    only short SHA-256 fingerprints (non-reversible), instead of the raw
    8-character prefixes the previous version leaked.
    """
    import hashlib
    import os
    from core.security.api_tokens import get_token_manager

    def _fingerprint(token: str) -> str:
        # Short, non-reversible identifier so tokens can still be compared
        # between environments without leaking any secret material.
        return hashlib.sha256(token.encode("utf-8")).hexdigest()[:8]

    token_manager = get_token_manager()
    return {
        "environment_vars": {
            "RPA_TOKEN_ADMIN": bool(os.getenv("RPA_TOKEN_ADMIN")),
            "RPA_TOKEN_READONLY": bool(os.getenv("RPA_TOKEN_READONLY")),
            "ADMIN_TOKENS": bool(os.getenv("ADMIN_TOKENS")),
            "READ_ONLY_TOKENS": bool(os.getenv("READ_ONLY_TOKENS")),
        },
        "token_counts": {
            "admin_tokens": len(token_manager.admin_tokens),
            "read_only_tokens": len(token_manager.read_only_tokens),
        },
        "admin_tokens_preview": [_fingerprint(t) for t in list(token_manager.admin_tokens)[:3]],
        "read_only_tokens_preview": [_fingerprint(t) for t in list(token_manager.read_only_tokens)[:3]],
    }
@app.get("/api/traces/status")
async def get_status():
    """Server status summary (directories + whether a real key is set)."""
    # Encryption is considered enabled only when the default dev key
    # has been replaced.
    default_key_in_use = ENCRYPTION_PASSWORD == "rpa_vision_v3_default_key"
    return {
        "status": "online",
        "version": "1.0.0",
        "upload_dir": str(UPLOAD_DIR),
        "sessions_dir": str(SESSIONS_DIR),
        "encryption_enabled": not default_key_in_use,
    }
@app.get("/api/traces/sessions")
async def list_sessions():
    """List all received sessions found under SESSIONS_DIR."""
    collected = []
    for entry in SESSIONS_DIR.iterdir():
        if not entry.is_dir():
            continue
        # Each session directory is expected to contain <id>/<id>.json.
        candidates = list(entry.glob("*/*.json"))
        if not candidates:
            continue
        metadata_path = candidates[0]
        try:
            raw = RawSession.load_from_file(metadata_path)
            collected.append({
                "session_id": raw.session_id,
                "started_at": raw.started_at.isoformat(),
                "ended_at": raw.ended_at.isoformat() if raw.ended_at else None,
                "events_count": len(raw.events),
                "screenshots_count": len(raw.screenshots),
                "user": raw.user,
            })
        except Exception as e:
            # Unreadable sessions are skipped but logged.
            logger.warning(f"Erreur lecture session {entry.name}: {e}")
    return {
        "sessions": collected,
        "total": len(collected),
    }
@app.get("/")
async def root():
    """Landing page: service name and the main endpoints."""
    endpoint_map = {
        "upload": "POST /api/traces/upload",
        "status": "GET /api/traces/status",
        "sessions": "GET /api/traces/sessions",
    }
    return {
        "message": "RPA Vision V3 - Agent Upload API",
        "endpoints": endpoint_map,
    }
@app.get("/healthz")
async def healthz():
    """Ultra-simple healthcheck (k8s/systemd timers)."""
    # Deliberately free of heavy dependencies: this is just a ping.
    payload = {
        "status": "ok",
        "service": "rpa-vision-v3-api",
        "environment": ENVIRONMENT,
        "worker_mode": PROCESSING_WORKER_MODE,
    }
    return payload
@app.get("/metrics")
async def metrics():
    """Prometheus metrics endpoint (public)."""
    try:
        payload = generate_latest()
    except Exception as exc:
        # Never let a Prometheus failure take the API down.
        return Response(content=f"# error: {exc}\n", media_type="text/plain", status_code=500)
    return Response(content=payload, media_type=CONTENT_TYPE_LATEST)
@app.on_event("startup")
async def startup_event():
    """Server startup: launch the processing worker (thread mode only)."""
    if PROCESSING_WORKER_MODE != "thread":
        # Another component (external worker) owns processing.
        logger.info(f"Processing worker disabled for API (mode={PROCESSING_WORKER_MODE})")
        return
    try:
        from processing_pipeline import process_session_async
        from processing_queue import start_processing_worker
        start_processing_worker(process_session_async)
    except Exception as exc:
        logger.warning(f"Impossible de démarrer le worker: {exc}")
    else:
        logger.info("Worker de processing démarré (thread mode)")
@app.on_event("shutdown")
async def shutdown_event():
    """Server shutdown: stop the processing worker cleanly."""
    if PROCESSING_WORKER_MODE != "thread":
        # Nothing to stop: the worker was never started in this process.
        return
    try:
        from processing_queue import stop_processing_worker
        stop_processing_worker()
    except Exception as exc:
        logger.warning(f"Erreur arrêt worker: {exc}")
    else:
        logger.info("Worker de processing arrêté")
@app.get("/api/traces/queue")
async def get_queue_status():
    """Processing queue status: per-state counts plus the latest entries."""
    try:
        from processing_queue import get_queue
        items = get_queue().get_all()
        # Single pass over the queue instead of one scan per status.
        counts = {"pending": 0, "processing": 0, "completed": 0, "failed": 0}
        for item in items:
            state = item["status"]
            if state in counts:
                counts[state] += 1
        return {
            "total": len(items),
            "pending": counts["pending"],
            "processing": counts["processing"],
            "completed": counts["completed"],
            "failed": counts["failed"],
            "items": items[-20:],  # last 20 entries
        }
    except Exception as exc:
        return {"error": str(exc)}
if __name__ == "__main__":
    # Validate the security configuration before serving anything
    # (fail fast in production).
    from core.security import validate_production_security, get_security_config
    try:
        config = get_security_config()
        validate_production_security(config)
    except Exception as e:
        logger.error(f"Security validation failed: {e}")
        sys.exit(1)
    # Initialize the cleanup subsystem
    from core.system import initialize_system_cleanup, shutdown_system
    initialize_system_cleanup()
    logger.info("Démarrage du serveur API...")
    logger.info(f"Upload dir: {UPLOAD_DIR.absolute()}")
    logger.info(f"Sessions dir: {SESSIONS_DIR.absolute()}")
    logger.info(f"Encryption password: {'***' if ENCRYPTION_PASSWORD != 'rpa_vision_v3_default_key' else 'DEFAULT (changer!)'}")
    try:
        # Bind on all interfaces, port 8002.
        uvicorn.run(
            app,
            host="0.0.0.0",
            port=8002,
            log_level="info"
        )
    except KeyboardInterrupt:
        logger.info("Received keyboard interrupt, shutting down...")
        shutdown_system()
    except Exception as e:
        # Make sure cleanup runs before propagating the failure.
        logger.error(f"Server error: {e}")
        shutdown_system()
        raise

58
server/bootstrap_local_env.sh Executable file
View File

@@ -0,0 +1,58 @@
#!/usr/bin/env bash
# server/bootstrap_local_env.sh
#
# Creates (if missing) and loads a .env.local file for DEV launches.
# Goal: avoid forgetting the auth tokens (Fiche #23) while keeping a
# "plug&play" developer experience.
#
# Usage:
#   ./server/bootstrap_local_env.sh        (from the project root)
set -euo pipefail
# Resolve the project root from this script's location (overridable via env).
PROJECT_DIR="${PROJECT_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}"
ENV_FILE="${ENV_FILE:-$PROJECT_DIR/.env.local}"
_has_cmd() { command -v "$1" >/dev/null 2>&1; }
# Generate 32 random bytes as 64 hex chars; prefer openssl, fall back to python3.
_gen_hex_32() {
  if _has_cmd openssl; then
    openssl rand -hex 32
  else
    python3 - <<'PY'
import secrets
print(secrets.token_hex(32))
PY
  fi
}
if [[ ! -f "$ENV_FILE" ]]; then
  # Restrictive permissions: the generated file contains secrets.
  umask 077
  cat >"$ENV_FILE" <<EOF
# .env.local (DEV) - généré automatiquement
ENVIRONMENT=development
# Secrets
ENCRYPTION_PASSWORD=$(_gen_hex_32)
SECRET_KEY=$(_gen_hex_32)
# Fiche #23 - tokens
RPA_TOKEN_ADMIN=$(_gen_hex_32)
RPA_TOKEN_READONLY=$(_gen_hex_32)
# Fiche #22 - admin autoheal
AUTOHEAL_ADMIN_TOKEN=$(_gen_hex_32)
# En dev on garde l'auth active pour éviter les surprises en prod
RPA_AUTH_REQUIRED=true
EOF
  echo "✅ Créé: $ENV_FILE"
else
  echo "✅ Env local présent: $ENV_FILE"
fi
# Load the env into the current process (only persists when sourced).
set -a
source "$ENV_FILE"
set +a
echo "🔑 Dashboard: http://localhost:${RPA_DASHBOARD_PORT:-5001}/?token=${RPA_TOKEN_READONLY}"
echo "🔐 API: utilisez le header Authorization: Bearer <token> (read-only ou admin)"
87
server/bootstrap_secrets_env.sh Executable file
View File

@@ -0,0 +1,87 @@
#!/usr/bin/env bash
# server/bootstrap_secrets_env.sh
#
# Automatically generates any missing secrets/tokens in a .env file (PROD)
# without printing the values (so they cannot leak into logs).
#
# Usage:
#   sudo ./server/bootstrap_secrets_env.sh /etc/rpa_vision_v3/rpa_vision_v3.env
set -euo pipefail
ENV_FILE="${1:-/etc/rpa_vision_v3/rpa_vision_v3.env}"
if [[ ! -f "$ENV_FILE" ]]; then
  echo "❌ Env file introuvable: $ENV_FILE" >&2
  exit 1
fi
# Restrictive permissions for anything we create from here on.
umask 077
_has_cmd() { command -v "$1" >/dev/null 2>&1; }
_gen_hex_32() {
  # 32 bytes => 64 hex chars
  if _has_cmd openssl; then
    openssl rand -hex 32
  else
    python3 - <<'PY'
import secrets
print(secrets.token_hex(32))
PY
  fi
}
_get_value() {
  local key="$1"
  # Return the first KEY=... occurrence (ignoring commented lines).
  grep -E "^${key}=" "$ENV_FILE" 2>/dev/null | head -n 1 | cut -d'=' -f2- || true
}
_is_placeholder() {
  # True (exit 0) when the value is empty or a known placeholder/default.
  local v="$1"
  [[ -z "$v" ]] && return 0
  [[ "$v" == "CHANGE_ME" ]] && return 0
  [[ "$v" == CHANGE_ME_* ]] && return 0
  [[ "$v" == "rpa_vision_v3_default_key" ]] && return 0
  return 1
}
_set_key() {
  local key="$1"
  local value="$2"
  if grep -qE "^${key}=" "$ENV_FILE"; then
    # Replace the whole existing line in place.
    sed -i -E "s|^${key}=.*|${key}=${value}|" "$ENV_FILE"
  else
    echo "${key}=${value}" >> "$ENV_FILE"
  fi
}
_ensure_key() {
  # Generate a fresh secret for $1 unless a real value is already present.
  local key="$1"
  local cur
  cur="$(_get_value "$key")"
  if _is_placeholder "$cur"; then
    _set_key "$key" "$(_gen_hex_32)"
    echo "✅ Secret généré: $key"
  else
    echo "✅ Secret OK: $key"
  fi
}
echo "🔐 Bootstrap secrets: $ENV_FILE"
# Core secrets (always useful)
_ensure_key "ENCRYPTION_PASSWORD"
_ensure_key "SECRET_KEY"
# API security tokens (Fiche #23)
_ensure_key "RPA_TOKEN_ADMIN"
_ensure_key "RPA_TOKEN_READONLY"
# AutoHeal admin token (Fiche #22)
_ensure_key "AUTOHEAL_ADMIN_TOKEN"
echo "✅ Bootstrap terminé (les valeurs sont écrites dans $ENV_FILE)."

58
server/healthcheck.sh Executable file
View File

@@ -0,0 +1,58 @@
#!/usr/bin/env bash
# server/healthcheck.sh
#
# Fiche #21 (prod): multi-component healthcheck.
# - Upload API (FastAPI)
# - Dashboard (Flask)
# - External worker (heartbeat file)
# - Disk (minimum free space)
# Exit code 0 = healthy, 2 = a check failed (message printed on stderr).
set -euo pipefail
# All endpoints/thresholds are overridable through RPA_* environment variables.
API_HOST="${RPA_API_HOST:-127.0.0.1}"
API_PORT="${RPA_API_PORT:-8000}"
DASH_HOST="${RPA_DASHBOARD_HOST:-127.0.0.1}"
DASH_PORT="${RPA_DASHBOARD_PORT:-5001}"
CHECK_DASHBOARD="${RPA_CHECK_DASHBOARD:-1}"
HEARTBEAT_PATH="${RPA_WORKER_HEARTBEAT_PATH:-data/runtime/health/worker_heartbeat.json}"
HEARTBEAT_MAX_AGE_S="${RPA_WORKER_HEARTBEAT_MAX_AGE_S:-60}"
MIN_FREE_MB="${RPA_MIN_FREE_MB:-1024}"
fail() {
  echo "$1" >&2
  exit 2
}
# --- API ---
curl -fsS --max-time 2 "http://${API_HOST}:${API_PORT}/healthz" >/dev/null \
  || fail "API healthz unreachable (http://${API_HOST}:${API_PORT}/healthz)"
# --- Dashboard (optional) ---
if [[ "${CHECK_DASHBOARD}" == "1" ]]; then
  curl -fsS --max-time 2 "http://${DASH_HOST}:${DASH_PORT}/healthz" >/dev/null \
    || fail "Dashboard healthz unreachable (http://${DASH_HOST}:${DASH_PORT}/healthz)"
fi
# --- Worker heartbeat (optional: only checked when the file exists) ---
if [[ -f "${HEARTBEAT_PATH}" ]]; then
  # age in seconds; GNU stat -c (falls back to 0, which reads as "stale")
  now=$(date +%s)
  mtime=$(stat -c %Y "${HEARTBEAT_PATH}" 2>/dev/null || echo 0)
  age=$((now - mtime))
  if (( age > HEARTBEAT_MAX_AGE_S )); then
    fail "Worker heartbeat too old: ${age}s (> ${HEARTBEAT_MAX_AGE_S}s)"
  fi
fi
# --- Disk space (free MB on the current working directory's filesystem) ---
free_kb=$(df -Pk . | awk 'NR==2 {print $4}')
free_mb=$((free_kb / 1024))
if (( free_mb < MIN_FREE_MB )); then
  fail "Low disk space: ${free_mb}MB free (< ${MIN_FREE_MB}MB)"
fi
echo "✅ healthcheck ok"

93
server/install_prod_stack.sh Executable file
View File

@@ -0,0 +1,93 @@
#!/usr/bin/env bash
# server/install_prod_stack.sh
#
# Fiche #21 (prod): systemd installation + env file + timers
# (healthcheck + artifact retention).
set -euo pipefail
# Must run as root: writes under /etc and manages systemd units.
if [[ "${EUID}" -ne 0 ]]; then
  echo "❌ Ce script doit être exécuté en root (sudo)" >&2
  exit 1
fi
INSTALL_DIR="${INSTALL_DIR:-/opt/rpa_vision_v3}"
SERVICE_USER="${SERVICE_USER:-rpa}"
echo "========================================"
echo "Installation RPA Vision V3 - PROD stack"
echo "========================================"
# 1) Service user (system account, no shell)
if ! id "$SERVICE_USER" &>/dev/null; then
  echo "📝 Création utilisateur $SERVICE_USER"
  useradd --system --no-create-home --shell /bin/false "$SERVICE_USER"
fi
# 2) Check the install directory exists
if [[ ! -d "$INSTALL_DIR" ]]; then
  echo "❌ Répertoire $INSTALL_DIR introuvable." >&2
  echo "   Copiez le projet dans $INSTALL_DIR puis relancez." >&2
  exit 1
fi
# 3) Data/log directories owned by the service user
mkdir -p "$INSTALL_DIR/data" "$INSTALL_DIR/logs" "$INSTALL_DIR/data/runtime/health" "$INSTALL_DIR/data/archives"
chown -R "$SERVICE_USER":"$SERVICE_USER" "$INSTALL_DIR/data" "$INSTALL_DIR/logs"
# 4) Env file (created from the template on first install)
mkdir -p /etc/rpa_vision_v3
if [[ ! -f /etc/rpa_vision_v3/rpa_vision_v3.env ]]; then
  echo "📝 Création /etc/rpa_vision_v3/rpa_vision_v3.env (template)"
  cp "$INSTALL_DIR/deploy/systemd/rpa_vision_v3.env.example" /etc/rpa_vision_v3/rpa_vision_v3.env
  # Read access required for the systemd service user (rpa)
  chown root:"$SERVICE_USER" /etc/rpa_vision_v3/rpa_vision_v3.env
  chmod 640 /etc/rpa_vision_v3/rpa_vision_v3.env
else
  echo "✅ Env file déjà présent : /etc/rpa_vision_v3/rpa_vision_v3.env"
  # If an old chmod 600 root:root is lying around, fix it without breaking systemd
  chown root:"$SERVICE_USER" /etc/rpa_vision_v3/rpa_vision_v3.env || true
  chmod 640 /etc/rpa_vision_v3/rpa_vision_v3.env || true
fi
# 4.b) Automatic secret/token generation (anti-oversight)
chmod +x "$INSTALL_DIR/server/bootstrap_secrets_env.sh" "$INSTALL_DIR/server/validate_secrets.sh" 2>/dev/null || true
echo "🔐 Génération automatique des secrets/tokens (si placeholder)"
"$INSTALL_DIR/server/bootstrap_secrets_env.sh" /etc/rpa_vision_v3/rpa_vision_v3.env
"$INSTALL_DIR/server/validate_secrets.sh" /etc/rpa_vision_v3/rpa_vision_v3.env
# 5) systemd units
echo "📝 Installation unités systemd"
cp "$INSTALL_DIR"/deploy/systemd/*.service /etc/systemd/system/
cp "$INSTALL_DIR"/deploy/systemd/*.timer /etc/systemd/system/
# 6) Executable bits
chmod +x "$INSTALL_DIR/server/healthcheck.sh" || true
# 7) logrotate (optional)
if [[ -d /etc/logrotate.d ]]; then
  cp "$INSTALL_DIR/deploy/logrotate/rpa-vision-v3" /etc/logrotate.d/rpa-vision-v3
fi
systemctl daemon-reload
# 8) Enable services and timers at boot
systemctl enable rpa-vision-v3-api.service
systemctl enable rpa-vision-v3-dashboard.service
systemctl enable rpa-vision-v3-healthcheck.timer
systemctl enable rpa-vision-v3-artifact-retention.timer
# External worker: enabled here, but the API must be configured to
# "external" mode through the env file for it to take over processing.
systemctl enable rpa-vision-v3-worker.service || true
echo ""
echo "✅ Install terminé. Prochaines étapes :"
echo "1) Vérifier (et si besoin ajuster) :  nano /etc/rpa_vision_v3/rpa_vision_v3.env"
echo "   - Tokens générés automatiquement (RPA_TOKEN_*, AUTOHEAL_ADMIN_TOKEN)"
echo "   - Secrets (ENCRYPTION_PASSWORD, SECRET_KEY)"
echo "2) Choisir le mode worker : RPA_PROCESSING_WORKER=thread|external|disabled"
echo "3) Démarrer :"
echo "   systemctl start rpa-vision-v3-api rpa-vision-v3-dashboard rpa-vision-v3-worker"
echo "4) Vérifier :"
echo "   systemctl status rpa-vision-v3-api rpa-vision-v3-dashboard rpa-vision-v3-worker"
echo "   journalctl -u rpa-vision-v3-api -f"
echo ""

68
server/install_service.sh Executable file
View File

@@ -0,0 +1,68 @@
#!/bin/bash
# install_service.sh
# Installs the systemd service for the RPA Vision V3 API.
set -e
echo "========================================"
echo "Installation Service RPA Vision V3 API"
echo "========================================"
# Must run as root: writes under /etc/systemd and manages system users.
if [ "$EUID" -ne 0 ]; then
    echo "❌ Ce script doit être exécuté en root (sudo)"
    exit 1
fi
# Variables
INSTALL_DIR="/opt/rpa_vision_v3"
SERVICE_USER="rpa"
SERVICE_FILE="rpa-vision-api.service"
# Create the service user if needed (system account, no shell)
if ! id "$SERVICE_USER" &>/dev/null; then
    echo "📝 Création utilisateur $SERVICE_USER..."
    useradd --system --no-create-home --shell /bin/false $SERVICE_USER
fi
# Check the project has been copied into place
if [ ! -d "$INSTALL_DIR" ]; then
    echo "❌ Répertoire $INSTALL_DIR non trouvé"
    echo "   Copiez le projet dans $INSTALL_DIR ou modifiez le chemin dans le service"
    exit 1
fi
# Create data directories
mkdir -p "$INSTALL_DIR/data/training/uploads"
mkdir -p "$INSTALL_DIR/data/training/sessions"
mkdir -p "$INSTALL_DIR/logs"
# Permissions: service user owns data and logs
chown -R $SERVICE_USER:$SERVICE_USER "$INSTALL_DIR/data"
chown -R $SERVICE_USER:$SERVICE_USER "$INSTALL_DIR/logs"
# Copy the unit file into systemd
echo "📝 Installation du service systemd..."
cp "$INSTALL_DIR/server/$SERVICE_FILE" /etc/systemd/system/
# Reload systemd so it picks up the new unit
systemctl daemon-reload
# Enable the service at boot
systemctl enable rpa-vision-api
echo ""
echo "✅ Service installé!"
echo ""
echo "⚠️  IMPORTANT: Configurez le mot de passe de chiffrement:"
echo "   sudo systemctl edit rpa-vision-api"
echo "   Ajoutez:"
echo "   [Service]"
echo "   Environment=\"ENCRYPTION_PASSWORD=VotreCléSecrète\""
echo ""
echo "📋 Commandes utiles:"
echo "   sudo systemctl start rpa-vision-api    # Démarrer"
echo "   sudo systemctl stop rpa-vision-api     # Arrêter"
echo "   sudo systemctl status rpa-vision-api   # Status"
echo "   sudo journalctl -u rpa-vision-api -f   # Logs en temps réel"
echo ""

427
server/nginx_https_setup.md Normal file
View File

@@ -0,0 +1,427 @@
# Configuration HTTPS Production - RPA Vision V3
**Guide complet pour sécuriser l'API et le Dashboard avec HTTPS**
## 📋 Prérequis
- Serveur Linux (Ubuntu/Debian recommandé)
- Nom de domaine pointant vers votre serveur
- Ports 80 et 443 ouverts dans le firewall
- Accès root/sudo
## 🔧 Installation
### 1. Installer Nginx
```bash
sudo apt update
sudo apt install nginx -y
```
### 2. Installer Certbot (Let's Encrypt)
```bash
sudo apt install certbot python3-certbot-nginx -y
```
## 🌐 Configuration Nginx
### 1. Créer la configuration pour l'API
**Fichier:** `/etc/nginx/sites-available/rpa-api`
```nginx
# Configuration API Upload
server {
listen 80;
server_name api.votre-domaine.com;
# Redirection HTTP → HTTPS (sera ajoutée par certbot)
location / {
proxy_pass http://127.0.0.1:8000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Augmenter la taille max pour les uploads
client_max_body_size 100M;
client_body_timeout 300s;
# Timeouts
proxy_connect_timeout 300s;
proxy_send_timeout 300s;
proxy_read_timeout 300s;
}
}
```
### 2. Créer la configuration pour le Dashboard
**Fichier:** `/etc/nginx/sites-available/rpa-dashboard`
```nginx
# Configuration Dashboard Web
server {
listen 80;
server_name dashboard.votre-domaine.com;
# Redirection HTTP → HTTPS (sera ajoutée par certbot)
location / {
proxy_pass http://127.0.0.1:5001;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# WebSocket support (si nécessaire)
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
}
```
### 3. Activer les configurations
```bash
# Créer les liens symboliques
sudo ln -s /etc/nginx/sites-available/rpa-api /etc/nginx/sites-enabled/
sudo ln -s /etc/nginx/sites-available/rpa-dashboard /etc/nginx/sites-enabled/
# Tester la configuration
sudo nginx -t
# Recharger Nginx
sudo systemctl reload nginx
```
## 🔒 Obtenir les Certificats SSL
### 1. Certificat pour l'API
```bash
sudo certbot --nginx -d api.votre-domaine.com
```
**Questions interactives:**
- Email: votre@email.com
- Accepter les termes: Yes
- Partager email: No (optionnel)
- Redirection HTTPS: Yes (recommandé)
### 2. Certificat pour le Dashboard
```bash
sudo certbot --nginx -d dashboard.votre-domaine.com
```
### 3. Vérifier les certificats
```bash
sudo certbot certificates
```
**Sortie attendue:**
```
Found the following certs:
Certificate Name: api.votre-domaine.com
Domains: api.votre-domaine.com
Expiry Date: 2026-02-23 (VALID: 89 days)
Certificate Name: dashboard.votre-domaine.com
Domains: dashboard.votre-domaine.com
Expiry Date: 2026-02-23 (VALID: 89 days)
```
## 🚀 Démarrer les Services
### 1. Créer le service systemd pour l'API
**Fichier:** `/etc/systemd/system/rpa-api.service`
```ini
[Unit]
Description=RPA Vision V3 - API Upload
After=network.target
[Service]
Type=simple
User=rpa
WorkingDirectory=/home/rpa/rpa_vision_v3/server
Environment="ENCRYPTION_PASSWORD=VotreCléSecrète2025"
Environment="PATH=/home/rpa/rpa_vision_v3/venv_v3/bin:/usr/local/bin:/usr/bin:/bin"
ExecStart=/home/rpa/rpa_vision_v3/venv_v3/bin/uvicorn api_upload:app --host 127.0.0.1 --port 8000
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
```
### 2. Créer le service systemd pour le Dashboard
**Fichier:** `/etc/systemd/system/rpa-dashboard.service`
```ini
[Unit]
Description=RPA Vision V3 - Dashboard Web
After=network.target
[Service]
Type=simple
User=rpa
WorkingDirectory=/home/rpa/rpa_vision_v3/web_dashboard
Environment="PATH=/home/rpa/rpa_vision_v3/venv_v3/bin:/usr/local/bin:/usr/bin:/bin"
ExecStart=/home/rpa/rpa_vision_v3/venv_v3/bin/python app.py
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
```
### 3. Activer et démarrer les services
```bash
# Recharger systemd
sudo systemctl daemon-reload
# Activer au démarrage
sudo systemctl enable rpa-api
sudo systemctl enable rpa-dashboard
# Démarrer les services
sudo systemctl start rpa-api
sudo systemctl start rpa-dashboard
# Vérifier le statut
sudo systemctl status rpa-api
sudo systemctl status rpa-dashboard
```
## 🔥 Configuration Firewall
```bash
# Autoriser HTTP et HTTPS
sudo ufw allow 80/tcp
sudo ufw allow 443/tcp
# Bloquer l'accès direct aux ports internes
sudo ufw deny 8000/tcp
sudo ufw deny 5001/tcp
# Activer le firewall
sudo ufw enable
# Vérifier
sudo ufw status
```
## ✅ Vérification
### 1. Tester l'API
```bash
# HTTP (devrait rediriger vers HTTPS)
curl -I http://api.votre-domaine.com/api/traces/status
# HTTPS
curl https://api.votre-domaine.com/api/traces/status
```
**Réponse attendue:**
```json
{
"status": "online",
"version": "1.0.0",
"encryption_enabled": true
}
```
### 2. Tester le Dashboard
Ouvrir dans un navigateur:
```
https://dashboard.votre-domaine.com
```
### 3. Tester l'upload depuis l'agent
**Configurer l'agent:**
```json
{
"server_url": "https://api.votre-domaine.com/api/traces/upload",
"encryption_password": "VotreCléSecrète2025"
}
```
## 🔄 Renouvellement Automatique
Certbot configure automatiquement le renouvellement. Vérifier:
```bash
# Tester le renouvellement (dry-run)
sudo certbot renew --dry-run
# Voir le timer systemd
sudo systemctl list-timers | grep certbot
```
**Le timer certbot s'exécute deux fois par jour et renouvelle automatiquement tout certificat à moins de 30 jours de son expiration.**
## 📊 Monitoring
### 1. Logs Nginx
```bash
# Logs d'accès
sudo tail -f /var/log/nginx/access.log
# Logs d'erreur
sudo tail -f /var/log/nginx/error.log
```
### 2. Logs Services
```bash
# API
sudo journalctl -u rpa-api -f
# Dashboard
sudo journalctl -u rpa-dashboard -f
```
### 3. Vérifier les certificats
```bash
# Expiration
sudo certbot certificates
# Tester SSL
curl -vI https://api.votre-domaine.com 2>&1 | grep -i ssl
```
## 🔒 Sécurité Avancée
### 1. Améliorer la configuration SSL
**Ajouter dans les blocs `server` Nginx:**
```nginx
# SSL Configuration
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers on;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
# Security Headers
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-XSS-Protection "1; mode=block" always;
```
### 2. Limiter les tentatives de connexion
```nginx
# Dans le bloc http de /etc/nginx/nginx.conf
limit_req_zone $binary_remote_addr zone=api_limit:10m rate=10r/s;
# Dans le bloc location de l'API
location /api/traces/upload {
limit_req zone=api_limit burst=20 nodelay;
# ... reste de la config
}
```
### 3. Authentification basique (optionnel)
```bash
# Créer un fichier de mots de passe
sudo apt install apache2-utils
sudo htpasswd -c /etc/nginx/.htpasswd admin
# Ajouter dans la config Nginx
auth_basic "RPA Vision V3";
auth_basic_user_file /etc/nginx/.htpasswd;
```
## 🚨 Troubleshooting
### Problème: Certificat non obtenu
```bash
# Vérifier DNS
nslookup api.votre-domaine.com
# Vérifier port 80 accessible
sudo netstat -tlnp | grep :80
# Logs certbot
sudo tail -f /var/log/letsencrypt/letsencrypt.log
```
### Problème: 502 Bad Gateway
```bash
# Vérifier que les services tournent
sudo systemctl status rpa-api
sudo systemctl status rpa-dashboard
# Vérifier les ports
sudo netstat -tlnp | grep -E '8000|5001'
# Logs Nginx
sudo tail -f /var/log/nginx/error.log
```
### Problème: Upload échoue
```bash
# Vérifier taille max
grep client_max_body_size /etc/nginx/sites-available/rpa-api
# Augmenter si nécessaire
client_max_body_size 200M;
# Recharger Nginx
sudo systemctl reload nginx
```
## 📝 Checklist Déploiement
- [ ] Nom de domaine configuré (DNS A record)
- [ ] Nginx installé et configuré
- [ ] Certificats SSL obtenus (Let's Encrypt)
- [ ] Services systemd créés et démarrés
- [ ] Firewall configuré (80, 443 ouverts)
- [ ] Test API: `curl https://api.votre-domaine.com/api/traces/status`
- [ ] Test Dashboard: Ouvrir dans navigateur
- [ ] Test upload depuis agent
- [ ] Renouvellement auto vérifié: `sudo certbot renew --dry-run`
- [ ] Monitoring configuré (logs)
## 🎯 Résultat Final
**Après configuration:**
- ✅ API accessible via: `https://api.votre-domaine.com`
- ✅ Dashboard accessible via: `https://dashboard.votre-domaine.com`
- ✅ Certificats SSL valides (Let's Encrypt)
- ✅ Renouvellement automatique
- ✅ Firewall configuré
- ✅ Services auto-start au boot
**Les agents peuvent maintenant uploader en HTTPS sécurisé!** 🔒
---
**Besoin d'aide?**
- Logs Nginx: `/var/log/nginx/`
- Logs Certbot: `/var/log/letsencrypt/`
- Logs Services: `sudo journalctl -u rpa-api`

237
server/processing_queue.py Normal file
View File

@@ -0,0 +1,237 @@
#!/usr/bin/env python3
"""
Queue persistante pour le traitement des sessions.
Gère une file d'attente de sessions à traiter avec:
- Persistance sur disque (survit aux redémarrages)
- Worker en arrière-plan
- Retry automatique en cas d'échec
- Status de traitement
"""
import json
import logging
import os
import threading
import time
from pathlib import Path
from datetime import datetime
from typing import Dict, List, Optional
from enum import Enum
# Module-level logger shared by the queue and its worker thread.
logger = logging.getLogger("processing_queue")
class ProcessingStatus(str, Enum):
    """Lifecycle state of a queued session.

    The str mixin makes members serialize as their plain values
    ("pending", ...) in the JSON queue file and compare equal to
    those strings after reload.
    """
    PENDING = "pending"
    PROCESSING = "processing"
    COMPLETED = "completed"
    FAILED = "failed"
class ProcessingQueue:
    """
    Persistent processing queue for recorded sessions.

    Entries are stored as a JSON list on disk so the queue survives process
    restarts. Public methods are thread-safe (guarded by a single lock) and
    a background worker thread pulls pending entries and processes them,
    with automatic retry (up to 3 attempts) on failure.
    """
    def __init__(self, queue_file: str = "data/training/.processing_queue.json"):
        # queue_file: JSON file holding the list of queue entries.
        self.queue_file = Path(queue_file)
        self.queue_file.parent.mkdir(parents=True, exist_ok=True)
        self._lock = threading.Lock()
        self._worker_thread: Optional[threading.Thread] = None
        self._running = False
    def add(self, session_id: str, base_path: str = "data/training") -> None:
        """Add a session to the processing queue (no-op if already queued)."""
        with self._lock:
            queue = self._load()
            # Avoid duplicate entries for the same session
            if any(item["session_id"] == session_id for item in queue):
                logger.info(f"Session {session_id} déjà dans la queue")
                return
            queue.append({
                "session_id": session_id,
                "base_path": base_path,
                "status": ProcessingStatus.PENDING,
                "added_at": datetime.now().isoformat(),
                "attempts": 0,
                "last_error": None
            })
            self._save(queue)
            logger.info(f"Session {session_id} ajoutée à la queue")
    def get_status(self, session_id: str) -> Optional[Dict]:
        """Return the queue entry for a session, or None if unknown."""
        with self._lock:
            queue = self._load()
            for item in queue:
                if item["session_id"] == session_id:
                    return item
            return None
    def get_all(self) -> List[Dict]:
        """Return all entries currently in the queue."""
        with self._lock:
            return self._load()
    def get_pending_count(self) -> int:
        """Return the number of entries still waiting to be processed."""
        with self._lock:
            queue = self._load()
            return sum(1 for item in queue if item["status"] == ProcessingStatus.PENDING)
    def start_worker(self, process_func) -> None:
        """
        Start the background processing worker.

        Args:
            process_func: Processing callable (session_id, base_path) -> dict;
                a result with status == "success" marks the entry completed.
        """
        if self._running:
            logger.warning("Worker déjà en cours d'exécution")
            return
        self._running = True
        self._worker_thread = threading.Thread(
            target=self._worker_loop,
            args=(process_func,),
            name="ProcessingWorker",
            daemon=False  # non-daemon so an in-flight session can finish
        )
        self._worker_thread.start()
        logger.info("Worker de traitement démarré")
    def stop_worker(self, wait: bool = True) -> None:
        """Stop the worker; optionally wait (up to 30s) for it to finish."""
        self._running = False
        if wait and self._worker_thread:
            self._worker_thread.join(timeout=30)
        logger.info("Worker de traitement arrêté")
    def _worker_loop(self, process_func) -> None:
        """Main worker loop: pick pending entries and process them."""
        while self._running:
            try:
                # Look for the next session to process
                session = self._get_next_pending()
                if session:
                    session_id = session["session_id"]
                    base_path = session["base_path"]
                    logger.info(f"Traitement de la session: {session_id}")
                    self._update_status(session_id, ProcessingStatus.PROCESSING)
                    try:
                        # Run the processing pipeline for this session
                        result = process_func(session_id, base_path)
                        if result.get("status") == "success":
                            self._update_status(session_id, ProcessingStatus.COMPLETED)
                            logger.info(f"Session {session_id} traitée avec succès")
                        else:
                            error = result.get("errors", ["Unknown error"])
                            self._mark_failed(session_id, str(error))
                            logger.error(f"Échec traitement session {session_id}: {error}")
                    except Exception as e:
                        self._mark_failed(session_id, str(e))
                        logger.exception(f"Erreur traitement session {session_id}: {e}")
                else:
                    # Nothing to do: idle poll
                    time.sleep(5)
            except Exception as e:
                logger.exception(f"Erreur dans le worker: {e}")
                time.sleep(10)
    def _get_next_pending(self) -> Optional[Dict]:
        """Return the next pending entry, or None when the queue is idle."""
        with self._lock:
            queue = self._load()
            for item in queue:
                if item["status"] == ProcessingStatus.PENDING:
                    return item
            return None
    def _update_status(self, session_id: str, status: ProcessingStatus) -> None:
        """Set the status of a session and persist the queue."""
        with self._lock:
            queue = self._load()
            for item in queue:
                if item["session_id"] == session_id:
                    item["status"] = status
                    item["updated_at"] = datetime.now().isoformat()
                    break
            self._save(queue)
    def _mark_failed(self, session_id: str, error: str) -> None:
        """Record a failed attempt; retry up to 3 times, then mark FAILED."""
        with self._lock:
            queue = self._load()
            for item in queue:
                if item["session_id"] == session_id:
                    item["attempts"] = item.get("attempts", 0) + 1
                    item["last_error"] = error
                    item["updated_at"] = datetime.now().isoformat()
                    # After 3 attempts, fail permanently
                    if item["attempts"] >= 3:
                        item["status"] = ProcessingStatus.FAILED
                    else:
                        item["status"] = ProcessingStatus.PENDING  # Retry
                    break
            self._save(queue)
    def _load(self) -> List[Dict]:
        """Load the queue from disk; an unreadable file yields an empty queue."""
        if self.queue_file.exists():
            try:
                with open(self.queue_file, "r") as f:
                    return json.load(f)
            except Exception as e:
                # Do not crash on a corrupted file, but make it visible.
                logger.warning(f"Queue file illisible ({self.queue_file}): {e}")
        return []
    def _save(self, queue: List[Dict]) -> None:
        """Persist the queue atomically (temp file + os.replace).

        Writing to a sibling temp file first means a crash mid-write cannot
        corrupt the real queue file; os.replace then swaps it in atomically.
        """
        tmp_path = self.queue_file.parent / (self.queue_file.name + ".tmp")
        try:
            with open(tmp_path, "w") as f:
                json.dump(queue, f, indent=2, default=str)
            os.replace(tmp_path, self.queue_file)
        except Exception as e:
            logger.error(f"Erreur sauvegarde queue: {e}")
# Module-level singleton
_queue: Optional[ProcessingQueue] = None
def get_queue() -> ProcessingQueue:
    """Return the process-wide ProcessingQueue, creating it on first use."""
    global _queue
    if _queue is not None:
        return _queue
    _queue = ProcessingQueue()
    return _queue
def add_to_queue(session_id: str, base_path: str = "data/training") -> None:
    """Enqueue *session_id* for background processing (singleton convenience wrapper)."""
    get_queue().add(session_id, base_path)
def get_processing_status(session_id: str) -> Optional[Dict]:
    """Return the processing status entry for *session_id*, or None if unknown."""
    return get_queue().get_status(session_id)
def start_processing_worker(process_func) -> None:
    """Start the background worker on the singleton queue with *process_func*."""
    get_queue().start_worker(process_func)
def stop_processing_worker() -> None:
    """Stop the background worker on the singleton queue (waits for it)."""
    get_queue().stop_worker()

View File

@@ -0,0 +1,10 @@
# Requirements pour le serveur API
fastapi>=0.115.0
uvicorn[standard]>=0.30.0
python-multipart>=0.0.6
cryptography>=41.0.0
# Dépendances RPA Vision V3 (déjà installées normalement)
# numpy>=1.24.0
# pillow>=10.0.0

View File

@@ -0,0 +1,27 @@
[Unit]
Description=RPA Vision V3 - API Upload Server
After=network.target
Wants=network-online.target
[Service]
Type=simple
User=rpa
Group=rpa
WorkingDirectory=/opt/rpa_vision_v3/server
Environment="PATH=/opt/rpa_vision_v3/venv_v3/bin:/usr/bin"
Environment="ENCRYPTION_PASSWORD=CHANGE_ME_IN_PRODUCTION"
Environment="ENVIRONMENT=production"
ExecStart=/opt/rpa_vision_v3/venv_v3/bin/python api_upload.py
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
# Sécurité
NoNewPrivileges=true
ProtectSystem=strict
ProtectHome=true
ReadWritePaths=/opt/rpa_vision_v3/data
[Install]
WantedBy=multi-user.target

201
server/setup_production.sh Executable file
View File

@@ -0,0 +1,201 @@
#!/bin/bash
# setup_production.sh
# Automated installation script for production:
# Nginx reverse proxy + Let's Encrypt TLS + systemd services + UFW firewall.
set -e
echo "========================================"
echo "RPA Vision V3 - Setup Production"
echo "========================================"
echo ""
# Must run as root (installs packages, writes /etc, manages systemd)
if [ "$EUID" -ne 0 ]; then
    echo "⚠️ Ce script doit être exécuté en tant que root"
    echo "   Utilisez: sudo ./setup_production.sh"
    exit 1
fi
# Interactive configuration
read -p "Nom de domaine pour l'API (ex: api.votre-domaine.com): " API_DOMAIN
read -p "Nom de domaine pour le Dashboard (ex: dashboard.votre-domaine.com): " DASHBOARD_DOMAIN
read -p "Email pour Let's Encrypt: " LETSENCRYPT_EMAIL
read -sp "Password de chiffrement: " ENCRYPTION_PASSWORD
echo ""
# Sanity-check DNS before requesting certificates
echo ""
echo "Vérification DNS..."
if ! nslookup "$API_DOMAIN" > /dev/null 2>&1; then
    echo "⚠️ ATTENTION: $API_DOMAIN ne résout pas correctement"
    read -p "Continuer quand même? (y/N): " CONTINUE
    if [ "$CONTINUE" != "y" ]; then
        exit 1
    fi
fi
# 1. Install Nginx
echo ""
echo "📦 Installation de Nginx..."
apt update
apt install -y nginx
# 2. Install Certbot
echo ""
echo "🔒 Installation de Certbot..."
apt install -y certbot python3-certbot-nginx
# 3. Nginx site for the API (proxies to uvicorn on 127.0.0.1:8000)
echo ""
echo "⚙️ Configuration Nginx pour l'API..."
cat > /etc/nginx/sites-available/rpa-api << EOF
server {
    listen 80;
    server_name $API_DOMAIN;
    location / {
        proxy_pass http://127.0.0.1:8000;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        client_max_body_size 100M;
        client_body_timeout 300s;
        proxy_connect_timeout 300s;
        proxy_send_timeout 300s;
        proxy_read_timeout 300s;
    }
}
EOF
# 4. Nginx site for the Dashboard (proxies to Flask on 127.0.0.1:5001, with websocket upgrade)
echo ""
echo "⚙️ Configuration Nginx pour le Dashboard..."
cat > /etc/nginx/sites-available/rpa-dashboard << EOF
server {
    listen 80;
    server_name $DASHBOARD_DOMAIN;
    location / {
        proxy_pass http://127.0.0.1:5001;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_http_version 1.1;
        proxy_set_header Upgrade \$http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}
EOF
# 5. Enable both sites
ln -sf /etc/nginx/sites-available/rpa-api /etc/nginx/sites-enabled/
ln -sf /etc/nginx/sites-available/rpa-dashboard /etc/nginx/sites-enabled/
# Validate the config before reloading
nginx -t
# Reload Nginx
systemctl reload nginx
# 6. Obtain SSL certificates (also rewrites the sites for HTTPS + redirect)
echo ""
echo "🔒 Obtention des certificats SSL..."
certbot --nginx -d "$API_DOMAIN" --non-interactive --agree-tos --email "$LETSENCRYPT_EMAIL" --redirect
certbot --nginx -d "$DASHBOARD_DOMAIN" --non-interactive --agree-tos --email "$LETSENCRYPT_EMAIL" --redirect
# 7. systemd service for the API
echo ""
echo "⚙️ Configuration service API..."
# Resolve the project root from this script's location
# NOTE(review): the inner command substitutions are unquoted — a project path
# containing spaces would break here; confirm install paths are space-free.
PROJECT_DIR=$(dirname $(dirname $(readlink -f "$0")))
VENV_DIR="$PROJECT_DIR/venv_v3"
cat > /etc/systemd/system/rpa-api.service << EOF
[Unit]
Description=RPA Vision V3 - API Upload
After=network.target
[Service]
Type=simple
User=$SUDO_USER
WorkingDirectory=$PROJECT_DIR/server
Environment="ENCRYPTION_PASSWORD=$ENCRYPTION_PASSWORD"
Environment="PATH=$VENV_DIR/bin:/usr/local/bin:/usr/bin:/bin"
ExecStart=$VENV_DIR/bin/uvicorn api_upload:app --host 127.0.0.1 --port 8000
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
# 8. systemd service for the Dashboard
echo ""
echo "⚙️ Configuration service Dashboard..."
cat > /etc/systemd/system/rpa-dashboard.service << EOF
[Unit]
Description=RPA Vision V3 - Dashboard Web
After=network.target
[Service]
Type=simple
User=$SUDO_USER
WorkingDirectory=$PROJECT_DIR/web_dashboard
Environment="PATH=$VENV_DIR/bin:/usr/local/bin:/usr/bin:/bin"
ExecStart=$VENV_DIR/bin/python app.py
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
# 9. Enable and start both services
echo ""
echo "🚀 Démarrage des services..."
systemctl daemon-reload
systemctl enable rpa-api
systemctl enable rpa-dashboard
systemctl start rpa-api
systemctl start rpa-dashboard
# 10. Firewall: only 80/443 exposed; backends reachable via Nginx only
echo ""
echo "🔥 Configuration firewall..."
ufw allow 80/tcp
ufw allow 443/tcp
ufw deny 8000/tcp
ufw deny 5001/tcp
echo "y" | ufw enable
# 11. Final report
echo ""
echo "========================================"
echo "✅ Installation terminée!"
echo "========================================"
echo ""
echo "📊 Statut des services:"
systemctl status rpa-api --no-pager | head -5
systemctl status rpa-dashboard --no-pager | head -5
echo ""
echo "🔒 Certificats SSL:"
certbot certificates | grep -E "Certificate Name|Domains|Expiry"
echo ""
echo "🌐 URLs:"
echo "   API:       https://$API_DOMAIN"
echo "   Dashboard: https://$DASHBOARD_DOMAIN"
echo ""
echo "🔑 Password chiffrement: ****** (configuré)"
echo ""
echo "📝 Commandes utiles:"
echo "   sudo systemctl status rpa-api"
echo "   sudo systemctl status rpa-dashboard"
echo "   sudo journalctl -u rpa-api -f"
echo "   sudo certbot renew --dry-run"
echo ""
echo "✅ Prêt pour la production!"

128
server/start_all.sh Executable file
View File

@@ -0,0 +1,128 @@
#!/bin/bash
# start_all.sh
# Starts the API and the Dashboard in development mode (both in background,
# logs to $PROJECT_DIR/logs, Ctrl+C stops both via the trap below).
set -e
echo "========================================"
echo "RPA Vision V3 - Démarrage Complet"
echo "========================================"
echo ""
# Locate the project root relative to this script
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
echo "📁 Répertoire projet: $PROJECT_DIR"
echo ""
# The virtualenv must already exist
if [ ! -d "$PROJECT_DIR/venv_v3" ]; then
    echo "❌ Environnement virtuel non trouvé: $PROJECT_DIR/venv_v3"
    echo "   Créez-le avec: python3 -m venv venv_v3"
    exit 1
fi
# Activate the virtualenv
source "$PROJECT_DIR/venv_v3/bin/activate"
# Fiche #23 - token bootstrap safeguard (DEV); best-effort, never fatal
chmod +x "$SCRIPT_DIR/bootstrap_local_env.sh" 2>/dev/null || true
"$SCRIPT_DIR/bootstrap_local_env.sh" >/dev/null || true
# Check dependencies
echo "📦 Vérification des dépendances..."
# Install server dependencies on demand
if ! python -c "import fastapi" 2>/dev/null; then
    echo "Installation des dépendances serveur..."
    pip install -r "$SCRIPT_DIR/requirements_server.txt"
fi
if ! python -c "import flask" 2>/dev/null; then
    echo "Installation des dépendances dashboard..."
    pip install -r "$PROJECT_DIR/web_dashboard/requirements.txt"
fi
echo "✅ Dépendances OK"
echo ""
# Create required directories (uploads/sessions/logs)
mkdir -p "$PROJECT_DIR/data/training/uploads"
mkdir -p "$PROJECT_DIR/data/training/sessions"
mkdir -p "$PROJECT_DIR/logs"
# Configuration (bootstrap_local_env.sh already loaded the variables)
export ENCRYPTION_PASSWORD="${ENCRYPTION_PASSWORD:-rpa_vision_v3_default_key}"
# Kill both children on Ctrl+C / SIGTERM
cleanup() {
    echo ""
    echo "🛑 Arrêt des services..."
    kill $API_PID 2>/dev/null || true
    kill $DASHBOARD_PID 2>/dev/null || true
    exit 0
}
trap cleanup SIGINT SIGTERM
# Start the API in the background, logging to logs/api.log
echo "🚀 Démarrage de l'API (port 8000)..."
cd "$SCRIPT_DIR"
python api_upload.py > "$PROJECT_DIR/logs/api.log" 2>&1 &
API_PID=$!
# Give the API a moment, then verify the process is still alive
sleep 2
if ! kill -0 $API_PID 2>/dev/null; then
    echo "❌ Erreur démarrage API"
    cat "$PROJECT_DIR/logs/api.log"
    exit 1
fi
echo "✅ API démarrée (PID: $API_PID)"
echo ""
# Start the Dashboard in the background, logging to logs/dashboard.log
echo "🚀 Démarrage du Dashboard (port 5001)..."
cd "$PROJECT_DIR/web_dashboard"
python app.py > "$PROJECT_DIR/logs/dashboard.log" 2>&1 &
DASHBOARD_PID=$!
# Same liveness check; on failure also stop the API we just started
sleep 2
if ! kill -0 $DASHBOARD_PID 2>/dev/null; then
    echo "❌ Erreur démarrage Dashboard"
    cat "$PROJECT_DIR/logs/dashboard.log"
    kill $API_PID 2>/dev/null || true
    exit 1
fi
echo "✅ Dashboard démarré (PID: $DASHBOARD_PID)"
echo ""
# Summary banner
echo "========================================"
echo "✅ Services démarrés!"
echo "========================================"
echo ""
echo "🌐 URLs:"
echo "   API:       http://localhost:8000"
echo "   Dashboard: http://localhost:5001/?token=${RPA_TOKEN_READONLY:-}"
echo ""
echo "📝 Logs:"
echo "   API:       tail -f $PROJECT_DIR/logs/api.log"
echo "   Dashboard: tail -f $PROJECT_DIR/logs/dashboard.log"
echo ""
echo "🔑 Encryption password: ${ENCRYPTION_PASSWORD:0:3}***"
echo ""
echo "📊 Test API:"
echo "   curl -H \"Authorization: Bearer ${RPA_TOKEN_READONLY:-<read_ONLY_TOKEN>}\" http://localhost:8000/api/traces/status"
echo ""
echo "🛑 Appuyez sur Ctrl+C pour arrêter"
echo ""
# Block until both children exit (or the trap fires)
wait

47
server/start_server.sh Executable file
View File

@@ -0,0 +1,47 @@
#!/bin/bash
# start_server.sh
# Starts the API server that receives uploads.
set -e
echo "========================================"
echo "RPA Vision V3 - Serveur API Upload"
echo "========================================"
# Fiche #23 - token bootstrap safeguard (DEV); best-effort, never fatal
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
chmod +x "$SCRIPT_DIR/bootstrap_local_env.sh" 2>/dev/null || true
"$SCRIPT_DIR/bootstrap_local_env.sh" >/dev/null || true
# FIX: requirements_server.txt and api_upload.py were referenced relative to
# the caller's cwd, so the script only worked when launched from server/.
# Run from the script's own directory so ./server/start_server.sh works too.
cd "$SCRIPT_DIR"
# Check Python is available
if ! command -v python3 &> /dev/null; then
    echo "ERREUR: Python 3 n'est pas installé"
    exit 1
fi
# Install dependencies on demand
if ! python3 -c "import fastapi" 2>/dev/null; then
    echo "Installation des dépendances..."
    pip install -r requirements_server.txt
fi
# NB: bootstrap_local_env.sh has already created/loaded secrets/tokens if needed.
# Start the server (foreground; Ctrl+C stops it)
echo "Démarrage du serveur sur http://0.0.0.0:8000"
echo ""
echo "Endpoints disponibles:"
echo "  POST http://localhost:8000/api/traces/upload"
echo "  GET  http://localhost:8000/api/traces/status"
echo "  GET  http://localhost:8000/api/traces/sessions"
echo ""
echo "Auth (Fiche #23):"
echo "  Header: Authorization: Bearer <token>"
echo "  Read-only token: ${RPA_TOKEN_READONLY:-<READ_ONLY_TOKEN>}"
echo "  Dashboard: http://localhost:5001/?token=${RPA_TOKEN_READONLY:-}"
echo ""
echo "Appuyez sur Ctrl+C pour arrêter"
echo ""
python3 api_upload.py

124
server/storage_encrypted.py Normal file
View File

@@ -0,0 +1,124 @@
#!/usr/bin/env python3
"""
Module de déchiffrement pour le serveur API.
Copie des fonctions de déchiffrement depuis agent_v0/storage_encrypted.py
sans les dépendances sur config et raw_session.
"""
import os
import logging
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
logger = logging.getLogger("server.storage_encrypted")
def decrypt_session_file(
    encrypted_path: str,
    password: str,
    output_path: str | None = None
) -> str:
    """
    Decrypt a .enc file and restore the original ZIP.

    File layout: 16-byte salt, then 16-byte IV, then AES-CBC ciphertext with
    PKCS7 padding; the 32-byte key is derived with PBKDF2-HMAC-SHA256
    (100000 iterations) from *password* and the stored salt.

    Args:
        encrypted_path: Path of the .enc file
        password: Decryption password
        output_path: Output path (default: same name with .zip)
    Returns:
        Absolute path of the decrypted ZIP
    Raises:
        ValueError: truncated header, empty plaintext, or invalid PKCS7
            padding (typically a wrong password or a corrupt file).
    Note: server-side version without agent_v0 dependencies
    """
    if output_path is None:
        output_path = encrypted_path.replace('.enc', '.zip')
    try:
        # Read the encrypted file: fixed-size header (salt + IV), then payload
        with open(encrypted_path, 'rb') as f:
            salt = f.read(16)
            iv = f.read(16)
            ciphertext = f.read()
        if len(salt) != 16 or len(iv) != 16:
            raise ValueError("Fichier chiffré corrompu: salt ou IV manquant")
        # Derive the AES key from the password
        kdf = PBKDF2HMAC(
            algorithm=hashes.SHA256(),
            length=32,
            salt=salt,
            iterations=100000,
            backend=default_backend()
        )
        key = kdf.derive(password.encode('utf-8'))
        # Build the cipher for decryption
        cipher = Cipher(
            algorithms.AES(key),
            modes.CBC(iv),
            backend=default_backend()
        )
        decryptor = cipher.decryptor()
        # Decrypt the payload
        plaintext = decryptor.update(ciphertext) + decryptor.finalize()
        # Strip PKCS7 padding
        if len(plaintext) == 0:
            raise ValueError("Données déchiffrées vides")
        padding_length = plaintext[-1]
        if padding_length > 16 or padding_length == 0:
            raise ValueError(f"Padding invalide: {padding_length}")
        # Every padding byte must equal the padding length
        for i in range(padding_length):
            if plaintext[-(i+1)] != padding_length:
                raise ValueError("Padding PKCS7 invalide")
        plaintext = plaintext[:-padding_length]
        # Write the restored ZIP
        with open(output_path, 'wb') as f:
            f.write(plaintext)
        logger.info(f"Fichier déchiffré: {output_path}")
        return os.path.abspath(output_path)
    except Exception as e:
        logger.exception(f"Erreur lors du déchiffrement: {e}")
        raise
def test_decryption_password(password: str) -> bool:
"""
Test rapide pour vérifier qu'un mot de passe de déchiffrement fonctionne.
Args:
password: Le mot de passe à tester
Returns:
True si le mot de passe semble valide
"""
if not password:
return False
# Test basique: le mot de passe doit être une chaîne non vide
if len(password.strip()) == 0:
return False
# En production, on pourrait faire un test plus sophistiqué
# avec un fichier de test connu
return True
if __name__ == "__main__":
    # Quick smoke test when run directly
    print("Module de déchiffrement serveur chargé")
    print(f"Test password 'test': {test_decryption_password('test')}")
    print(f"Test password '': {test_decryption_password('')}")

View File

80
server/validate_secrets.sh Executable file
View File

@@ -0,0 +1,80 @@
#!/usr/bin/env bash
# server/validate_secrets.sh
#
# Verifies that the required secrets/tokens are actually set.
# Non-zero exit -> lets you block a "silent" start in production.
#
# Usage:
#   ./server/validate_secrets.sh /etc/rpa_vision_v3/rpa_vision_v3.env
set -euo pipefail
ENV_FILE="${1:-/etc/rpa_vision_v3/rpa_vision_v3.env}"
if [[ ! -f "$ENV_FILE" ]]; then
    echo "❌ Env file introuvable: $ENV_FILE" >&2
    exit 1
fi
# Read KEY=VALUE from the env file (first match only; empty when absent)
_get() {
    local key="$1"
    grep -E "^${key}=" "$ENV_FILE" 2>/dev/null | head -n 1 | cut -d'=' -f2- || true
}
# True (status 0) when the value is empty or a known placeholder/default
_is_placeholder() {
    local v="$1"
    [[ -z "$v" ]] && return 0
    [[ "$v" == "CHANGE_ME" ]] && return 0
    [[ "$v" == CHANGE_ME_* ]] && return 0
    [[ "$v" == "rpa_vision_v3_default_key" ]] && return 0
    return 1
}
ENVIRONMENT_VAL="$(_get ENVIRONMENT)"
AUTH_REQUIRED_VAL="$(_get RPA_AUTH_REQUIRED)"
# Auth tokens are mandatory in production, or when explicitly requested
AUTH_REQUIRED=false
if [[ "${ENVIRONMENT_VAL}" == "production" ]]; then
    AUTH_REQUIRED=true
fi
if [[ "${AUTH_REQUIRED_VAL,,}" == "true" ]]; then
    AUTH_REQUIRED=true
fi
MISSING=0
# Flag the key as missing when its value is empty or a placeholder
_require_key() {
    local key="$1"
    local v
    v="$(_get "$key")"
    if _is_placeholder "$v"; then
        echo "❌ Secret manquant ou placeholder: $key" >&2
        MISSING=1
    fi
}
echo "🔎 Validation secrets: $ENV_FILE"
# Always required (prod)
_require_key "ENCRYPTION_PASSWORD"
_require_key "SECRET_KEY"
if [[ "$AUTH_REQUIRED" == "true" ]]; then
    _require_key "RPA_TOKEN_ADMIN"
    _require_key "RPA_TOKEN_READONLY"
    _require_key "AUTOHEAL_ADMIN_TOKEN"
fi
if [[ "$MISSING" -ne 0 ]]; then
    cat >&2 <<'EOF'
👉 Correctif rapide:
  sudo ./server/bootstrap_secrets_env.sh /etc/rpa_vision_v3/rpa_vision_v3.env
Puis redémarre:
  sudo systemctl restart rpa-vision-v3-api rpa-vision-v3-dashboard rpa-vision-v3-worker
EOF
    exit 1
fi
echo "✅ Secrets OK"

272
server/verify_installation.sh Executable file
View File

@@ -0,0 +1,272 @@
#!/bin/bash
# verify_installation.sh
# Checks that all server components are correctly installed
set -e
echo "========================================"
echo "RPA Vision V3 - Vérification Serveur"
echo "========================================"
echo ""
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
ERRORS=0
WARNINGS=0
# Result-reporting helpers
check_ok() {
    echo "$1"
}
check_warning() {
    echo "⚠️ $1"
    # FIX: ((WARNINGS++)) evaluates to the pre-increment value, so when the
    # counter is 0 the command exits non-zero and `set -e` aborts the whole
    # script on the FIRST warning. Plain assignment has no such trap.
    WARNINGS=$((WARNINGS + 1))
}
check_error() {
    echo "$1"
    # Same `set -e` pitfall as above for ((ERRORS++)).
    ERRORS=$((ERRORS + 1))
}
# 1. Check the virtualenv (and activate it for the later python checks)
echo "📦 Vérification environnement virtuel..."
if [ -d "$PROJECT_DIR/venv_v3" ]; then
    check_ok "Environnement virtuel trouvé"
    source "$PROJECT_DIR/venv_v3/bin/activate"
else
    check_error "Environnement virtuel non trouvé: $PROJECT_DIR/venv_v3"
fi
echo ""
# 2. Check Python dependencies
echo "🐍 Vérification dépendances Python..."
# FastAPI
if python -c "import fastapi" 2>/dev/null; then
    check_ok "FastAPI installé"
else
    check_error "FastAPI non installé (pip install fastapi)"
fi
# Uvicorn
if python -c "import uvicorn" 2>/dev/null; then
    check_ok "Uvicorn installé"
else
    check_error "Uvicorn non installé (pip install uvicorn)"
fi
# Flask
if python -c "import flask" 2>/dev/null; then
    check_ok "Flask installé"
else
    check_error "Flask non installé (pip install flask)"
fi
# Cryptography
if python -c "import cryptography" 2>/dev/null; then
    check_ok "Cryptography installé"
else
    check_error "Cryptography non installé (pip install cryptography)"
fi
# Optional dependencies: warnings only, features degrade gracefully
if python -c "import torch" 2>/dev/null; then
    check_ok "PyTorch installé (optionnel)"
else
    check_warning "PyTorch non installé (embeddings désactivés)"
fi
if python -c "import clip" 2>/dev/null; then
    check_ok "CLIP installé (optionnel)"
else
    check_warning "CLIP non installé (embeddings désactivés)"
fi
if python -c "import faiss" 2>/dev/null; then
    check_ok "FAISS installé (optionnel)"
else
    check_warning "FAISS non installé (indexation désactivée)"
fi
echo ""
# 3. Check expected server files exist
echo "📁 Vérification fichiers serveur..."
FILES=(
    "server/api_upload.py"
    "server/processing_pipeline.py"
    "server/storage_encrypted.py"
    "server/requirements_server.txt"
    "server/start_all.sh"
    "server/setup_production.sh"
    "web_dashboard/app.py"
    "web_dashboard/templates/index.html"
    "web_dashboard/requirements.txt"
)
for file in "${FILES[@]}"; do
    if [ -f "$PROJECT_DIR/$file" ]; then
        check_ok "$file"
    else
        check_error "$file manquant"
    fi
done
echo ""
# 4. Check runtime directories (create them when missing)
echo "📂 Vérification répertoires..."
DIRS=(
    "data/training/uploads"
    "data/training/sessions"
    "logs"
)
for dir in "${DIRS[@]}"; do
    if [ -d "$PROJECT_DIR/$dir" ]; then
        check_ok "$dir"
    else
        check_warning "$dir manquant (sera créé au démarrage)"
        mkdir -p "$PROJECT_DIR/$dir"
    fi
done
echo ""
# 5. Check (and fix) execute permissions on the launch scripts
echo "🔐 Vérification permissions..."
if [ -x "$SCRIPT_DIR/start_all.sh" ]; then
    check_ok "start_all.sh exécutable"
else
    check_warning "start_all.sh non exécutable"
    chmod +x "$SCRIPT_DIR/start_all.sh"
    check_ok "Permissions corrigées"
fi
if [ -x "$SCRIPT_DIR/setup_production.sh" ]; then
    check_ok "setup_production.sh exécutable"
else
    check_warning "setup_production.sh non exécutable"
    chmod +x "$SCRIPT_DIR/setup_production.sh"
    check_ok "Permissions corrigées"
fi
echo ""
# 6. Check that the API/Dashboard ports are free
echo "🔌 Vérification ports..."
if netstat -tlnp 2>/dev/null | grep -q ":8000"; then
    check_warning "Port 8000 déjà utilisé"
else
    check_ok "Port 8000 disponible"
fi
if netstat -tlnp 2>/dev/null | grep -q ":5001"; then
    check_warning "Port 5001 déjà utilisé"
else
    check_ok "Port 5001 disponible"
fi
echo ""
# 7. Quick import smoke tests for each server module
echo "🧪 Test rapide des modules..."
# Test import API
if python -c "import sys; sys.path.insert(0, '$SCRIPT_DIR'); from api_upload import app; print('OK')" 2>/dev/null | grep -q "OK"; then
    check_ok "Module api_upload importable"
else
    check_error "Erreur import api_upload"
fi
# Test import Pipeline
if python -c "import sys; sys.path.insert(0, '$SCRIPT_DIR'); from processing_pipeline import ProcessingPipeline; print('OK')" 2>/dev/null | grep -q "OK"; then
    check_ok "Module processing_pipeline importable"
else
    check_error "Erreur import processing_pipeline"
fi
# Test import Dashboard
if python -c "import sys; sys.path.insert(0, '$PROJECT_DIR/web_dashboard'); from app import app; print('OK')" 2>/dev/null | grep -q "OK"; then
    check_ok "Module dashboard importable"
else
    check_error "Erreur import dashboard"
fi
# Test import Encryption
# FIX: storage_encrypted.py exports decrypt_session_file (not decrypt_file);
# the old name made this check report an error on every run.
if python -c "import sys; sys.path.insert(0, '$SCRIPT_DIR'); from storage_encrypted import decrypt_session_file; print('OK')" 2>/dev/null | grep -q "OK"; then
    check_ok "Module storage_encrypted importable"
else
    check_error "Erreur import storage_encrypted"
fi
echo ""
echo ""
# 8. Vérifier la configuration
echo "⚙️ Vérification configuration..."
if [ -z "$ENCRYPTION_PASSWORD" ]; then
check_warning "ENCRYPTION_PASSWORD non défini (utilisera le défaut)"
else
if [ "$ENCRYPTION_PASSWORD" = "rpa_vision_v3_default_key" ]; then
check_warning "ENCRYPTION_PASSWORD utilise la valeur par défaut (changer pour production!)"
else
check_ok "ENCRYPTION_PASSWORD personnalisé défini"
fi
fi
echo ""
# 9. Résumé
echo "========================================"
echo "📊 Résumé"
echo "========================================"
echo ""
if [ $ERRORS -eq 0 ] && [ $WARNINGS -eq 0 ]; then
echo "✅ Tout est OK! Le serveur est prêt."
echo ""
echo "🚀 Pour démarrer:"
echo " ./server/start_all.sh"
echo ""
echo "📚 Documentation:"
echo " - Guide de test: SERVER_TESTING_GUIDE.md"
echo " - Guide HTTPS: server/nginx_https_setup.md"
echo " - Résumé complet: SERVER_COMPLETE.md"
exit 0
elif [ $ERRORS -eq 0 ]; then
echo "⚠️ $WARNINGS avertissement(s) - Le serveur devrait fonctionner"
echo ""
echo "🚀 Pour démarrer:"
echo " ./server/start_all.sh"
echo ""
echo "💡 Recommandations:"
if python -c "import torch" 2>/dev/null; then
:
else
echo " - Installer PyTorch pour les embeddings"
fi
if [ "$ENCRYPTION_PASSWORD" = "rpa_vision_v3_default_key" ] || [ -z "$ENCRYPTION_PASSWORD" ]; then
echo " - Définir ENCRYPTION_PASSWORD pour la production"
fi
exit 0
else
echo "$ERRORS erreur(s), $WARNINGS avertissement(s)"
echo ""
echo "🔧 Actions requises:"
echo " 1. Installer les dépendances manquantes:"
echo " pip install -r server/requirements_server.txt"
echo " pip install -r web_dashboard/requirements.txt"
echo ""
echo " 2. Vérifier les fichiers manquants"
echo ""
echo " 3. Relancer la vérification:"
echo " ./server/verify_installation.sh"
exit 1
fi

129
server/worker_daemon.py Normal file
View File

@@ -0,0 +1,129 @@
#!/usr/bin/env python3
"""
server/worker_daemon.py
Fiche #21 (prod): worker "external" pour traiter la queue de sessions.
Pourquoi : séparer l'API (réponse HTTP) du traitement lourd (ScreenStates, embeddings,
FAISS, graph building) pour éviter qu'un traitement long bloque la prod.
Mode d'emploi:
# Lancer à la main
python server/worker_daemon.py
# En systemd : voir deploy/systemd/rpa-vision-v3-worker.service
Variables d'env:
RPA_WORKER_HEARTBEAT_PATH=data/runtime/health/worker_heartbeat.json
RPA_WORKER_HEARTBEAT_EVERY_S=10
RPA_TRAINING_BASE_PATH=data/training
"""
from __future__ import annotations
import json
import logging
import os
import signal
import sys
import time
from datetime import datetime
from pathlib import Path
# Ajouter le répertoire parent au path
sys.path.insert(0, str(Path(__file__).parent.parent))
# NOTE: ce script vit dans server/ ; on importe donc en "local" pour rester compatible
# avec les usages existants (api_upload.py fait pareil).
from processing_queue import get_queue, start_processing_worker, stop_processing_worker
from processing_pipeline import process_session_async
from core.system import initialize_system_cleanup, shutdown_system
logger = logging.getLogger("rpa.worker")
def _ensure_dir(p: Path) -> None:
p.parent.mkdir(parents=True, exist_ok=True)
class _GracefulStop:
    # Module-wide flag flipped by the signal handler; the main loop polls it
    # to exit cleanly on SIGINT/SIGTERM.
    stop = False
def _handle_signal(signum, frame):
    """Signal handler: log the signal and raise the graceful-stop flag."""
    logger.info(f"Received signal {signum}, stopping worker...")
    _GracefulStop.stop = True
def _write_heartbeat(path: Path, payload: dict) -> None:
_ensure_dir(path)
tmp = path.with_suffix(path.suffix + ".tmp")
with open(tmp, "w", encoding="utf-8") as f:
json.dump(payload, f, indent=2, ensure_ascii=False)
tmp.replace(path)
def main() -> int:
    """Run the external processing worker until SIGINT/SIGTERM.

    Starts the queue worker with process_session_async, then loops writing a
    JSON heartbeat (queue counters + pid) every RPA_WORKER_HEARTBEAT_EVERY_S
    seconds until a signal flips _GracefulStop.stop.

    Returns:
        0 on clean shutdown, 2 if the loop crashed.
    """
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
    )
    # Init cleanup (safe on server)
    initialize_system_cleanup()
    heartbeat_path = Path(os.getenv("RPA_WORKER_HEARTBEAT_PATH", "data/runtime/health/worker_heartbeat.json"))
    every_s = int(os.getenv("RPA_WORKER_HEARTBEAT_EVERY_S", "10"))
    # Install graceful-stop handlers for Ctrl+C and systemd stop
    signal.signal(signal.SIGINT, _handle_signal)
    signal.signal(signal.SIGTERM, _handle_signal)
    # Start the background queue worker
    logger.info("Starting external processing worker...")
    start_processing_worker(process_session_async)
    queue = get_queue()
    last_hb = 0.0
    try:
        while not _GracefulStop.stop:
            now = time.time()
            # Emit a heartbeat at most once every `every_s` seconds
            if now - last_hb >= every_s:
                items = queue.get_all()
                payload = {
                    "timestamp": datetime.now().isoformat(),
                    "queue": {
                        "total": len(items),
                        "pending": sum(1 for i in items if i.get("status") == "pending"),
                        "processing": sum(1 for i in items if i.get("status") == "processing"),
                        "completed": sum(1 for i in items if i.get("status") == "completed"),
                        "failed": sum(1 for i in items if i.get("status") == "failed"),
                    },
                    "pid": os.getpid(),
                }
                try:
                    _write_heartbeat(heartbeat_path, payload)
                except Exception as e:
                    # Heartbeat is monitoring-only; never kill the worker for it
                    logger.warning(f"Could not write heartbeat: {e}")
                last_hb = now
            time.sleep(1.0)
        logger.info("Stopping external worker...")
        stop_processing_worker()
        shutdown_system()
        return 0
    except Exception as e:
        logger.exception(f"Worker crashed: {e}")
        try:
            stop_processing_worker()
        except Exception:
            pass
        shutdown_system()
        return 2
if __name__ == "__main__":
    # Propagate main()'s return code as the process exit status
    raise SystemExit(main())