From f96f6322ec287e4ea08a2843fe878f92f1929148 Mon Sep 17 00:00:00 2001
From: Dom
Date: Sun, 12 Apr 2026 10:37:29 +0200
Subject: [PATCH] chore: dead code cleanup - remove _a_trier/, archives/,
 .bak files, empty scaffold
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Remove ~8.2 GB of stray files that pollute grep results, consume tokens,
and add noise to the repo:

- _a_trier/ (561 MB): legacy scripts, backups, session logs, demos
- archives/ (21 MB): frozen copy of the December 2024 code (already in
  git history)
- visual_workflow_builder/_a_trier/ (7.6 GB): legacy VWB backups plus
  old frontends
- web_dashboard/app.py.bak_20260304_2225: forgotten .bak file
- agent_v1/ (top-level): empty scaffold, never populated
- core/detection/ui_detector_old.py.bak: .bak file tracked by mistake

Also remove from git tracking:

- 2 __pycache__ files mistakenly tracked in the VWB backend

Update .gitignore to prevent recurrence:

- *.bak, *.bak_*, *.orig, *.old
- _a_trier/, archives/

All of this content remains recoverable from git history (tag
pre-cleanup-phase1-20260410).

Co-Authored-By: Claude Opus 4.6 (1M context)
---
 .gitignore                                        |   8 +
 core/detection/ui_detector_old.py.bak             | 622 ------------------
 .../__pycache__/__init__.cpython-312.pyc          | Bin 598 -> 0 bytes
 .../__pycache__/file_actions.cpython-312.pyc      | Bin 13470 -> 0 bytes
 4 files changed, 8 insertions(+), 622 deletions(-)
 delete mode 100644 core/detection/ui_detector_old.py.bak
 delete mode 100644 visual_workflow_builder/backend/actions/files/__pycache__/__init__.cpython-312.pyc
 delete mode 100644 visual_workflow_builder/backend/actions/files/__pycache__/file_actions.cpython-312.pyc

diff --git a/.gitignore b/.gitignore
index cef6188e1..abafb5b9d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -75,3 +75,11 @@ htmlcov/
 # === Backups ===
 *_backup_*
 backups/
+*.bak
+*.bak_*
+*.orig
+*.old
+
+# === Legacy / Triage ===
+_a_trier/
+archives/
diff --git a/core/detection/ui_detector_old.py.bak b/core/detection/ui_detector_old.py.bak
deleted file mode 100644
index ac9a793d1..000000000
--- a/core/detection/ui_detector_old.py.bak
+++ /dev/null
@@ -1,622 +0,0 @@
-"""
-UIDetector - Semantic UI Element Detection with a VLM
-
-Uses a Vision-Language Model (VLM) to detect and classify
-UI elements together with their semantic types and roles.
-"""
-
-from typing import List, Dict, Optional, Any, Tuple
-from pathlib import Path
-from dataclasses import dataclass
-import numpy as np
-from PIL import Image
-import json
-import re
-
-from ..models.ui_element import UIElement, UIElementEmbeddings, VisualFeatures
-from .ollama_client import OllamaClient, check_ollama_available
-
-
-@dataclass
-class DetectionConfig:
-    """UI detection configuration"""
-    vlm_model: str = "qwen3-vl:8b"  # VLM model to use (qwen3-vl:8b recommended)
-    vlm_endpoint: str = "http://localhost:11434"  # Ollama endpoint
-    confidence_threshold: float = 0.7  # Minimum confidence threshold
-    max_elements: int = 50  # Maximum number of elements to detect
-    detect_regions: bool = True  # Detect regions of interest first
-    use_embeddings: bool = True  # Generate dual embeddings
-
-
-class UIDetector:
-    """
-    Semantic UI element detector
-
-    Uses a VLM (Vision-Language Model) to:
-    1. Detect regions of interest in a screenshot
-    2. Classify the type of each UI element
-    3. Determine the semantic role
-    4. Extract visual features
-    5. Generate dual embeddings (image + text)
-    """
-
-    def __init__(self, config: Optional[DetectionConfig] = None):
-        """
-        Initialize the detector
-
-        Args:
-            config: Configuration (uses the default config if None)
-        """
-        self.config = config or DetectionConfig()
-        self.vlm_client = None
-        self._initialize_vlm()
-
-    def _initialize_vlm(self) -> None:
-        """Initialize the VLM client (Ollama)"""
-        try:
-            # Check whether Ollama is available
-            if check_ollama_available(self.config.vlm_endpoint):
-                self.vlm_client = OllamaClient(
-                    endpoint=self.config.vlm_endpoint,
-                    model=self.config.vlm_model
-                )
-                print(f"✓ VLM initialized: {self.config.vlm_model} at {self.config.vlm_endpoint}")
-            else:
-                print(f"⚠ Ollama not available at {self.config.vlm_endpoint}, using simulation mode")
-                self.vlm_client = None
-        except Exception as e:
-            print(f"⚠ Failed to initialize VLM: {e}, using simulation mode")
-            self.vlm_client = None
-
-    def detect(self,
-               screenshot_path: str,
-               window_context: Optional[Dict[str, Any]] = None) -> List[UIElement]:
-        """
-        Detect all UI elements in a screenshot
-
-        Args:
-            screenshot_path: Path to the screenshot
-            window_context: Window context (title, process, etc.)
-
-        Returns:
-            List of detected UIElements
-        """
-        # Load image
-        image = self._load_image(screenshot_path)
-        if image is None:
-            return []
-
-        # Detect regions of interest if enabled
-        if self.config.detect_regions:
-            regions = self._detect_regions_of_interest(image, window_context)
-        else:
-            # Use the full image
-            regions = [{"bbox": (0, 0, image.width, image.height), "confidence": 1.0}]
-
-        # Detect UI elements in each region
-        ui_elements = []
-        for region in regions:
-            elements = self._detect_elements_in_region(
-                image,
-                region,
-                screenshot_path,
-                window_context
-            )
-            ui_elements.extend(elements)
-
-        # Filter by confidence
-        ui_elements = [
-            el for el in ui_elements
-            if el.confidence >= self.config.confidence_threshold
-        ]
-
-        # Cap the number of elements
-        if len(ui_elements) > self.config.max_elements:
-            # Sort by confidence and keep the best ones
-            ui_elements.sort(key=lambda x: x.confidence, reverse=True)
-            ui_elements = ui_elements[:self.config.max_elements]
-
-        return ui_elements
-
-    def _load_image(self, screenshot_path: str) -> Optional[Image.Image]:
-        """Load an image from a file"""
-        try:
-            return Image.open(screenshot_path)
-        except Exception as e:
-            print(f"Error loading image {screenshot_path}: {e}")
-            return None
-
-    def _detect_regions_of_interest(self,
-                                    image: Image.Image,
-                                    window_context: Optional[Dict] = None) -> List[Dict]:
-        """
-        Detect the regions of interest in the image
-
-        Uses the VLM to identify the zones that contain UI elements.
-
-        Args:
-            image: PIL image
-            window_context: Window context
-
-        Returns:
-            List of regions {bbox: (x, y, w, h), confidence: float}
-        """
-        if self.vlm_client is None:
-            # Simulation mode: split the image into a grid
-            return self._simulate_region_detection(image)
-
-        # Use the VLM to detect regions
-        # For now, use the full image (simpler and effective)
-        width, height = image.size
-        return [{
-            "bbox": (0, 0, width, height),
-            "confidence": 1.0
-        }]
-
-    def _simulate_region_detection(self, image: Image.Image) -> List[Dict]:
-        """Simulated region detection (for development)"""
-        width, height = image.size
-
-        # Split into a 3x3 grid for the simulation
-        regions = []
-        grid_size = 3
-        cell_w = width // grid_size
-        cell_h = height // grid_size
-
-        for i in range(grid_size):
-            for j in range(grid_size):
-                regions.append({
-                    "bbox": (j * cell_w, i * cell_h, cell_w, cell_h),
-                    "confidence": 0.8
-                })
-
-        return regions
-
-    def _detect_elements_in_region(self,
-                                   image: Image.Image,
-                                   region: Dict,
-                                   screenshot_path: str,
-                                   window_context: Optional[Dict] = None) -> List[UIElement]:
-        """
-        Detect UI elements in a specific region
-
-        Args:
-            image: Full image
-            region: Region to analyze
-            screenshot_path: Screenshot path
-            window_context: Window context
-
-        Returns:
-            List of UIElements in this region
-        """
-        bbox = region["bbox"]
-        x, y, w, h = bbox
-
-        # Extract the crop for the region
-        region_image = image.crop((x, y, x + w, y + h))
-
-        # Detect elements with the VLM
-        if self.vlm_client is None:
-            # Simulation mode
-            return self._simulate_element_detection(
-                region_image, bbox, screenshot_path, window_context
-            )
-
-        # Real detection with the VLM!
-        return self._detect_with_vlm(
-            region_image, bbox, screenshot_path, window_context
-        )
-
-    def _detect_with_vlm(self,
-                         region_image: Image.Image,
-                         region_bbox: Tuple[int, int, int, int],
-                         screenshot_path: str,
-                         window_context: Optional[Dict] = None) -> List[UIElement]:
-        """
-        Detect UI elements with the VLM (real detection)
-
-        Args:
-            region_image: Region image
-            region_bbox: Region bbox (x, y, w, h)
-            screenshot_path: Screenshot path
-            window_context: Window context
-
-        Returns:
-            List of detected UIElements
-        """
-        x_offset, y_offset, w, h = region_bbox
-
-        # Build the prompt for the VLM
-        context_str = ""
-        if window_context:
-            context_str = f"\nWindow context: {window_context.get('title', 'Unknown')}"
-
-        # Simplified approach: ask for a structured description
-        prompt = f"""List all interactive UI elements in this screenshot.{context_str}
-
-For each element, provide:
-- type (button, text_input, checkbox, link, etc.)
-- label (visible text)
-- approximate position (top/middle/bottom, left/center/right)
-
-Format as JSON array:
-[{{"type": "button", "label": "Submit", "position": "middle-center"}}]
-
-Return ONLY the JSON array, no other text."""
-
-        # Call the VLM
-        # Note: use the path of the full screenshot rather than the crop,
-        # since some VLMs handle files better than PIL images
-        result = self.vlm_client.generate(
-            prompt=prompt,
-            image_path=screenshot_path,  # Use the path instead of the PIL image
-            temperature=0.1,
-            max_tokens=1000
-        )
-
-        if not result["success"]:
-            print(f"❌ VLM detection failed: {result.get('error', 'Unknown error')}")
-            return []
-
-        if not result["response"] or len(result["response"].strip()) == 0:
-            print(f"⚠ VLM returned empty response")
-            return []
-
-        # Parse the JSON response
-        elements = self._parse_vlm_response(
-            result["response"],
-            region_bbox,
-            screenshot_path,
-            window_context
-        )
-
-        return elements
-
-    def _parse_vlm_response(self,
-                            response: str,
-                            region_bbox: Tuple[int, int, int, int],
-                            screenshot_path: str,
-                            window_context: Optional[Dict] = None) -> List[UIElement]:
-        """
-        Parse the VLM's JSON response
-
-        Args:
-            response: Text response from the VLM
-            region_bbox: Region bbox
-            screenshot_path: Screenshot path
-            window_context: Window context
-
-        Returns:
-            List of UIElements
-        """
-        x_offset, y_offset, region_w, region_h = region_bbox
-
-        try:
-            # Extract the JSON from the response (it may contain text before/after)
-            json_match = re.search(r'\[.*\]', response, re.DOTALL)
-            if not json_match:
-                print(f"No JSON array found in VLM response")
-                print(f"VLM response was: {response[:500]}...")
-                return []
-
-            elements_data = json.loads(json_match.group(0))
-
-            if not isinstance(elements_data, list):
-                print(f"VLM response is not a JSON array")
-                return []
-
-            elements = []
-            for i, elem_data in enumerate(elements_data):
-                try:
-                    # Handle positions (percentages or textual)
-                    if 'x' in elem_data and 'y' in elem_data:
-                        # Percentage-based format
-                        x_pct = float(elem_data.get('x', 0))
-                        y_pct = float(elem_data.get('y', 0))
-                        w_pct = float(elem_data.get('width', 10))
-                        h_pct = float(elem_data.get('height', 5))
-
-                        elem_x = x_offset + int(region_w * x_pct / 100)
-                        elem_y = y_offset + int(region_h * y_pct / 100)
-                        elem_w = int(region_w * w_pct / 100)
-                        elem_h = int(region_h * h_pct / 100)
-                    else:
-                        # Textual position format (top/middle/bottom, left/center/right)
-                        position = elem_data.get('position', 'middle-center').lower()
-
-                        # Parse the position
-                        if 'top' in position:
-                            elem_y = y_offset + region_h // 4
-                        elif 'bottom' in position:
-                            elem_y = y_offset + 3 * region_h // 4
-                        else:  # middle
-                            elem_y = y_offset + region_h // 2
-
-                        if 'left' in position:
-                            elem_x = x_offset + region_w // 4
-                        elif 'right' in position:
-                            elem_x = x_offset + 3 * region_w // 4
-                        else:  # center
-                            elem_x = x_offset + region_w // 2
-
-                        # Default size based on the type
-                        elem_type = elem_data.get('type', 'button')
-                        if elem_type == 'button':
-                            elem_w, elem_h = 100, 40
-                        elif elem_type == 'text_input':
-                            elem_w, elem_h = 200, 35
-                        elif elem_type == 'checkbox':
-                            elem_w, elem_h = 25, 25
-                        else:
-                            elem_w, elem_h = 80, 30
-
-                    # Create the UIElement
-                    element = UIElement(
-                        element_id=f"vlm_{elem_x}_{elem_y}",
-                        type=elem_data.get('type', 'unknown'),
-                        role=elem_data.get('role', 'unknown'),
-                        bbox=(elem_x, elem_y, elem_w, elem_h),
-                        center=(elem_x + elem_w // 2, elem_y + elem_h // 2),
-                        label=elem_data.get('label', ''),
-                        label_confidence=0.85,  # Default confidence for VLM
-                        embeddings=UIElementEmbeddings(),
-                        visual_features=VisualFeatures(
-                            dominant_color="rgb(128, 128, 128)",
-                            has_icon=elem_data.get('type') == 'icon',
-                            shape="rectangle",
-                            size_category="medium"
-                        ),
-                        confidence=0.85,  # Default confidence for VLM
-                        metadata={
-                            "detected_by": "vlm",
-                            "model": self.config.vlm_model,
-                            "screenshot_path": screenshot_path
-                        }
-                    )
-
-                    elements.append(element)
-
-                except (KeyError, ValueError, TypeError) as e:
-                    print(f"Error parsing element {i}: {e}")
-                    continue
-
-            return elements
-
-        except json.JSONDecodeError as e:
-            print(f"Failed to parse VLM JSON response: {e}")
-            print(f"Response was: {response[:200]}...")
-            return []
-
-    def _simulate_element_detection(self,
-                                    region_image: Image.Image,
-                                    region_bbox: Tuple[int, int, int, int],
-                                    screenshot_path: str,
-                                    window_context: Optional[Dict] = None) -> List[UIElement]:
-        """Simulated element detection (for development)"""
-        # For the simulation, create a few dummy elements
-        elements = []
-
-        x_offset, y_offset, w, h = region_bbox
-
-        # Simulate 2-3 elements per region
-        num_elements = np.random.randint(2, 4)
-
-        for i in range(num_elements):
-            # Random position within the region
-            elem_w = np.random.randint(50, 150)
-            elem_h = np.random.randint(20, 60)
-            elem_x = x_offset + np.random.randint(0, max(1, w - elem_w))
-            elem_y = y_offset + np.random.randint(0, max(1, h - elem_h))
-
-            # Random type and role
-            types = ["button", "text_input", "checkbox", "link", "icon"]
-            roles = ["primary_action", "cancel", "submit", "form_input", "navigation"]
-
-            element = UIElement(
-                element_id=f"elem_{elem_x}_{elem_y}",
-                type=np.random.choice(types),
-                role=np.random.choice(roles),
-                bbox=(elem_x, elem_y, elem_w, elem_h),
-                center=(elem_x + elem_w // 2, elem_y + elem_h // 2),
-                label=f"Element {i}",
-                label_confidence=np.random.uniform(0.7, 0.95),
-                embeddings=UIElementEmbeddings(),  # Empty embeddings
-                visual_features=VisualFeatures(
-                    dominant_color="rgb(128, 128, 128)",
-                    has_icon=np.random.choice([True, False]),
-                    shape="rectangle",
-                    size_category="medium"
-                ),
-                confidence=np.random.uniform(0.7, 0.95),
-                metadata={"simulated": True, "screenshot_path": screenshot_path}
-            )
-
-            elements.append(element)
-
-        return elements
-
-    def classify_type(self,
-                      element_image: Image.Image,
-                      context: Optional[Dict] = None) -> Tuple[str, float]:
-        """
-        Classify the type of a UI element
-
-        Args:
-            element_image: Image of the element
-            context: Additional context
-
-        Returns:
-            (type, confidence)
-        """
-        if self.vlm_client is None:
-            # Simulation
-            types = ["button", "text_input", "checkbox", "radio", "dropdown",
-                     "tab", "link", "icon", "table_row", "menu_item"]
-            return np.random.choice(types), np.random.uniform(0.7, 0.95)
-
-        # Real classification with the VLM
-        result = self.vlm_client.classify_element_type(element_image, context)
-
-        if result["success"]:
-            return result["type"], result["confidence"]
-
-        return "unknown", 0.0
-
-    def classify_role(self,
-                      element_image: Image.Image,
-                      element_type: str,
-                      context: Optional[Dict] = None) -> Tuple[str, float]:
-        """
-        Classify the semantic role of an element
-
-        Args:
-            element_image: Image of the element
-            element_type: Type of the element
-            context: Additional context
-
-        Returns:
-            (role, confidence)
-        """
-        if self.vlm_client is None:
-            # Simulation
-            roles = ["primary_action", "cancel", "submit", "form_input",
-                     "search_field", "navigation", "settings", "close"]
-            return np.random.choice(roles), np.random.uniform(0.7, 0.95)
-
-        # Real classification with the VLM
-        result = self.vlm_client.classify_element_role(
-            element_image,
-            element_type,
-            context
-        )
-
-        if result["success"]:
-            return result["role"], result["confidence"]
-
-        return "unknown", 0.0
-
-    def extract_visual_features(self,
-                                element_image: Image.Image) -> VisualFeatures:
-        """
-        Extract the visual features of an element
-
-        Args:
-            element_image: Image of the element
-
-        Returns:
-            VisualFeatures
-        """
-        # Compute the dominant color
-        img_array = np.array(element_image)
-        if len(img_array.shape) == 3:
-            # Mean of the colors
-            dominant_color = tuple(img_array.mean(axis=(0, 1)).astype(int).tolist())
-        else:
-            dominant_color = (128, 128, 128)
-
-        # Determine the shape (simplified)
-        width, height = element_image.size
-        aspect_ratio = width / height if height > 0 else 1.0
-
-        if aspect_ratio > 3:
-            shape = "horizontal_bar"
-        elif aspect_ratio < 0.33:
-            shape = "vertical_bar"
-        elif 0.8 <= aspect_ratio <= 1.2:
-            shape = "square"
-        else:
-            shape = "rectangle"
-
-        # Size category
-        area = width * height
-        if area < 1000:
-            size_category = "small"
-        elif area < 10000:
-            size_category = "medium"
-        else:
-            size_category = "large"
-
-        # Icon detection (simplified)
-        has_icon = width < 100 and height < 100 and 0.8 <= aspect_ratio <= 1.2
-
-        return VisualFeatures(
-            dominant_color=dominant_color,
-            has_icon=has_icon,
-            shape=shape,
-            size_category=size_category
-        )
-
-    def generate_embeddings(self,
-                            element_image: Image.Image,
-                            element_label: str,
-                            embedder: Optional[Any] = None) -> Optional[UIElementEmbeddings]:
-        """
-        Generate dual embeddings (image + text) for an element
-
-        Args:
-            element_image: Image of the element
-            element_label: Text label of the element
-            embedder: Embedder to use (optional)
-
-        Returns:
-            UIElementEmbeddings or None
-        """
-        if not self.config.use_embeddings or embedder is None:
-            return None
-
-        try:
-            # Generate the image embedding
-            image_embedding_id = None
-            if hasattr(embedder, 'embed_image'):
-                # Temporarily save the image
-                # TODO: Implement saving and embedding
-                pass
-
-            # Generate the text embedding
-            text_embedding_id = None
-            if element_label and hasattr(embedder, 'embed_text'):
-                # TODO: Implement text embedding
-                pass
-
-            if image_embedding_id or text_embedding_id:
-                return UIElementEmbeddings(
-                    image_embedding_id=image_embedding_id,
-                    text_embedding_id=text_embedding_id,
-                    provider="openclip_ViT-B-32",
-                    dimensions=512
-                )
-        except Exception as e:
-            print(f"Warning: Failed to generate embeddings: {e}")
-
-        return None
-
-    def set_vlm_client(self, client: Any) -> None:
-        """Set the VLM client"""
-        self.vlm_client = client
-
-    def get_config(self) -> DetectionConfig:
-        """Get the configuration"""
-        return self.config
-
-
-# ============================================================================
-# Utility functions
-# ============================================================================
-
-def create_detector(vlm_model: str = "qwen3-vl:8b",
-                    confidence_threshold: float = 0.7) -> UIDetector:
-    """
-    Create a UIDetector with a custom configuration
-
-    Args:
-        vlm_model: VLM model to use
-        confidence_threshold: Confidence threshold
-
-    Returns:
-        Configured UIDetector
-    """
-    config = DetectionConfig(
-        vlm_model=vlm_model,
-        confidence_threshold=confidence_threshold
-    )
-    return UIDetector(config)
diff --git a/visual_workflow_builder/backend/actions/files/__pycache__/__init__.cpython-312.pyc b/visual_workflow_builder/backend/actions/files/__pycache__/__init__.cpython-312.pyc
deleted file mode 100644
index 53c84eee3935833293eecb31798019298f9e45be..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 598
[598 bytes of base85-encoded binary data omitted]

diff --git a/visual_workflow_builder/backend/actions/files/__pycache__/file_actions.cpython-312.pyc b/visual_workflow_builder/backend/actions/files/__pycache__/file_actions.cpython-312.pyc
deleted file mode 100644
index 291ac513bbce4b37fc0c5e5840484f9da81526d9..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 13470
[13470 bytes of base85-encoded binary data omitted]
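
Note: the diff above records only the four tracked paths (.gitignore plus
three deletions), so the untracked bulk named in the commit message
(_a_trier/, archives/, visual_workflow_builder/_a_trier/, agent_v1/, the
web_dashboard .bak file) was presumably never tracked at this point, or
was removed separately. The exact commands used are not recorded in the
patch; the sequence below is only an illustrative sketch of the workflow
the commit message describes, with paths taken from it:

  # Safety tag first (per the commit message, this tag already exists).
  git tag pre-cleanup-phase1-20260410

  # Untrack the two __pycache__ files without deleting them from disk.
  git rm -r --cached visual_workflow_builder/backend/actions/files/__pycache__/

  # Delete the untracked noise from the working tree.
  rm -rf _a_trier/ archives/ visual_workflow_builder/_a_trier/ agent_v1/
  rm web_dashboard/app.py.bak_20260304_2225

  # Recover removed content later from the safety tag, for example:
  git checkout pre-cleanup-phase1-20260410 -- _a_trier/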