From 31d8652ff552756954907af27b8d866be08fe07a Mon Sep 17 00:00:00 2001 From: ti Date: Tue, 19 Nov 2024 09:51:34 +0000 Subject: [PATCH 01/88] Fix: correct the early return error when save_epochs=1 and delelte the redunant snapshots at the end --- .../runners/snapshots.py | 34 ++++++++++--------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py index 341b3c4be4..bc5a30a44c 100644 --- a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py +++ b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py @@ -113,41 +113,43 @@ def update(self, epoch: int, state_dict: dict, last: bool = False) -> None: ): current_best = self.best() self._best_metric = metrics[self._key] + + # Save the new best model save_path = self.snapshot_path(epoch, best=True) parsed_state_dict = { - k: v + k: v for k, v in state_dict.items() if self.save_optimizer_state or k != "optimizer" } torch.save(parsed_state_dict, save_path) + # Handle previous best model if current_best is not None: - # rename if the current best should have been saved, otherwise delete if current_best.epochs % self.save_epochs == 0: new_name = self.snapshot_path(epoch=current_best.epochs) current_best.path.rename(new_name) else: current_best.path.unlink(missing_ok=False) - return - - if not (last or epoch % self.save_epochs == 0): - return - + else: + # Save regular snapshot if needed + should_save = last or epoch % self.save_epochs == 0 + if should_save: + save_path = self.snapshot_path(epoch=epoch) + parsed_state_dict = { + k: v + for k, v in state_dict.items() + if self.save_optimizer_state or k != "optimizer" + } + torch.save(parsed_state_dict, save_path) + + # Clean up old snapshots if needed existing_snapshots = [s for s in self.snapshots() if not s.best] if len(existing_snapshots) >= self.max_snapshots: - num_to_delete = 1 + len(existing_snapshots) - self.max_snapshots + num_to_delete = len(existing_snapshots) - self.max_snapshots to_delete = existing_snapshots[:num_to_delete] for snapshot in to_delete: snapshot.path.unlink(missing_ok=False) - save_path = self.snapshot_path(epoch=epoch) - parsed_state_dict = { - k: v - for k, v in state_dict.items() - if self.save_optimizer_state or k != "optimizer" - } - torch.save(parsed_state_dict, save_path) - def best(self) -> Snapshot | None: """Returns: the path to the best snapshot, if it exists""" snapshots = self.snapshots() From 12ed9bd66d0d1d0109a0fbbf2381eef5b21ca685 Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 21 Nov 2024 16:46:52 +0000 Subject: [PATCH 02/88] add plot_gt_and_prediction function --- .../pose_estimation_pytorch/apis/evaluate.py | 229 ++++++++++++++++++ 1 file changed, 229 insertions(+) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index f2b3bfea6b..4eb7381772 100644 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -18,6 +18,7 @@ import numpy as np import pandas as pd from tqdm import tqdm +import matplotlib.pyplot as plt import deeplabcut.core.metrics as metrics from deeplabcut.core.weight_init import WeightInitialization @@ -37,6 +38,14 @@ from deeplabcut.pose_estimation_pytorch.task import Task from deeplabcut.utils import auxiliaryfunctions from deeplabcut.utils.visualization import plot_evaluation_results +from deeplabcut.utils import auxfun_videos +from deeplabcut.utils.visualization import ( + 
create_minimal_figure, + get_cmap, + make_multianimal_labeled_image, + save_labeled_frame, + erase_artists, +) def predict( @@ -167,6 +176,224 @@ def evaluate( return results, predictions +import random +# def plot_predictions( +# loader: Loader, +# predictions: dict[str, dict[str, np.ndarray]], +# plotting: str = "bodypart", +# sample: int | None = None, +# sample_random: bool = False, +# ) -> None: + +def plot_predictions( + loader: Loader, + predictions: dict[str, dict[str, np.ndarray]], + plotting: str = "bodypart", + sample: int | None = None, + sample_random: bool = False, +) -> None: + """ + Process COCO format data and visualize using plot_evaluation_results + + Args: + loader: COCOLoader instance containing dataset info + predictions: Model predictions dictionary + plotting: How to color the points ("bodypart" or "individual") + sample: Number of images to visualize (None for all) + sample_random: Whether to sample images randomly + """ + + # Get paths and create output folder + project_root = loader.project_root + output_folder = Path(project_root) / "labeled_frames" + output_folder.mkdir(exist_ok=True) + + # 2. Get ground truth data + ground_truth = loader.load_data(mode="test") + + # 3. Create image list for sampling + image_ids = [img['id'] for img in ground_truth['images']] + if sample is not None: + if sample_random: + image_ids = random.sample(image_ids, min(sample, len(image_ids))) + else: + image_ids = image_ids[:sample] + + # 4. Create DataFrame structure + data = [] + + # Process ground truth + for img_id in image_ids: + img_info = next(img for img in ground_truth['images'] if img['id'] == img_id) + img_name = img_info['file_name'] + + # Get ground truth annotations + gt_anns = [ann for ann in ground_truth['annotations'] if ann['image_id'] == img_id] + + # Get predictions for this image + pred_anns = [pred for pred in predictions if pred['image_id'] == img_id] + + # Process each keypoint + for gt_ann, pred_ann in zip(gt_anns, pred_anns): + gt_kpts = np.array(gt_ann['keypoints']).reshape(-1, 3) + pred_kpts = np.array(pred_ann['keypoints']).reshape(-1, 3) + + # Get keypoint names + keypoint_names = ground_truth['categories'][0]['keypoints'] + + # Add ground truth points + for idx, (x, y, v) in enumerate(gt_kpts): + if v > 0: # visible keypoint + data.append({ + 'image': img_name, + 'scorer': 'ground_truth', + 'individual': f"instance_{gt_ann['id']}", + 'bodypart': keypoint_names[idx], + 'x': x, + 'y': y, + 'likelihood': 1.0 + }) + + # Add predictions + for idx, (x, y, score) in enumerate(pred_kpts): + if score > 0: # detected keypoint + data.append({ + 'image': img_name, + 'scorer': 'dlc_model', + 'individual': f"instance_{pred_ann['id']}", + 'bodypart': keypoint_names[idx], + 'x': x, + 'y': y, + 'likelihood': score + }) + + # 5. Create MultiIndex DataFrame + df = pd.DataFrame(data) + df_combined = df.set_index(['image', 'scorer', 'individual', 'bodypart']) + df_combined = df_combined.unstack(['scorer', 'individual', 'bodypart']) + + # 6. 
Call plot_evaluation_results + plot_evaluation_results( + df_combined=df_combined, + project_root=project_root, + scorer='ground_truth', + model_name='dlc_model', + output_folder=str(output_folder), + in_train_set=False, # Since we're using test data + mode=plotting, + plot_unique_bodyparts=False, # whether we should plot unique bodyparts + colormap='rainbow', # default values + dot_size=12, # default values + alpha_value=0.7, # default values + p_cutoff=0.6 # default values + ) + +def plot_gt_and_predictions( + image_path: str | Path, + output_dir: str | Path, + gt_bodyparts: np.ndarray, + pred_bodyparts: np.ndarray, # (num_predicted_animals, num_keypoints, 3) + gt_unique_bodyparts: np.ndarray | None = None, + pred_unique_bodyparts: np.ndarray | None = None, + mode: str = "bodypart", + colormap: str = "rainbow", + dot_size: int = 12, + alpha_value: float = 0.7, + p_cutoff: float = 0.6, +): + """Plot ground truth and predictions on an image. + + Args: + image_path: Path to the image + gt_bodyparts: Ground truth keypoints array (num_animals, num_keypoints, 3) + pred_bodyparts: Predicted keypoints array (num_animals, num_keypoints, 3) + output_dir: Directory where labeled images will be saved + gt_unique_bodyparts: Ground truth unique bodyparts if any + pred_unique_bodyparts: Predicted unique bodyparts if any + mode: How to color the points ("bodypart" or "individual") + colormap: Matplotlib colormap name + dot_size: Size of the plotted points + alpha_value: Transparency of the points + p_cutoff: Confidence threshold for showing predictions + """ + # Ensure output directory exists + output_dir = Path(output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + # Read the image + frame = auxfun_videos.imread(str(image_path), mode="skimage") + num_pred, num_keypoints = pred_bodyparts.shape[:2] + + # Create figure and set dimensions + fig, ax = create_minimal_figure() + h, w, _ = np.shape(frame) + fig.set_size_inches(w / 100, h / 100) + ax.set_xlim(0, w) + ax.set_ylim(0, h) + ax.invert_yaxis() + ax.imshow(frame, "gray") + + # Set up colors based on mode + if mode == "bodypart": + num_colors = num_keypoints + if pred_unique_bodyparts is not None: + num_colors += pred_unique_bodyparts.shape[1] + colors = get_cmap(num_colors, name=colormap) + + predictions = pred_bodyparts.swapaxes(0, 1) + ground_truth = gt_bodyparts.swapaxes(0, 1) + elif mode == "individual": + colors = get_cmap(num_pred + 1, name=colormap) + predictions = pred_bodyparts + ground_truth = gt_bodyparts + else: + raise ValueError(f"Invalid mode: {mode}") + + # Plot regular bodyparts + ax = make_multianimal_labeled_image( + frame, + ground_truth, + predictions[:, :, :2], + predictions[:, :, 2:], + colors, + dot_size, + alpha_value, + p_cutoff, + ax=ax, + ) + + # Plot unique bodyparts if present + if pred_unique_bodyparts is not None and gt_unique_bodyparts is not None: + if mode == "bodypart": + unique_predictions = pred_unique_bodyparts.swapaxes(0, 1) + unique_ground_truth = gt_unique_bodyparts.swapaxes(0, 1) + else: + unique_predictions = pred_unique_bodyparts + unique_ground_truth = gt_unique_bodyparts + + ax = make_multianimal_labeled_image( + frame, + unique_ground_truth, + unique_predictions[:, :, :2], + unique_predictions[:, :, 2:], + colors[num_keypoints:], + dot_size, + alpha_value, + p_cutoff, + ax=ax, + ) + + # Save the labeled image + save_labeled_frame( + fig, + str(image_path), + str(output_dir), + belongs_to_train=False, + ) + erase_artists(ax) + plt.close() + + def evaluate_snapshot( cfg: dict, loader: DLCLoader, 
@@ -289,6 +516,7 @@ def evaluate_snapshot( df_ground_truth, left_index=True, right_index=True ) unique_bodyparts = loader.get_dataset_parameters().unique_bpts + plot_evaluation_results( df_combined=df_combined, project_root=cfg["project_path"], @@ -502,6 +730,7 @@ def save_evaluation_results( df_scores.to_csv(combined_scores_path) + if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--config", type=str) From 614b1acabaf9a811436bd984c953a7207572bcf8 Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 21 Nov 2024 16:58:40 +0000 Subject: [PATCH 03/88] delete the initial attempt at function --- .../pose_estimation_pytorch/apis/evaluate.py | 113 ------------------ 1 file changed, 113 deletions(-) mode change 100644 => 100755 deeplabcut/pose_estimation_pytorch/apis/evaluate.py diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py old mode 100644 new mode 100755 index 4eb7381772..a19ea9e0d7 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -174,119 +174,6 @@ def evaluate( predictions[image]["bodyparts"] = pose return results, predictions - - -import random -# def plot_predictions( -# loader: Loader, -# predictions: dict[str, dict[str, np.ndarray]], -# plotting: str = "bodypart", -# sample: int | None = None, -# sample_random: bool = False, -# ) -> None: - -def plot_predictions( - loader: Loader, - predictions: dict[str, dict[str, np.ndarray]], - plotting: str = "bodypart", - sample: int | None = None, - sample_random: bool = False, -) -> None: - """ - Process COCO format data and visualize using plot_evaluation_results - - Args: - loader: COCOLoader instance containing dataset info - predictions: Model predictions dictionary - plotting: How to color the points ("bodypart" or "individual") - sample: Number of images to visualize (None for all) - sample_random: Whether to sample images randomly - """ - - # Get paths and create output folder - project_root = loader.project_root - output_folder = Path(project_root) / "labeled_frames" - output_folder.mkdir(exist_ok=True) - - # 2. Get ground truth data - ground_truth = loader.load_data(mode="test") - - # 3. Create image list for sampling - image_ids = [img['id'] for img in ground_truth['images']] - if sample is not None: - if sample_random: - image_ids = random.sample(image_ids, min(sample, len(image_ids))) - else: - image_ids = image_ids[:sample] - - # 4. 
Create DataFrame structure - data = [] - - # Process ground truth - for img_id in image_ids: - img_info = next(img for img in ground_truth['images'] if img['id'] == img_id) - img_name = img_info['file_name'] - - # Get ground truth annotations - gt_anns = [ann for ann in ground_truth['annotations'] if ann['image_id'] == img_id] - - # Get predictions for this image - pred_anns = [pred for pred in predictions if pred['image_id'] == img_id] - - # Process each keypoint - for gt_ann, pred_ann in zip(gt_anns, pred_anns): - gt_kpts = np.array(gt_ann['keypoints']).reshape(-1, 3) - pred_kpts = np.array(pred_ann['keypoints']).reshape(-1, 3) - - # Get keypoint names - keypoint_names = ground_truth['categories'][0]['keypoints'] - - # Add ground truth points - for idx, (x, y, v) in enumerate(gt_kpts): - if v > 0: # visible keypoint - data.append({ - 'image': img_name, - 'scorer': 'ground_truth', - 'individual': f"instance_{gt_ann['id']}", - 'bodypart': keypoint_names[idx], - 'x': x, - 'y': y, - 'likelihood': 1.0 - }) - - # Add predictions - for idx, (x, y, score) in enumerate(pred_kpts): - if score > 0: # detected keypoint - data.append({ - 'image': img_name, - 'scorer': 'dlc_model', - 'individual': f"instance_{pred_ann['id']}", - 'bodypart': keypoint_names[idx], - 'x': x, - 'y': y, - 'likelihood': score - }) - - # 5. Create MultiIndex DataFrame - df = pd.DataFrame(data) - df_combined = df.set_index(['image', 'scorer', 'individual', 'bodypart']) - df_combined = df_combined.unstack(['scorer', 'individual', 'bodypart']) - - # 6. Call plot_evaluation_results - plot_evaluation_results( - df_combined=df_combined, - project_root=project_root, - scorer='ground_truth', - model_name='dlc_model', - output_folder=str(output_folder), - in_train_set=False, # Since we're using test data - mode=plotting, - plot_unique_bodyparts=False, # whether we should plot unique bodyparts - colormap='rainbow', # default values - dot_size=12, # default values - alpha_value=0.7, # default values - p_cutoff=0.6 # default values - ) def plot_gt_and_predictions( image_path: str | Path, From 11c322ed084d65b8fb71e1892742466c54bbb8a7 Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 21 Nov 2024 17:31:15 +0000 Subject: [PATCH 04/88] add def visualize_coco_predictions(*) --- .../pose_estimation_pytorch/apis/evaluate.py | 75 ++++++++++++++++++- 1 file changed, 74 insertions(+), 1 deletion(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index a19ea9e0d7..0173d41cb8 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -19,7 +19,8 @@ import pandas as pd from tqdm import tqdm import matplotlib.pyplot as plt - +import json +import os import deeplabcut.core.metrics as metrics from deeplabcut.core.weight_init import WeightInitialization from deeplabcut.pose_estimation_pytorch import utils @@ -174,7 +175,79 @@ def evaluate( predictions[image]["bodyparts"] = pose return results, predictions + +def visualize_coco_predictions( + predictions: dict, + num_samples: int = 1, + test_file_json: str | Path = "test.json", + output_dir: str | Path | None = None, + draw_skeleton: bool = True, +) -> None: + """ + Visualize predictions using DeepLabCut's plot_gt_and_predictions function + Args: + predictions: Dictionary with image paths as keys and prediction data as values. 
+ Each prediction contains: + - bodyparts: numpy array of shape (1, 37, 3) + - bboxes: numpy array of shape (1, 4) + - bbox_scores: numpy array of shape (1,) + num_samples: Number of samples to visualize + test_file_json: Path to test set JSON file + output_dir: Directory to save visualization outputs. If None, will create + a directory next to test_file_json + draw_skeleton: Whether to draw skeleton connections between keypoints + """ + # Load ground truth data + with open(test_file_json, "r") as f: + ground_truth = json.load(f) + + if output_dir is None: + output_dir = os.path.join(os.path.dirname(test_file_json), "predictions_visualizations") + os.makedirs(output_dir, exist_ok=True) + + image_paths = list(predictions.keys()) + if num_samples: + image_paths = image_paths[:num_samples] + + # Process each image + for image_path in image_paths: + pred_data = predictions[image_path] + img_info = next((img for img in ground_truth['images'] + if img['file_name'] == os.path.basename(image_path)), None) + if img_info is None: + print(f"Warning: Could not find image info for {image_path}") + continue + + gt_anns = [ann for ann in ground_truth['annotations'] + if ann['image_id'] == img_info['id']] + + if not gt_anns: + print(f"Warning: No ground truth annotations found for {image_path}") + continue + + gt_keypoints = np.array(gt_anns[0]['keypoints']).reshape(1, -1, 3) + vis_mask = gt_keypoints[:, :, 2] != -1 + + visible_gt = gt_keypoints[vis_mask] + visible_gt = visible_gt[None, :, :2] + + pred_keypoints = pred_data['bodyparts'] # Keep batch dimension + visible_pred = pred_keypoints + visible_pred = pred_keypoints[vis_mask].copy() + visible_pred = np.expand_dims(visible_pred, axis=0) + + try: + plot_gt_and_predictions( + image_path=image_path, + output_dir=output_dir, + gt_bodyparts=visible_gt, + pred_bodyparts=visible_pred + ) + print(f"Successfully plotted predictions for {image_path}") + except Exception as e: + print(f"Error plotting predictions for {image_path}: {str(e)}") + def plot_gt_and_predictions( image_path: str | Path, output_dir: str | Path, From 598d02bbf28f998ed63aad3b50c4ed5049bce575 Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 21 Nov 2024 19:22:26 +0000 Subject: [PATCH 05/88] Fix: correct the early return error when save_epochs=1 and delelte the redunant snapshots at the end --- .../runners/snapshots.py | 34 ++++++++++--------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py index 341b3c4be4..bc5a30a44c 100644 --- a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py +++ b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py @@ -113,41 +113,43 @@ def update(self, epoch: int, state_dict: dict, last: bool = False) -> None: ): current_best = self.best() self._best_metric = metrics[self._key] + + # Save the new best model save_path = self.snapshot_path(epoch, best=True) parsed_state_dict = { - k: v + k: v for k, v in state_dict.items() if self.save_optimizer_state or k != "optimizer" } torch.save(parsed_state_dict, save_path) + # Handle previous best model if current_best is not None: - # rename if the current best should have been saved, otherwise delete if current_best.epochs % self.save_epochs == 0: new_name = self.snapshot_path(epoch=current_best.epochs) current_best.path.rename(new_name) else: current_best.path.unlink(missing_ok=False) - return - - if not (last or epoch % self.save_epochs == 0): - return - + else: + # Save regular snapshot if 
needed + should_save = last or epoch % self.save_epochs == 0 + if should_save: + save_path = self.snapshot_path(epoch=epoch) + parsed_state_dict = { + k: v + for k, v in state_dict.items() + if self.save_optimizer_state or k != "optimizer" + } + torch.save(parsed_state_dict, save_path) + + # Clean up old snapshots if needed existing_snapshots = [s for s in self.snapshots() if not s.best] if len(existing_snapshots) >= self.max_snapshots: - num_to_delete = 1 + len(existing_snapshots) - self.max_snapshots + num_to_delete = len(existing_snapshots) - self.max_snapshots to_delete = existing_snapshots[:num_to_delete] for snapshot in to_delete: snapshot.path.unlink(missing_ok=False) - save_path = self.snapshot_path(epoch=epoch) - parsed_state_dict = { - k: v - for k, v in state_dict.items() - if self.save_optimizer_state or k != "optimizer" - } - torch.save(parsed_state_dict, save_path) - def best(self) -> Snapshot | None: """Returns: the path to the best snapshot, if it exists""" snapshots = self.snapshots() From 76e10b406940ffa35eaac02f10e93df05b4cc666 Mon Sep 17 00:00:00 2001 From: Ti Wang <81274389+xiu-cs@users.noreply.github.com> Date: Fri, 22 Nov 2024 13:47:09 +0100 Subject: [PATCH 06/88] Update deeplabcut/pose_estimation_pytorch/runners/snapshots.py Co-authored-by: n-poulsen <45132115+n-poulsen@users.noreply.github.com> --- .../runners/snapshots.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py index bc5a30a44c..45bf339d0b 100644 --- a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py +++ b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py @@ -130,17 +130,15 @@ def update(self, epoch: int, state_dict: dict, last: bool = False) -> None: current_best.path.rename(new_name) else: current_best.path.unlink(missing_ok=False) - else: + elif last or epoch % self.save_epochs == 0: # Save regular snapshot if needed - should_save = last or epoch % self.save_epochs == 0 - if should_save: - save_path = self.snapshot_path(epoch=epoch) - parsed_state_dict = { - k: v - for k, v in state_dict.items() - if self.save_optimizer_state or k != "optimizer" - } - torch.save(parsed_state_dict, save_path) + save_path = self.snapshot_path(epoch=epoch) + parsed_state_dict = { + k: v + for k, v in state_dict.items() + if self.save_optimizer_state or k != "optimizer" + } + torch.save(parsed_state_dict, save_path) # Clean up old snapshots if needed existing_snapshots = [s for s in self.snapshots() if not s.best] From 275fff1b3af8f7c97f8f2648eabd4e7d403366d6 Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 21 Nov 2024 16:46:52 +0000 Subject: [PATCH 07/88] add plot_gt_and_prediction function --- .../pose_estimation_pytorch/apis/evaluate.py | 229 ++++++++++++++++++ 1 file changed, 229 insertions(+) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index f2b3bfea6b..4eb7381772 100644 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -18,6 +18,7 @@ import numpy as np import pandas as pd from tqdm import tqdm +import matplotlib.pyplot as plt import deeplabcut.core.metrics as metrics from deeplabcut.core.weight_init import WeightInitialization @@ -37,6 +38,14 @@ from deeplabcut.pose_estimation_pytorch.task import Task from deeplabcut.utils import auxiliaryfunctions from deeplabcut.utils.visualization import plot_evaluation_results 
+from deeplabcut.utils import auxfun_videos +from deeplabcut.utils.visualization import ( + create_minimal_figure, + get_cmap, + make_multianimal_labeled_image, + save_labeled_frame, + erase_artists, +) def predict( @@ -167,6 +176,224 @@ def evaluate( return results, predictions +import random +# def plot_predictions( +# loader: Loader, +# predictions: dict[str, dict[str, np.ndarray]], +# plotting: str = "bodypart", +# sample: int | None = None, +# sample_random: bool = False, +# ) -> None: + +def plot_predictions( + loader: Loader, + predictions: dict[str, dict[str, np.ndarray]], + plotting: str = "bodypart", + sample: int | None = None, + sample_random: bool = False, +) -> None: + """ + Process COCO format data and visualize using plot_evaluation_results + + Args: + loader: COCOLoader instance containing dataset info + predictions: Model predictions dictionary + plotting: How to color the points ("bodypart" or "individual") + sample: Number of images to visualize (None for all) + sample_random: Whether to sample images randomly + """ + + # Get paths and create output folder + project_root = loader.project_root + output_folder = Path(project_root) / "labeled_frames" + output_folder.mkdir(exist_ok=True) + + # 2. Get ground truth data + ground_truth = loader.load_data(mode="test") + + # 3. Create image list for sampling + image_ids = [img['id'] for img in ground_truth['images']] + if sample is not None: + if sample_random: + image_ids = random.sample(image_ids, min(sample, len(image_ids))) + else: + image_ids = image_ids[:sample] + + # 4. Create DataFrame structure + data = [] + + # Process ground truth + for img_id in image_ids: + img_info = next(img for img in ground_truth['images'] if img['id'] == img_id) + img_name = img_info['file_name'] + + # Get ground truth annotations + gt_anns = [ann for ann in ground_truth['annotations'] if ann['image_id'] == img_id] + + # Get predictions for this image + pred_anns = [pred for pred in predictions if pred['image_id'] == img_id] + + # Process each keypoint + for gt_ann, pred_ann in zip(gt_anns, pred_anns): + gt_kpts = np.array(gt_ann['keypoints']).reshape(-1, 3) + pred_kpts = np.array(pred_ann['keypoints']).reshape(-1, 3) + + # Get keypoint names + keypoint_names = ground_truth['categories'][0]['keypoints'] + + # Add ground truth points + for idx, (x, y, v) in enumerate(gt_kpts): + if v > 0: # visible keypoint + data.append({ + 'image': img_name, + 'scorer': 'ground_truth', + 'individual': f"instance_{gt_ann['id']}", + 'bodypart': keypoint_names[idx], + 'x': x, + 'y': y, + 'likelihood': 1.0 + }) + + # Add predictions + for idx, (x, y, score) in enumerate(pred_kpts): + if score > 0: # detected keypoint + data.append({ + 'image': img_name, + 'scorer': 'dlc_model', + 'individual': f"instance_{pred_ann['id']}", + 'bodypart': keypoint_names[idx], + 'x': x, + 'y': y, + 'likelihood': score + }) + + # 5. Create MultiIndex DataFrame + df = pd.DataFrame(data) + df_combined = df.set_index(['image', 'scorer', 'individual', 'bodypart']) + df_combined = df_combined.unstack(['scorer', 'individual', 'bodypart']) + + # 6. 
Call plot_evaluation_results + plot_evaluation_results( + df_combined=df_combined, + project_root=project_root, + scorer='ground_truth', + model_name='dlc_model', + output_folder=str(output_folder), + in_train_set=False, # Since we're using test data + mode=plotting, + plot_unique_bodyparts=False, # whether we should plot unique bodyparts + colormap='rainbow', # default values + dot_size=12, # default values + alpha_value=0.7, # default values + p_cutoff=0.6 # default values + ) + +def plot_gt_and_predictions( + image_path: str | Path, + output_dir: str | Path, + gt_bodyparts: np.ndarray, + pred_bodyparts: np.ndarray, # (num_predicted_animals, num_keypoints, 3) + gt_unique_bodyparts: np.ndarray | None = None, + pred_unique_bodyparts: np.ndarray | None = None, + mode: str = "bodypart", + colormap: str = "rainbow", + dot_size: int = 12, + alpha_value: float = 0.7, + p_cutoff: float = 0.6, +): + """Plot ground truth and predictions on an image. + + Args: + image_path: Path to the image + gt_bodyparts: Ground truth keypoints array (num_animals, num_keypoints, 3) + pred_bodyparts: Predicted keypoints array (num_animals, num_keypoints, 3) + output_dir: Directory where labeled images will be saved + gt_unique_bodyparts: Ground truth unique bodyparts if any + pred_unique_bodyparts: Predicted unique bodyparts if any + mode: How to color the points ("bodypart" or "individual") + colormap: Matplotlib colormap name + dot_size: Size of the plotted points + alpha_value: Transparency of the points + p_cutoff: Confidence threshold for showing predictions + """ + # Ensure output directory exists + output_dir = Path(output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + # Read the image + frame = auxfun_videos.imread(str(image_path), mode="skimage") + num_pred, num_keypoints = pred_bodyparts.shape[:2] + + # Create figure and set dimensions + fig, ax = create_minimal_figure() + h, w, _ = np.shape(frame) + fig.set_size_inches(w / 100, h / 100) + ax.set_xlim(0, w) + ax.set_ylim(0, h) + ax.invert_yaxis() + ax.imshow(frame, "gray") + + # Set up colors based on mode + if mode == "bodypart": + num_colors = num_keypoints + if pred_unique_bodyparts is not None: + num_colors += pred_unique_bodyparts.shape[1] + colors = get_cmap(num_colors, name=colormap) + + predictions = pred_bodyparts.swapaxes(0, 1) + ground_truth = gt_bodyparts.swapaxes(0, 1) + elif mode == "individual": + colors = get_cmap(num_pred + 1, name=colormap) + predictions = pred_bodyparts + ground_truth = gt_bodyparts + else: + raise ValueError(f"Invalid mode: {mode}") + + # Plot regular bodyparts + ax = make_multianimal_labeled_image( + frame, + ground_truth, + predictions[:, :, :2], + predictions[:, :, 2:], + colors, + dot_size, + alpha_value, + p_cutoff, + ax=ax, + ) + + # Plot unique bodyparts if present + if pred_unique_bodyparts is not None and gt_unique_bodyparts is not None: + if mode == "bodypart": + unique_predictions = pred_unique_bodyparts.swapaxes(0, 1) + unique_ground_truth = gt_unique_bodyparts.swapaxes(0, 1) + else: + unique_predictions = pred_unique_bodyparts + unique_ground_truth = gt_unique_bodyparts + + ax = make_multianimal_labeled_image( + frame, + unique_ground_truth, + unique_predictions[:, :, :2], + unique_predictions[:, :, 2:], + colors[num_keypoints:], + dot_size, + alpha_value, + p_cutoff, + ax=ax, + ) + + # Save the labeled image + save_labeled_frame( + fig, + str(image_path), + str(output_dir), + belongs_to_train=False, + ) + erase_artists(ax) + plt.close() + + def evaluate_snapshot( cfg: dict, loader: DLCLoader, 
@@ -289,6 +516,7 @@ def evaluate_snapshot( df_ground_truth, left_index=True, right_index=True ) unique_bodyparts = loader.get_dataset_parameters().unique_bpts + plot_evaluation_results( df_combined=df_combined, project_root=cfg["project_path"], @@ -502,6 +730,7 @@ def save_evaluation_results( df_scores.to_csv(combined_scores_path) + if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--config", type=str) From 564dbe100c74d6c3af7944d21b51705a312ceb09 Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 21 Nov 2024 16:58:40 +0000 Subject: [PATCH 08/88] delete the initial attempt at function --- .../pose_estimation_pytorch/apis/evaluate.py | 113 ------------------ 1 file changed, 113 deletions(-) mode change 100644 => 100755 deeplabcut/pose_estimation_pytorch/apis/evaluate.py diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py old mode 100644 new mode 100755 index 4eb7381772..a19ea9e0d7 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -174,119 +174,6 @@ def evaluate( predictions[image]["bodyparts"] = pose return results, predictions - - -import random -# def plot_predictions( -# loader: Loader, -# predictions: dict[str, dict[str, np.ndarray]], -# plotting: str = "bodypart", -# sample: int | None = None, -# sample_random: bool = False, -# ) -> None: - -def plot_predictions( - loader: Loader, - predictions: dict[str, dict[str, np.ndarray]], - plotting: str = "bodypart", - sample: int | None = None, - sample_random: bool = False, -) -> None: - """ - Process COCO format data and visualize using plot_evaluation_results - - Args: - loader: COCOLoader instance containing dataset info - predictions: Model predictions dictionary - plotting: How to color the points ("bodypart" or "individual") - sample: Number of images to visualize (None for all) - sample_random: Whether to sample images randomly - """ - - # Get paths and create output folder - project_root = loader.project_root - output_folder = Path(project_root) / "labeled_frames" - output_folder.mkdir(exist_ok=True) - - # 2. Get ground truth data - ground_truth = loader.load_data(mode="test") - - # 3. Create image list for sampling - image_ids = [img['id'] for img in ground_truth['images']] - if sample is not None: - if sample_random: - image_ids = random.sample(image_ids, min(sample, len(image_ids))) - else: - image_ids = image_ids[:sample] - - # 4. 
Create DataFrame structure - data = [] - - # Process ground truth - for img_id in image_ids: - img_info = next(img for img in ground_truth['images'] if img['id'] == img_id) - img_name = img_info['file_name'] - - # Get ground truth annotations - gt_anns = [ann for ann in ground_truth['annotations'] if ann['image_id'] == img_id] - - # Get predictions for this image - pred_anns = [pred for pred in predictions if pred['image_id'] == img_id] - - # Process each keypoint - for gt_ann, pred_ann in zip(gt_anns, pred_anns): - gt_kpts = np.array(gt_ann['keypoints']).reshape(-1, 3) - pred_kpts = np.array(pred_ann['keypoints']).reshape(-1, 3) - - # Get keypoint names - keypoint_names = ground_truth['categories'][0]['keypoints'] - - # Add ground truth points - for idx, (x, y, v) in enumerate(gt_kpts): - if v > 0: # visible keypoint - data.append({ - 'image': img_name, - 'scorer': 'ground_truth', - 'individual': f"instance_{gt_ann['id']}", - 'bodypart': keypoint_names[idx], - 'x': x, - 'y': y, - 'likelihood': 1.0 - }) - - # Add predictions - for idx, (x, y, score) in enumerate(pred_kpts): - if score > 0: # detected keypoint - data.append({ - 'image': img_name, - 'scorer': 'dlc_model', - 'individual': f"instance_{pred_ann['id']}", - 'bodypart': keypoint_names[idx], - 'x': x, - 'y': y, - 'likelihood': score - }) - - # 5. Create MultiIndex DataFrame - df = pd.DataFrame(data) - df_combined = df.set_index(['image', 'scorer', 'individual', 'bodypart']) - df_combined = df_combined.unstack(['scorer', 'individual', 'bodypart']) - - # 6. Call plot_evaluation_results - plot_evaluation_results( - df_combined=df_combined, - project_root=project_root, - scorer='ground_truth', - model_name='dlc_model', - output_folder=str(output_folder), - in_train_set=False, # Since we're using test data - mode=plotting, - plot_unique_bodyparts=False, # whether we should plot unique bodyparts - colormap='rainbow', # default values - dot_size=12, # default values - alpha_value=0.7, # default values - p_cutoff=0.6 # default values - ) def plot_gt_and_predictions( image_path: str | Path, From 41ff721888535fb71500e1fb622d2449cb7a4754 Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 21 Nov 2024 17:31:15 +0000 Subject: [PATCH 09/88] add def visualize_coco_predictions(*) --- .../pose_estimation_pytorch/apis/evaluate.py | 75 ++++++++++++++++++- 1 file changed, 74 insertions(+), 1 deletion(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index a19ea9e0d7..0173d41cb8 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -19,7 +19,8 @@ import pandas as pd from tqdm import tqdm import matplotlib.pyplot as plt - +import json +import os import deeplabcut.core.metrics as metrics from deeplabcut.core.weight_init import WeightInitialization from deeplabcut.pose_estimation_pytorch import utils @@ -174,7 +175,79 @@ def evaluate( predictions[image]["bodyparts"] = pose return results, predictions + +def visualize_coco_predictions( + predictions: dict, + num_samples: int = 1, + test_file_json: str | Path = "test.json", + output_dir: str | Path | None = None, + draw_skeleton: bool = True, +) -> None: + """ + Visualize predictions using DeepLabCut's plot_gt_and_predictions function + Args: + predictions: Dictionary with image paths as keys and prediction data as values. 
+ Each prediction contains: + - bodyparts: numpy array of shape (1, 37, 3) + - bboxes: numpy array of shape (1, 4) + - bbox_scores: numpy array of shape (1,) + num_samples: Number of samples to visualize + test_file_json: Path to test set JSON file + output_dir: Directory to save visualization outputs. If None, will create + a directory next to test_file_json + draw_skeleton: Whether to draw skeleton connections between keypoints + """ + # Load ground truth data + with open(test_file_json, "r") as f: + ground_truth = json.load(f) + + if output_dir is None: + output_dir = os.path.join(os.path.dirname(test_file_json), "predictions_visualizations") + os.makedirs(output_dir, exist_ok=True) + + image_paths = list(predictions.keys()) + if num_samples: + image_paths = image_paths[:num_samples] + + # Process each image + for image_path in image_paths: + pred_data = predictions[image_path] + img_info = next((img for img in ground_truth['images'] + if img['file_name'] == os.path.basename(image_path)), None) + if img_info is None: + print(f"Warning: Could not find image info for {image_path}") + continue + + gt_anns = [ann for ann in ground_truth['annotations'] + if ann['image_id'] == img_info['id']] + + if not gt_anns: + print(f"Warning: No ground truth annotations found for {image_path}") + continue + + gt_keypoints = np.array(gt_anns[0]['keypoints']).reshape(1, -1, 3) + vis_mask = gt_keypoints[:, :, 2] != -1 + + visible_gt = gt_keypoints[vis_mask] + visible_gt = visible_gt[None, :, :2] + + pred_keypoints = pred_data['bodyparts'] # Keep batch dimension + visible_pred = pred_keypoints + visible_pred = pred_keypoints[vis_mask].copy() + visible_pred = np.expand_dims(visible_pred, axis=0) + + try: + plot_gt_and_predictions( + image_path=image_path, + output_dir=output_dir, + gt_bodyparts=visible_gt, + pred_bodyparts=visible_pred + ) + print(f"Successfully plotted predictions for {image_path}") + except Exception as e: + print(f"Error plotting predictions for {image_path}: {str(e)}") + def plot_gt_and_predictions( image_path: str | Path, output_dir: str | Path, From 7f6c8c1c7912e9b3b9558ad68a006763c74bc64b Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 21 Nov 2024 19:03:51 +0100 Subject: [PATCH 10/88] =?UTF-8?q?Fix:=20correct=20the=20early=20return=20e?= =?UTF-8?q?rror=20when=20save=5Fepochs=3D1=20and=20delelte=20th=E2=80=A6?= =?UTF-8?q?=20=E2=80=A6e=20redunant=20snapshots=20at=20the=20end?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- deeplabcut/pose_estimation_pytorch/runners/snapshots.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 deeplabcut/pose_estimation_pytorch/runners/snapshots.py diff --git a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py old mode 100644 new mode 100755 From b91a3f8af7e591c3e8be0d2343a036652f5f7190 Mon Sep 17 00:00:00 2001 From: ti Date: Fri, 22 Nov 2024 16:38:42 +0100 Subject: [PATCH 11/88] black -> snapshots.py --- deeplabcut/pose_estimation_pytorch/runners/snapshots.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py index 45bf339d0b..74203f5be1 100755 --- a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py +++ b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py @@ -113,11 +113,11 @@ def update(self, epoch: int, state_dict: dict, last: bool = False) -> None: ): current_best = 
self.best() self._best_metric = metrics[self._key] - + # Save the new best model save_path = self.snapshot_path(epoch, best=True) parsed_state_dict = { - k: v + k: v for k, v in state_dict.items() if self.save_optimizer_state or k != "optimizer" } From f6704073db2ce2b422e63166277a5b7d4d3e478e Mon Sep 17 00:00:00 2001 From: ti Date: Fri, 22 Nov 2024 17:14:17 +0100 Subject: [PATCH 12/88] black -> snapshots.py --- deeplabcut/pose_estimation_pytorch/runners/snapshots.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) mode change 100644 => 100755 deeplabcut/pose_estimation_pytorch/runners/snapshots.py diff --git a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py old mode 100644 new mode 100755 index 45bf339d0b..74203f5be1 --- a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py +++ b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py @@ -113,11 +113,11 @@ def update(self, epoch: int, state_dict: dict, last: bool = False) -> None: ): current_best = self.best() self._best_metric = metrics[self._key] - + # Save the new best model save_path = self.snapshot_path(epoch, best=True) parsed_state_dict = { - k: v + k: v for k, v in state_dict.items() if self.save_optimizer_state or k != "optimizer" } From 971f737e01a2585c1ca44b28e3eb341278e87422 Mon Sep 17 00:00:00 2001 From: ti Date: Mon, 25 Nov 2024 12:12:04 +0100 Subject: [PATCH 13/88] Revert "Fix: correct the early return error when save_epochs=1 and delelte the redunant snapshots at the end" This reverts commit 31d8652ff552756954907af27b8d866be08fe07a. --- .../runners/snapshots.py | 34 +++++++++---------- 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py index bc5a30a44c..341b3c4be4 100644 --- a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py +++ b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py @@ -113,43 +113,41 @@ def update(self, epoch: int, state_dict: dict, last: bool = False) -> None: ): current_best = self.best() self._best_metric = metrics[self._key] - - # Save the new best model save_path = self.snapshot_path(epoch, best=True) parsed_state_dict = { - k: v + k: v for k, v in state_dict.items() if self.save_optimizer_state or k != "optimizer" } torch.save(parsed_state_dict, save_path) - # Handle previous best model if current_best is not None: + # rename if the current best should have been saved, otherwise delete if current_best.epochs % self.save_epochs == 0: new_name = self.snapshot_path(epoch=current_best.epochs) current_best.path.rename(new_name) else: current_best.path.unlink(missing_ok=False) - else: - # Save regular snapshot if needed - should_save = last or epoch % self.save_epochs == 0 - if should_save: - save_path = self.snapshot_path(epoch=epoch) - parsed_state_dict = { - k: v - for k, v in state_dict.items() - if self.save_optimizer_state or k != "optimizer" - } - torch.save(parsed_state_dict, save_path) - - # Clean up old snapshots if needed + return + + if not (last or epoch % self.save_epochs == 0): + return + existing_snapshots = [s for s in self.snapshots() if not s.best] if len(existing_snapshots) >= self.max_snapshots: - num_to_delete = len(existing_snapshots) - self.max_snapshots + num_to_delete = 1 + len(existing_snapshots) - self.max_snapshots to_delete = existing_snapshots[:num_to_delete] for snapshot in to_delete: snapshot.path.unlink(missing_ok=False) + save_path = 
self.snapshot_path(epoch=epoch) + parsed_state_dict = { + k: v + for k, v in state_dict.items() + if self.save_optimizer_state or k != "optimizer" + } + torch.save(parsed_state_dict, save_path) + def best(self) -> Snapshot | None: """Returns: the path to the best snapshot, if it exists""" snapshots = self.snapshots() From b2077ee34d24dbace6f5fd20bc0d85ed82e0b7c8 Mon Sep 17 00:00:00 2001 From: ti Date: Mon, 25 Nov 2024 12:20:22 +0100 Subject: [PATCH 14/88] isort and black -> evaluate.py --- .../pose_estimation_pytorch/apis/evaluate.py | 78 +++++++++++-------- 1 file changed, 45 insertions(+), 33 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index 0173d41cb8..32dc2c0b8a 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -11,16 +11,17 @@ from __future__ import annotations import argparse +import json +import os from pathlib import Path from typing import Iterable import albumentations as A +import matplotlib.pyplot as plt import numpy as np import pandas as pd from tqdm import tqdm -import matplotlib.pyplot as plt -import json -import os + import deeplabcut.core.metrics as metrics from deeplabcut.core.weight_init import WeightInitialization from deeplabcut.pose_estimation_pytorch import utils @@ -37,15 +38,14 @@ from deeplabcut.pose_estimation_pytorch.runners import InferenceRunner from deeplabcut.pose_estimation_pytorch.runners.snapshots import Snapshot from deeplabcut.pose_estimation_pytorch.task import Task -from deeplabcut.utils import auxiliaryfunctions -from deeplabcut.utils.visualization import plot_evaluation_results -from deeplabcut.utils import auxfun_videos +from deeplabcut.utils import auxfun_videos, auxiliaryfunctions from deeplabcut.utils.visualization import ( create_minimal_figure, + erase_artists, get_cmap, make_multianimal_labeled_image, + plot_evaluation_results, save_labeled_frame, - erase_artists, ) @@ -176,6 +176,7 @@ def evaluate( return results, predictions + def visualize_coco_predictions( predictions: dict, num_samples: int = 1, @@ -185,12 +186,12 @@ def visualize_coco_predictions( ) -> None: """ Visualize predictions using DeepLabCut's plot_gt_and_predictions function - + Args: predictions: Dictionary with image paths as keys and prediction data as values. 
Each prediction contains: - bodyparts: numpy array of shape (1, 37, 3) - - bboxes: numpy array of shape (1, 4) + - bboxes: numpy array of shape (1, 4) - bbox_scores: numpy array of shape (1,) num_samples: Number of samples to visualize test_file_json: Path to test set JSON file @@ -203,7 +204,9 @@ def visualize_coco_predictions( ground_truth = json.load(f) if output_dir is None: - output_dir = os.path.join(os.path.dirname(test_file_json), "predictions_visualizations") + output_dir = os.path.join( + os.path.dirname(test_file_json), "predictions_visualizations" + ) os.makedirs(output_dir, exist_ok=True) image_paths = list(predictions.keys()) @@ -212,42 +215,52 @@ def visualize_coco_predictions( # Process each image for image_path in image_paths: - pred_data = predictions[image_path] - img_info = next((img for img in ground_truth['images'] - if img['file_name'] == os.path.basename(image_path)), None) + pred_data = predictions[image_path] + img_info = next( + ( + img + for img in ground_truth["images"] + if img["file_name"] == os.path.basename(image_path) + ), + None, + ) if img_info is None: print(f"Warning: Could not find image info for {image_path}") continue - - gt_anns = [ann for ann in ground_truth['annotations'] - if ann['image_id'] == img_info['id']] - + + gt_anns = [ + ann + for ann in ground_truth["annotations"] + if ann["image_id"] == img_info["id"] + ] + if not gt_anns: print(f"Warning: No ground truth annotations found for {image_path}") continue - gt_keypoints = np.array(gt_anns[0]['keypoints']).reshape(1, -1, 3) + gt_keypoints = np.array(gt_anns[0]["keypoints"]).reshape(1, -1, 3) vis_mask = gt_keypoints[:, :, 2] != -1 - + visible_gt = gt_keypoints[vis_mask] visible_gt = visible_gt[None, :, :2] - - pred_keypoints = pred_data['bodyparts'] # Keep batch dimension - visible_pred = pred_keypoints + + pred_keypoints = pred_data["bodyparts"] # Keep batch dimension + visible_pred = pred_keypoints visible_pred = pred_keypoints[vis_mask].copy() visible_pred = np.expand_dims(visible_pred, axis=0) - + try: plot_gt_and_predictions( image_path=image_path, output_dir=output_dir, gt_bodyparts=visible_gt, - pred_bodyparts=visible_pred + pred_bodyparts=visible_pred, ) print(f"Successfully plotted predictions for {image_path}") except Exception as e: print(f"Error plotting predictions for {image_path}: {str(e)}") - + + def plot_gt_and_predictions( image_path: str | Path, output_dir: str | Path, @@ -262,7 +275,7 @@ def plot_gt_and_predictions( p_cutoff: float = 0.6, ): """Plot ground truth and predictions on an image. 
- + Args: image_path: Path to the image gt_bodyparts: Ground truth keypoints array (num_animals, num_keypoints, 3) @@ -299,7 +312,7 @@ def plot_gt_and_predictions( if pred_unique_bodyparts is not None: num_colors += pred_unique_bodyparts.shape[1] colors = get_cmap(num_colors, name=colormap) - + predictions = pred_bodyparts.swapaxes(0, 1) ground_truth = gt_bodyparts.swapaxes(0, 1) elif mode == "individual": @@ -330,7 +343,7 @@ def plot_gt_and_predictions( else: unique_predictions = pred_unique_bodyparts unique_ground_truth = gt_unique_bodyparts - + ax = make_multianimal_labeled_image( frame, unique_ground_truth, @@ -342,7 +355,7 @@ def plot_gt_and_predictions( p_cutoff, ax=ax, ) - + # Save the labeled image save_labeled_frame( fig, @@ -352,8 +365,8 @@ def plot_gt_and_predictions( ) erase_artists(ax) plt.close() - - + + def evaluate_snapshot( cfg: dict, loader: DLCLoader, @@ -409,7 +422,7 @@ def evaluate_snapshot( parameters = PoseDatasetParameters( bodyparts=project_bodyparts, unique_bpts=parameters.unique_bpts, - individuals=parameters.individuals + individuals=parameters.individuals, ) predictions = {} @@ -690,7 +703,6 @@ def save_evaluation_results( df_scores.to_csv(combined_scores_path) - if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--config", type=str) From f02bb366e902ac98349145cb7b3d14f6f4b1d5f9 Mon Sep 17 00:00:00 2001 From: Mackenzie Mathis Date: Thu, 5 Sep 2024 17:51:32 +0200 Subject: [PATCH 15/88] Update README.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - adding in CZI badge! 🔥 --- README.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 56525620db..4613fcb54d 100644 --- a/README.md +++ b/README.md @@ -48,8 +48,7 @@ [![Gitter](https://badges.gitter.im/DeepLabCut/community.svg)](https://gitter.im/DeepLabCut/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![Twitter Follow](https://img.shields.io/twitter/follow/DeepLabCut.svg?label=DeepLabCut&style=social)](https://twitter.com/DeepLabCut) [![Generic badge](https://img.shields.io/badge/Contributions-Welcome-brightgreen.svg)](CONTRIBUTING.md) - - +[![CZI's Essential Open Source Software for Science](https://chanzuckerberg.github.io/open-science/badges/CZI-EOSS.svg)](https://czi.co/EOSS) @@ -276,3 +275,7 @@ importing a project into the new data format for DLC 2.0 - August 2018: NVIDIA AI Developer News: [AI Enables Markerless Animal Tracking](https://news.developer.nvidia.com/ai-enables-markerless-animal-tracking/) - July 2018: Ed Yong covered DeepLabCut and interviewed several users for the [Atlantic](https://www.theatlantic.com/science/archive/2018/07/deeplabcut-tracking-animal-movements/564338). - April 2018: first DeepLabCut preprint on [arXiv.org](https://arxiv.org/abs/1804.03142) + + ## Funding + + We are grateful for the follow support over the years! This software project was supported in part by the Essential Open Source Software for Science (EOSS) program at Chan Zuckerberg Initiative (cycles 1, 3, 3-DEI, 6). We also thank the Rowland Institute at Harvard for funding from 2017-2020, and EPFL from 2020-present. 
From 3bb50988d0d1bffb147e0fc2c941d9c396e6c192 Mon Sep 17 00:00:00 2001 From: segashin Date: Mon, 12 Aug 2024 15:44:53 +0900 Subject: [PATCH 16/88] Load min_n_links from inference_cfg --- deeplabcut/pose_estimation_tensorflow/predict_videos.py | 1 + 1 file changed, 1 insertion(+) diff --git a/deeplabcut/pose_estimation_tensorflow/predict_videos.py b/deeplabcut/pose_estimation_tensorflow/predict_videos.py index e868ae515a..3efa72de9a 100644 --- a/deeplabcut/pose_estimation_tensorflow/predict_videos.py +++ b/deeplabcut/pose_estimation_tensorflow/predict_videos.py @@ -1461,6 +1461,7 @@ def _convert_detections_to_tracklets( greedy=greedy, pcutoff=inference_cfg.get("pcutoff", 0.1), min_affinity=inference_cfg.get("pafthreshold", 0.05), + min_n_links=inference_cfg.get("minimalnumberofconnections", 2) ) if calibrate: trainingsetfolder = auxiliaryfunctions.get_training_set_folder(cfg) From 193a243f6113538886a3ed04d39d2b2054f04881 Mon Sep 17 00:00:00 2001 From: segashin Date: Wed, 14 Aug 2024 21:27:35 +0900 Subject: [PATCH 17/88] set min_n_links two locations --- deeplabcut/pose_estimation_tensorflow/predict_videos.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/deeplabcut/pose_estimation_tensorflow/predict_videos.py b/deeplabcut/pose_estimation_tensorflow/predict_videos.py index 3efa72de9a..45a29b160e 100644 --- a/deeplabcut/pose_estimation_tensorflow/predict_videos.py +++ b/deeplabcut/pose_estimation_tensorflow/predict_videos.py @@ -1461,7 +1461,7 @@ def _convert_detections_to_tracklets( greedy=greedy, pcutoff=inference_cfg.get("pcutoff", 0.1), min_affinity=inference_cfg.get("pafthreshold", 0.05), - min_n_links=inference_cfg.get("minimalnumberofconnections", 2) + min_n_links=inference_cfg["minimalnumberofconnections"] ) if calibrate: trainingsetfolder = auxiliaryfunctions.get_training_set_folder(cfg) @@ -1754,6 +1754,7 @@ def convert_detections2tracklets( min_affinity=inferencecfg.get("pafthreshold", 0.05), window_size=window_size, identity_only=identity_only, + min_n_links=inferencecfg["minimalnumberofconnections"] ) assemblies_filename = dataname.split(".h5")[0] + "_assemblies.pickle" if not os.path.exists(assemblies_filename) or overwrite: From ae13531e742045337f57f80b5fd2da952ab5247c Mon Sep 17 00:00:00 2001 From: dikra_masrour <79920322+dikraMasrour@users.noreply.github.com> Date: Fri, 6 Sep 2024 19:00:35 +0200 Subject: [PATCH 18/88] Benchmarking user guide (#2716) * Benchmarking user guide * Update Benchmarking_shuffle_guide.md * Update _toc.yml --------- Co-authored-by: Mackenzie Mathis Co-authored-by: Mackenzie Mathis Co-authored-by: Anna Stuckert Co-authored-by: Marc Canela --- _toc.yml | 9 +- docs/pytorch/Benchmarking_shuffle_guide.md | 135 +++++++++++++++++++++ docs/pytorch/assets/img1.png | Bin 0 -> 233790 bytes docs/pytorch/assets/img2.png | Bin 0 -> 164383 bytes docs/pytorch/assets/img3.png | Bin 0 -> 17862 bytes docs/pytorch/assets/img4.png | Bin 0 -> 28157 bytes docs/pytorch/assets/img5.png | Bin 0 -> 97295 bytes 7 files changed, 140 insertions(+), 4 deletions(-) create mode 100644 docs/pytorch/Benchmarking_shuffle_guide.md create mode 100644 docs/pytorch/assets/img1.png create mode 100644 docs/pytorch/assets/img2.png create mode 100644 docs/pytorch/assets/img3.png create mode 100644 docs/pytorch/assets/img4.png create mode 100644 docs/pytorch/assets/img5.png diff --git a/_toc.yml b/_toc.yml index 0cb5c2cb83..486846eb1c 100644 --- a/_toc.yml +++ b/_toc.yml @@ -29,7 +29,7 @@ parts: chapters: - file: docs/quick-start/single_animal_quick_guide - file: 
docs/quick-start/tutorial_maDLC
-- caption: Beginner's Guide to DeepLabCut
+- caption: 🚀 Beginner's Guide to DeepLabCut
   chapters:
   - file: docs/beginner-guides/beginners-guide
   - file: docs/beginner-guides/manage-project
@@ -42,12 +42,12 @@ parts:
 - caption: DeepLabCut-Live!
   chapters:
   - file: docs/deeplabcutlive
-- caption: DeepLabCut Model Zoo
+- caption: 🦄 DeepLabCut Model Zoo
   chapters:
   - file: docs/ModelZoo
   - file: docs/recipes/UsingModelZooPupil
   - file: docs/recipes/MegaDetectorDLCLive
-- caption: Cookbook (detailed helper guides)
+- caption: 🧑‍🍳 Cookbook (detailed helper guides)
   chapters:
   - file: docs/tutorial
   - file: docs/convert_maDLC
@@ -61,9 +61,10 @@ parts:
   - file: docs/recipes/flip_and_rotate
   - file: docs/recipes/pose_cfg_file_breakdown
   - file: docs/recipes/publishing_notebooks_into_the_DLC_main_cookbook
-- caption: DeepLabCut Benchmark
+- caption: DeepLabCut Benchmarking
   chapters:
   - file: docs/benchmark
+  - file: docs/pytorch/Benchmarking_shuffle_guide
 - caption: Mission & Contribute
   chapters:
   - file: docs/MISSION_AND_VALUES
diff --git a/docs/pytorch/Benchmarking_shuffle_guide.md b/docs/pytorch/Benchmarking_shuffle_guide.md
new file mode 100644
index 0000000000..61c42107d1
--- /dev/null
+++ b/docs/pytorch/Benchmarking_shuffle_guide.md
@@ -0,0 +1,135 @@
+# DeepLabCut Benchmarking - User Guide
+
+## Reasoning for benchmarking models in DLC (across DLC versions and architectures)
+
+DeepLabCut 3.0+ introduced PyTorch 🔥 as a deep learning engine (and TensorFlow will be deprecated).
+For reproducible data analysis, it is important to benchmark existing models created with DeepLabCut
+versions prior to 3.0 against new models created in DeepLabCut 3.0 and later.
+
+When comparing different models, it's important to use the same train-test data
+split to ensure fair comparisons. If the models are trained on different datasets,
+their performance metrics can't be accurately compared. This is crucial when
+comparing the performance of models with different architectures or different
+sets of hyperparameters. For example, if we compare the RMSE of one model on an
+"easy" test image with the RMSE of another model on a "hard" test image, we
+cannot tell whether one model is better than the other because its architecture
+performs better or because its training images were "better" to learn from.
+Thus, we not only need to compare the models based on metrics computed on the
+same test images, but also train them on an identical, fixed training set in
+order to "decouple" the dataset from the model architecture.
+
+Creating a model using the same data split can be carried out using the GUI or
+code, and this guide outlines the steps for both.
+
+## Important files & folders
+
+```
+dlc-project
+|
+|___dlc-models-pytorch
+|   |__ iterationX
+|       |__ shuffleX
+|           |__ pytorch_config.yaml
+|
+|___training-datasets
+|   |__ metadata.yaml
+|
+|___config.yaml
+```
+
+## Benchmarking a TensorFlow model against a PyTorch model
+
+### Creating a shuffle
+
+Creating a new shuffle with the same train/test split as an existing one:
+#### In the DeepLabCut GUI
+1. Front page > Load project > Open project folder > choose *config.yaml*
+2. Select the *'Create training dataset'* tab
+3. Tick the *Use an existing data split* option
+
+    ![create_from_existing]()
+4. Click 'View existing shuffles':
+    - This is used to view the indices of shuffles created for a project to determine which index is available to assign to a new shuffle.
   - The elements described in this window are:
     - `train_fraction`: the fraction of the dataset used for training.
     - `index`: the index of the shuffle.
     - `split`: the data split for the shuffle. The integer value on its own does
       not hold any meaning, but shuffles that share the same "split" value use
       the same data split (so their results can be compared).
     - `engine`: whether it is a PyTorch or a TensorFlow shuffle.

   ![view_existing_sh]()
5. Choose the index of the training shuffle to replicate. Let us assume we want
to replicate the train-test split from OpenfieldOct30-trainset95shuffle3, which
has `split: 3`. In this case, we select 3 in the *'From shuffle'* menu.

   ![choose_existing_index]()
6. To create this new dataset, set the shuffle option to an unused shuffle index
(here 4).

   ![choose_new_index]()
7. Click *'Create training dataset'* and move on to *'Train network'*. Shuffle
should be set to the new shuffle entered at the previous step (in this case, 4).

   ![create_from_existing]()
8. To view or edit the specifications of the model you created, open the
`pytorch_config.yaml` file at:
   ```
   dlc-project
   |
   |___ dlc-models-pytorch
        |__ iterationX
            |__ shuffleX
                |__ pytorch_config.yaml
   ```

#### In Code

With the `deeplabcut` module in Python, use the
`create_training_dataset_from_existing_split()` function to create new shuffles
from existing ones (e.g. TensorFlow shuffles).

Here, we create a new shuffle 4 from the existing shuffle 3:

```python
import deeplabcut
from deeplabcut.core.engine import Engine

config = "path/to/project/config.yaml"

deeplabcut.create_training_dataset_from_existing_split(
    config=config,
    from_shuffle=3,
    from_trainsetindex=0,
    shuffles=[4],
    net_type="resnet_50",
)
```

We can then train our new PyTorch model with the same data split as the
TensorFlow model:

```python
deeplabcut.train_network(config, shuffle=4, engine=Engine.PYTORCH, batch_size=8)
```

Once trained, we can evaluate our model:

```python
deeplabcut.evaluate_network(config, Shuffles=[4], snapshotindex="all")
```

Now we can compare performances with peace of mind!

#### Good practices: naming shuffles created from existing ones

In a setting where one has multiple TensorFlow models and intends to benchmark
their performance against new PyTorch models, it is good practice to follow a
naming pattern for the shuffles we create.

Say we have TensorFlow shuffles 0, 1, and 2. We can create new PyTorch shuffles
from them and name them 1000, 1001, and 1002. This lets us recognize at a glance
that shuffles in the 100x range are PyTorch shuffles, and that shuffle 1001, for
example, has the same data split as TensorFlow shuffle 1. This way, the comparison
is more straightforward and guaranteed to be correct! A minimal code sketch of
this pattern is shown below.

This was contributed by the [2024 DLC AI Residents](https://www.deeplabcutairesidency.org/our-team)!
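To make the naming pattern above concrete, here is a minimal sketch that mirrors each TensorFlow shuffle `i` with a PyTorch shuffle `1000 + i`. It reuses exactly the calls shown earlier in this guide; the config path is a placeholder, and `resnet_50` and `batch_size=8` are simply the example settings from above, so adapt them to your project.

```python
import deeplabcut
from deeplabcut.core.engine import Engine

config = "path/to/project/config.yaml"  # placeholder: point this at your project

# Mirror TensorFlow shuffles 0, 1, 2 with PyTorch shuffles 1000, 1001, 1002,
# each reusing the exact train/test split of its source shuffle.
for tf_shuffle in (0, 1, 2):
    pt_shuffle = 1000 + tf_shuffle
    deeplabcut.create_training_dataset_from_existing_split(
        config=config,
        from_shuffle=tf_shuffle,
        from_trainsetindex=0,
        shuffles=[pt_shuffle],
        net_type="resnet_50",  # example backbone, as in the guide above
    )
    deeplabcut.train_network(config, shuffle=pt_shuffle, engine=Engine.PYTORCH, batch_size=8)
    deeplabcut.evaluate_network(config, Shuffles=[pt_shuffle], snapshotindex="all")
```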
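Finally, the fairness rule from the top of this guide (only compare metrics computed on the same data split) can be encoded as an explicit guard in analysis code. The sketch below is purely illustrative: the `split` and `test_rmse` values are dummy numbers, not results from a real project, and the table layout is just one convenient way to hold shuffle metadata.

```python
import pandas as pd

# Illustrative shuffle metadata only: dummy numbers, not real results.
shuffles = pd.DataFrame(
    {
        "shuffle": [3, 1003],
        "engine": ["tensorflow", "pytorch"],
        "split": [3, 3],          # identical split, so metrics are comparable
        "test_rmse": [4.2, 3.7],  # dummy evaluation metrics
    }
)

tf_row, pt_row = shuffles.iloc[0], shuffles.iloc[1]

# Refuse to compare test metrics across different data splits.
if tf_row["split"] != pt_row["split"]:
    raise ValueError("Shuffles use different splits; their RMSE is not comparable.")

winner = tf_row if tf_row["test_rmse"] < pt_row["test_rmse"] else pt_row
print(f"Shuffle {winner['shuffle']} ({winner['engine']}) has the lower test RMSE.")
```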
diff --git a/docs/pytorch/assets/img1.png b/docs/pytorch/assets/img1.png
new file mode 100644
index 0000000000000000000000000000000000000000..dde3d96115b4bfd6d158bfc08835daa082ba397d
GIT binary patch
[base85-encoded binary data for docs/pytorch/assets/img1.png (233790 bytes) omitted]
z3v!UpVc4^y`Az-#CQsnk4$O)5mXfe)oIh`3c?@iSJAPLNo;M?M-L|kirQFyDDP?)U zsauZL=am;!DXgo@7XDy7a+G7)!^N%%?XvUDQ8MugvReXHpothtLG9|z$0Vv1*PV1l zCS3p%S1SApZaMl7pYo^KoIP?|OWehrPuj@#CO&FyPbpu+Wl9AuHb1!lHDli*7Hf2(>b8OhVfuXzBK5NIXydiS9VDH-dW5+p* zby>unr|IG)tm^Yi9MIwih*M+rX2I=WCi0z+ELzxW>&fm<+eNDw;HDA+@XwcR+o5)b z7On8OGWks#6Ax++{UOxn7`SiK#Pt%hd448ox6CbHgruGE8?N%gIyD~@qUET>a($*< zk!-faoufgQ})r>1OXAmWkvtb3LnFZ+XxUnn|`G z1_Y)vHf@!}CA?D;c<#l!t*D2;SA)vXY!4=O`n>z#9%?zvA%=(>tKC|&ViS80dPWPb z*ngd3q>^2C@N9RK81iXjts`>fUZFNLyJR0zCtnm6cU;1auoxeVgwQ{=jaABBpBWc z)GGhmrErSxMF$fYwjK-bAM5DP)s;_HJfiIgTlW#-$!WBNwU;ZW2CIK7lxaX3NP6yZ zUhzT4r^i!?$hYY#(XnvG4%gkOut!}~*W2yxUr8GIA;w3NY9>dt>-P!V^Z6TJ@iGPq z_g8B+uZyKG9YsEicAp4hd{};?;^aW(M)TJv8^as8F5i8SBi6Q9x3YV+%iMKdpxuKo zR_A<+m%cKZ38mc=Dn}SB>|cKHtgb+FB+S84xHF&f)N)1Ee1i`gI96*f$9+dH7`Zo& zsrSwLv|6q%mI4w}$cA@xy|KsT8I0YyVKZUvda?j9tL~l{mzd&SC$!_ZyU#nf9&pQW z#;p#?E`}|q=4zT#29k*WdvdvfSDblFpddw#VqHh4g`uK`>L;k=yJgGTAGEqVQlNJz zX<4Ugm~Q3_sD0Rz(sJn&vcR{7_ty--3daH`AKT$kb2r?+&Sa-3r}`+lNqNR_4Xa~E zhHLsc^;hidQEd?*&u|DjS9x|kkEr?oID6~3D7UqLSQqb<9;?~>0V61YdChvWLq4BlCz0dw7T^K>w(nz4>#+x_A=eFS2UG$^ZU1$&YudPXCC!32E zd2T-byt?qFcvw}BR>&pA$gTPF^x6y1vvyEZ1ZC)fF^*J3oy<3Uy|OFv#y{BL@z}{5 zQqsrWYWk#WNQ6O;QiqI$K2L#PYUsHwq%jnRN@WnSlc=jhyV4!UC;gQ5NTyMpRVg<5 z8|ymT4d{CmZHe{P$)N*`a3T4ex`c_f72#iR=c-bX?MUy*ds1uR5c=%}60wP5bABRi z(t(&g)s{Qp#|dpq;@wqH-?~Ra3>_|!TXKMJ+zVFArM0k9PqeV2ms+xnleSOtWNw9_ z^o$?IonYR7GG`r|bjqWaWx7^2llDQ~-UW=Dn}EO}3%K&@;T6z?%TOR#{SsV8>Ol#F z$m=|H|Mll|gL$;4Y1)0p^eO0tZcp>*+l|Y}_!QZkLmm2VK5ZV%BUoM>?y?vjsevMU z?{{~rxNC0M>GprVFzp*AeV}^I;()RRXD;7XmqWvY_SOaOyH?NmLAJ)WrV(q*6h$WD z*70YrI#o}e9bCA&t%m86Zy2bXBi=cf)#MDmXKvkhfe7V69^p)!>ASiU>x2#nv$WNg zF>=y$v(3h-}jAM6w*jUKbW^Rts^+QhuVR@R%ELczr$v5IPzKA+HG7V zcLGR=xAY7q&N}Hrq^Zw1;of|edr$$Ql&DZ5+uc7|`_RR}6sA7DEAF{(BUY(o3E4V0 z)tyr_leb9sDChlMg9H&WQG(+VE|u*$WB3EEUJ{s{X>P+z&ygBeO4gG^3(M&?gu;bo zB}H4_4ze0bTU)anA@sdMc$?*p1vAIXPPrK(c3WC42UFVWr}eViuc?c!Rho+t39HkI zw6yRW%hi=7#kegKobt|F?TbyKLC}8KF`gPNB*shvi&9J#3VXjfr1`Y5a(weXmdCyb zqbo6SA9-FWk=&}5jr9dhWCwEJzzZ9hGvfPxkv`AGgNsC1#J&eo;eeu3m{BQ6RY!C# z_CVIzt{zBte_QnUB!)N`A33rbS4Q%^!G$%P3{utaytV}C)&*^?@9RIOvV{t zGSo`XOrII_Ij-x`x7HpFhi(@d=^Wr=!V9C@%zYOfZ62*X@EXySgyo%)>=9)08&;YxvA(go+?JS%}p%Po-eu zHoGql?;jIQE)of-p0h&Tcr!#=-kYas+#nfCYc^7HQ?DzpIsT!Bz$v?muW*crtEKG8 zDWi{il+lIR&4bs6z#)p*b+u1Wc`rGlZx2^ESjANKtGXJ81r6Y(68_}hDw5-HHT^>y zVKdZh?$q4Zbc?Uum2G=~XE2w>gEO;U%dT8}sxDHvxs$q9t0!TREIf@u(t>pE&b|VAJWXVFmTG8Tu%+!0X0&h@Gig#)>07bq z(%zA}J_HG?+j7T+N5nOb<-AFo!QzsG;)%XV>$@(VJJD_M+%h%m9Fs*K&xgGa1e(y` zCF?%!K?z%_%lZ3FlAa%iVk)=XI+uo_$jJ6B=`ZnZCY$Mvp)C}yCF6vdRDCsz=LVoT z6w3}X$LGsAJJ~1ZieqEsNem$EXjJ^vXi__Rv3^PYTBCfbV`WA7UL=oW)rz?1x3AJx zn!5QA_OWH}&e2<6xr@Z2CT`#VG_X~3zl9U%^CVf8M)l^L&lxIJrTLxHZC|EOG{0gr z^p6SH+uAiMk{w=w&{E;_>Yrud)fW&x@|D||5q!G>U-=#&%fA1wSQh*UWo3aO= zP`Gh@KGhk#=Mfset6G+}eVGDD!=psCIo}V77jk5AW1HsWt+|!tJ$$?NNfdor1~1<34g-`&a!PED})AWx$caeCAH?mPZa#e|-1cS(RA<6ihY z;BiwZ(Q76NKS@KzMZ>YHiNlt}a$*Enmk%0`t!jyy#RzHx!@RCfb2a&0%CVA8IBc-! z835dEUx8Ul9EQv^$HspV?x}H`g7I-Sv?|4Jr)LMG;7-p5dJTI=X-p51t+N1t>i<*s#4*Z$JW zR0FlXV0CW0WF-T_^|`*~(*tE+K`X}A;4#Qf7kh#DqDZ_DL_pi7Y6})L)mo@Glzv$X z5uxI_L$p?PNEI+;j0u0f^f}p9as%zxRzgfTs*$Z(;UK%T_?;G5bA`*W2gGkq4<;xW z61Rdiudq?(lSh5sAAcrfPT}P1{Z%Eh&-r_iW_KFoW{!SctxBSpd_%1$pRzmX3@7eP z5M2q9>^vm%^xHNQsJqkXSn_p7wZIm~YG;k!Qlm8wh39(Kn0juH)Oa_AVMN{ym!c<8 z@Wz&2F;&5yOE46SMA#iOsB2JeV<&12-0{dPmDrC#{-vI%s@0%nF8<_?)PqI^4zzKC zj%8=X5Jf&aD7(47N~8^CM)nz{!!l{$ZzfeBdekIT#{qcCwf9nt++vb^LdsPY>#Sz! 
zIrX{iL@7a%r*2Fg;)9C3S*d)%6NdBiTbiQy@QoY`D0jl$ih;43ttRt;z@}lS|Eoy$ z!vlE9R!8-j543{|Mmf7%Mr&zw-A!KN@r(-jzV8-8&pFM0zdMitRLjO~*i^89f3mkF zfHk@^28`CWy(XYVP_+%h$YQZ5X_6Y~nmgp?&?(Ph-nYhATzL{DRJ9@^r!H~AZX{#l zM0BeP!>mH)l?#{L;qL)=)(o7UxBfFLJ}nu0TX=u+Y7wLngFd{13A}pX>o>s*pGtH` z$MP@^&%WV;p*H=l4x--x?-EYDLxM+mPm85@g595n!(efHggNW4cW$!85Uop!b7#J} z9IaLubGnr=WUFp2Y(Lmal}{#RKAo3@qSfa~UiK76ObiFPb)93D@8QR=snkZ2Dz>$= z=NIU9I_TBG4HZ$m>`;~V+V49c-sAUve{X9j!*Z>m8{zCNAQ=y&j$A0Q8qJQWJG@|s zK7YjGZ)HUZdcobbz4=e3ZXAPoP(JG?YI)!X{gj4zJ(G0KSF)Bk%gE^9M$hLZ1hZgr z;(=?Dz);W9h`+2O)F;-NpA5K*hLq6)&P$bm6vo~g)TmqYT)#qufQ!_Fwax=PwjS0J z)}_*D2`MiFSWUA3PDos1a*Cj}o3eF-859y*{*9dX4abB$7&mi@LHI@tmp8jIGXk|Y zuiSydv-3}&NHR@&`qOC7CCXKv9@8y*0>bpYl~wXfw@MxCD6QZ;Qj;msjKhXxnf?aZ zjN6m<8NEg}7u+@yARUQP(G45WSX_=FU9hLhN^;c&%!uVViiU=)D%BP<`Vv*!Shnb~2w){Xxz%TQf5WpLOB&xR zHQf+Qc76?$5%Rg6`ULT>7$B zDt?MeQCA$Lv*=?(SI0e0b43E@a%d&i!)f}2Swogz-7E@nL#pc~6nrKNZ zeH3$Fq-;MX1n!8m$m~%C+DrkKz@u@FQ`2DGL-JJ`E$h}2R%nC^sIw+r&l4wX#?szTq^&Mv?=M*R!-wm3XuA}2B1$>0@wNTV z`dXpIQW|TC+nBqqnFDLpSEQcuM=#el(F68T7|M+$rr)?yQF#c2@O2xxq6BVfrw12K zIK?Ava!%0lu_?=d^BlobLLRu?q7#I2%7-0z`@QCcn$Ye~UP^;S8hkiCDETFv1ZV8{ z@aB!ZHo}+d4zDW9jLAIdmdS4&5MQrY+^AVLv3=luzbCP4!X##{blP$*UI4QWGAxrI z6JSK^bdwC*+D*!Q&x9p=`I5glb0|oG)ZH#+u&*#b^8Yc@PcE>fm!3QY)F5`u$mOn( znBTW|lF|=sDtyxu=S)Xxwy;4eR&Kt9tl9O`0BMKXnac{pt>>+u4#vU zm>v0k!zB583dVFm9mGyXABBRKojW8gD?XYxhP6J?%WBFt^|AGxNqD84zr5Y=E)-p2 zsZ^8I!TrN9Gcgm@jLjb}*-OwiE0w;W8OGRApg&%&wh;VZJz!x)4;nFL9kr0=_p{pc|jNxmJpYKd1C~@uCb?6CN^S)#_ z#dl4XAX;el6VGzNzQV2y-yrt&RJnGIZg*WD^16!pNhTgBkH1K(;O>P%XA0G4`c|rBX1sLko`T|eLyfv{};r=WYl@7wUQEjNgSKewu zY#+m|(4L%9X2vhf&Q23;>6{H`Vw7~zH%Sz~S!Yt-uzrUCiql$=Pu8E`Fwm{7)SA|x zDS!(@g9K0r|J-WiDuLd&P5fxDkq`sG*0ntV zwpK_PlX#h!*6PQTZ6-@Q7x)0+iY=uu%Q6kgZu6it26i(!k`;=je?+S!o0{_qr(kg` zf9@g!43$m8!v*DeeHZ`sDMo27b5pO+$Il-qwHIwB#u_^nyUcao7HsO|0WMVCVhsIM zmtS05n-CluOl2DFiuUDI?DzL7t;LRI8pu4d6TXMII=PHgA4bibA2b44r#1I%_2Zpz z2a{y!))u$J@`cfIc@EtxoQnp+B13cKCC>|0BgrDOUwa)46;_LXnF?`Emg?K3cz6() zj8KUa+a5b5%`aj@_$L{G?tRvn*AYTMZb9i*bk}pdD+lU^Ubg?>`4?wfj`9%|7L=esz!bZ(F>_xn_z1y zgE&v91T0+ymSctU3kKgZu7sMEHUCNZBf5$N?dDemdv2|v_u-OiXFeQv0_{TJ;WQZ-cv1_p&NhvX2Gj9isS03 z^?4pWBLnR+iW-}W`qLfAE9LntHRn{9NI@|HimoqTC0l(26*rJS;-=8k&sch5ZT&s% z<(W3@oVtd=H6W=$#g_t`-XcKEbOasK_f*q|>Q*-F>q*EHhC7bSlG9UBEk5hO0hc8pV=Q=61K zy2VwR71x&9RKyAX@Dv7<-ZL@J?Z0=Z6i-INP~!RHqM7eU+#a5CIJx;w6SKB);8@xG z1IBsNsE*~)EE`^_lmW9R{@YKGX1!e9n(4mAb=Ac6w4MDg<^)p#Vb~cuYvhV` ze8O&F*z!VeL1u5R_6T)TimUx?hWpU&T^Eyp1Hx&MbLZZ9&0fr4h&+( z#~P(@X;$Z*YEWwDbzE2X z3>Bf2_gfp?FCp~);W3HT5*a0R6w^lO@C_ZfuDkGQU&(`w%CYeB8;$G~PsDRoXgT*x zO@>Nb;kih+{yGo_UlbXD&c{y_WQE)!D0A5^ZL`&3)tmjqYy?Hl zUjCajo9kZAK~{3SnzgT`6zcZAveWWe)fzdiJS{jF6{_rYw$wOj39{LP!}{t3xq36_ zy5M{ZX-Myb@97nLX^rS;VyTrPBY#w%m|v_;>84!bNM%a|p9uzCWgn~JXo73AwkPZ#D-`95IZ7m`$)LU%0{mC2)+2slq|&rP zU*yCAfLkE_>5i32n}zLO7gzjNx4WDiqgzVsP?g=~ddzxMbukFFMfha6Gy=8Wp5k8f zcy+3!<)xk}KQpuEsx4&YebDBclnR1@w!>Po_3fr4nb!cg+FixmNoy8&;_%uRogT_I z7BQYHo;I;hlu*9b7_zx9=8GKZ`ThcAsM$eQ8<>T&|A$#fjMs{KiORmmIwv=}u0)-A zqsl35=$ZfUyDqQG%Z1b5G&AD zqjhqdi(_%-BHQCl;f*s*4-)Lpa_DPxcNAC*4iV5Pdm>9rf%{^ zxq1MQR;yZvjI&9ma6#rlJz<~yH4avH^6fp}v#P8JPZl9oIk)98PB4>3@WsyF6=S7P zEVgFf?x5go0g?h0W76F+SZKMmB$Xyo_{obbt0Q%39apqI4GXKqKk`-| zX#zn?)ai~B*MfV>^-MY6c^FG_84T{8Z*rqtnn*XR7%Af3mZ5&?Ge%llnyz%He1 zeWh$!XmVO#=rXI-aZum}w9tQk+%pCF%jQ9bJ`l`CKj<)2wWK4EoB&87L)CN-nJZ2pDNJ>Vy>U%boM}r&x;e{0SvL5!!HY@AB zHD$eLX(jGGv$2{$hcQJPVnl+g;P&^(;wRGe4d~a2@Cfm#pY^Y3mkpD-?8Gtsc@wy& z>Ywdgn=R|L;1_QE){?4z6iPnl1+9(W3e96E@d%xQWK(Fabo>NYA5=IR@FE$dXKBu& z>HYZRe9}Obi{e@Xg{s7ERGcgNfCZNw*;$*34Yt=22zxmY9v@d`zBOKQFpwbdP`>t~ zAfQ#%ZI;gEbR&X~k1u{$rXeWFA 
z@W)?ofgZUNwIEq%kQ; z(o?GevHwCPj<0dTgV>7t%l4IAl4MXJ=|`C6x(|f?n|xBz5!3uBZvSF@ln^JRR?O_l zwe(1S$@gqKd;u!!Z5*}{X4L>{8HM_Ej(6g6gCZvemC>nwhAC9K+8xVKHb_@3Nce4eloRlLQ>e!VQ|v zH5-26pd|&0AIl{K4R8h;4sB0xS|&lZj7MV?aoRaI=_JnQl28f&w^$XR^bkfDLLA?+43PEGTSvG^*U@x&}`3 z)fIfrD%m*pR%4fgSUW;xPwqDXJ5pf z=1)=|{fqJ>t^}w^j3J^IcJvonN_<5T$tzkJPb_L>j3zrnS?tWt;RpfKr+n5z)RJa- zc@BB%8N(1aEoaj6r=PP<9X*bZjWT*U{k)-7^?x>d@#kh=5Uu@fvz$jsiFmG~^Z%rj zJ$tlzoV0xTa63Y0j-z>d$~;`i*$H*8GzGV>G_qS$_^fE!=M_UmlDJk-mC0Oefzj$JHn1E@QTDlFbvYQ~$MnK!dw9EJb1q z89L8?Z2uU(oRU)ahVSPW(v&;%uFZp+s>cKbt{%}gkM!2=4I8bYU^%;rvlxyy7PS(+ zEyJ~(y5|!8j+pz?E7|BnomhM#@|Hm6ftD3>dGj~cEWw#Kc6INbx>BitN!G~<4R0Y2x#R|by~88CcvYj?K~9lVR}0uU zR;1&|+2*V_^uJ!HKnhbx>KKt_NFO>=``2^&lKd#CFgxpPbVm1FAOAl5Y0i(G$gdDf ztgtJ*y_KmJR0Pm_k{2L&G;V?_SOPLAvO|?z`$4Eblz#X*5Z+z| z1yE%I8-J|x&!3^Z1H&geu4uOsp_QSa*=(9}?0UOqU-mD8Hih8Rwe2@Pr*fKXx-f*a zj!h7K$R)yG)b{6R;Ev|W2p{Z@WGdZF1r3ffWNNu|M-Kf0yvAHPmO=u+yC*MpGN_V{Yg9u2cDSti$}WkN&1cX z|2pQs^5_5W=J2V(V7QcfDmWMVpS<_Tp8Wc8mj*|^W^d;(RcINGWyz4)aP_{3!^bkGSvrmLMeK1R?m$;6gS{AHDn&VbdT3p8>~jX<>gNQin~w7k|r~{1fhFxrYz@S|P}h z9eAeCse`MA{rCJk$pVULPl~?0@i(&kZ`Og$l2Q&bFj^b@4&T8T2nE6Vf9A)-Kf3(Z z5`g!|1`;LNmqAen-`(Fkg=8EdqwB4bHNfzn-=JYQ6;Ojt6}`W80Di}T8a&G{yx08M z;x~?ug^~;gYIJ|9(aUrxYXAG#etE`!;N-AB2%51Tw^M-RSLa ziNPTu5QC2AzkKh1#fdDBf7n5}A97$Op#!4a-yoz1@CHit=#KI$|Mu&$WrQkb&UX9& zekW3s__yE2E%%i%i-0*u@ef&^2MMNs{{&5{&id(lIXL$-UXqUF}YWMVwPafDep#8d}{-3I@dyS?$RD zl<5t0KHEDnt`9mmBb*?P7ka3kD8>DUL)3Nd%hqF?;2x&`mJ8X7zzt3ywGX#ETFf(h zSZT{0ewaON#Dx)IXw*%T&CeUFot|LlEF#CB?A^%hDY8+DkIH|=gDZlWXx+?5ipsAY z<#Jfs2}QACrPR0Ge_;YJO2Taf3!$VHE^q!PfoLkn_mVc|68hoyG?gF!0igkeUm->I zJ%AfW?INP0E*pwKS>EiN%_oV+eg6jav&pAQi*RC**j$aFmdWG`^nUuB>}m|;<*jW; zILL$JU;#2jvx0w^yo4Kge!QZ8XJ^Vvuz>R z$$|UuZ(fjDNkFbwaH5O8sap4U+M7^@=UYRM;Xl7vUni;Af4TJsE)NL4%?>yVE5`Q+ zvj5BJRKo*vg5p)oN~xZLf=S$OZz-p=_`g+#JtngJ7-m1UU7X%!7vpwBo)27`3S zDzH5miD!KrBgP&iGt+%8YhXTqvo_W^rv;~JomhWcvZ^;F2AXYpbzga@S zA;O-V{<1A|SC^Ii565ns73|ZGkMHz_<0wDXsZ_#WLHvV zs%!!1r&fQb_2Q9X_!MwVMzryxL0TC2>?0`5X0apvKRHsE8R1|UNqWN{z=-Vr@mrW} zUW3I?%ke2Qe|h{0$kJ5-iuKT?7askIjJ5t$J6X9ndKkRJp90n~CdN1H#R{fIe%${A6j1LSXyFs)oB zOjn``N>TC&w;UI6wDOH{y1&>SrhghFmQBlspHu7Y{vne;cJOKt-z!PEVfvFvBiB)Z zvg`@+?*Y!gG&Lb0>VYRnNt<=Woy9RC%Q3vo2|P2tqqGuk_piXoh+~6wCCG}D^a+1$ z=Z8jviBfj1jsD@TGOWk)T>Nq;#V^rts?29+axb~R6%?Y+x>kEIpI&g%m99w>bktYx zhYntGKF9Ukop?7)_5&$c&4)atWbz0Zf4`lHFc}{;aQ_wyW%5sk*ft_x7au1QwSK-& zmzUsI(fjU;56FAK!N?nsG-z%TnYShfsfXy@N;C&h%3yK@C%ymB!c4-75JYfuLM(sA z%wPK%@6@NI`ye5lq4=>{E7b@R$LF5vFkT{5O!GSPNBKWXJ5k!h{p?$S1KRaS#&8*r zU-Sk4dPW5p49u2{(v30ExAT+t?HPu1jz{t$O{PkxKs9^hjUDo)f!u+n?n=Gw1ZffV z@9*Jjasdrl?FpC6O%h%goqf+UGr5vJHOdhkqP#VJD*isl-EjSDgx?gvNt?S{ZY=g0 zuO`Q!tx05dRyiRn<4R#Y(VXrvqhl4(M*8`0&+^8w$%ipUC+D*y+6v6Hcyr2Zik>1k zEz2TJ^n*RJy|^!fqEm12@>tN87GW(P7CE#=lQ_@Q<#(14kWbPnR|2j83g}gC4xrtF z7(k2NYRK-;2VAEz6qS58i%D#gHIC1_F4?(Zz@lp~Y^E$J;6U|4T@+PKrJkv0)d~9b zcaSEiZeg?|Vvltbzx0RYC;e0wr3L7u5mUn~H=X$$tT?e-32#RG zZTSv;Ut{yP4w;*CYW-Yg7P}TrBj4)Z-h9HLKiI@;rIn|+TD7-o<93clMczUsR&D&X zRL2reA?f8PpkqiX#V;Yy-Sn0&{uuQ*F?ZO@^8(T%8Rghzr=!~PArh(R$`~ZOy0nxL zKfi}fAUkv51xn~o{?lGW=V|)R^&L3&giJK5H>qyKH-TnX*S9s%oWo(0eD4aYoyD!2 zskiUyE0{Kj5!D8!?!0;ng6PY2TmnBys7&DVN+Bt1`|SwrVBT}e^=a2%8s<}hY+3t6 zdY{^YQjfr&84j4ATW5f9l|`iy3d@^kiW9)g5z)-P7Y~@|UzzuWgWBBhkKgk9tzYX| zq6!FlPOQs%qnVp#=CN@*s7}fVr|zu#8f$8?^f(iu>zU_Bl^#S90UF0Hk5nwbV#^n| zniF8AR5%MX+g`{!pmH}(}zegzSo!KmWvC=3Z_>RwY z9@K`9LlgbS#rRjAhG#ju!pow-U>lNNO<%z>E3KZ@h1rMv-q?!+`=5g|o9PSdERveO zH!}Mxq}L3S7$I}{$AN-wBfVws&jiXESo4T0)#5~Xlxd{*x3%Z?X0;)KD;N0j!zGEs zEXKmSo*@l`RoU!ObLmco>reN4~gAETe?G=fYD^oBEWTI;PuFNhx9j-JU2h 
z)wC=x)t^nEzCm;b#T5ziadd+lBg@Fvr7`cxPtRp)HwSu16ZM!;Ri3T710}b2zm3j4 zC0DL0v9#zI_r=u~tBhkyF&~&LV&#NNa?{%@7&;+d=6C_j*bwY_RX$`2 z$-2o>L3Cp*d9TLhsDIg|vky^qx<~fUEW10WG=704uoD?n)&v|tI+B%Qt~e&WOu@`Pay1DL55F6> ziRm*vh2+??z9{AQR;M_vFPVS|YS7It76nQSPae1pCXnoDyQg5rV~Z;v>G>MSUC;H0 z=bO{`bKiuV`rNIB5=EwsVaM>}Z1h=|5iY%oynV~X!gV8K|C(pF zpFc&?s<+N{U@vs8ojx$+(SDn8_KJ>%FlJfW|6nS|R)DLs8lh_uLOQkFTQIsB`%w$-pR~X10&tvo0Jd6|1lnH7V`{H%U`Fi>OiGsw z=lW}s8_9d>@ZlX2l`n8D2cUZS8<+A*-`hU^rK#rC#VwiYK(t$H$m(|j#AWEXWWn9t zhLinX_c}EoSgaDQ_!qt%BqvWK_eh0eu$ys-N-lFl==pn()3=yKSvKJ1?QUH?Nu~rI zhOP_K@us{EWwt{`bo~evA^G4XL4~iN>Y06lz0QuPezAxIJ}^0%R^rjGv*x$`hDOEE zUFg0APYb60XLbGSl_Z|_xYPYIGdgOqnjeXqY2{utLFGdg8XXZTDM?b}Be`QUpbw~o zCn5HMY8DKYo4+BvUd87$`=I~Ji#>0b@|fb#H|n{eV^g^9fZ(@!kvBjcPQh4B=JmCT z0xw(_=G;S=nkts#{n!XNj?*cM*k4~;K)uHUJw66TI? zX54^wlq~g`_r*gkCkJCo2*o;*#1)kw}%>=`c#R4SvMc}-cy8jS30Ik|^P zx3d{9%Tq935#oJ&A-BIE+Iz{nNl5b5#@Pj@8oCJzd@)kO)c|L~iV@Je!UNV2$q~&N zcksgPU~pvubb@X1w+)|yjwHFtEG}qI#4u)cl)mnK17 z+w#ZXp(l4iPL0PPlQ$%G-|u<9mZe7xGAL9n#phalr#e7!os!{G7&qK`STVFAakm}J z9)7c!t}Y!ZeURKHOabP|vD6Vd4JXrZY7URqpKCgk2nqlxE|Sw|B#U^I z$JhI*hpOeInyFgnj_}7sCzP`4--ipG)QKv9;^ud@=7BRx?aK4vz$gcHjQPqtd zC;q!_&RQ8wb3@ZTcwTsKd!)9S**J5HV}NKM4Ha6JI#_#0J-EO5VoQ!7XyZdnjChEF zK3?@9aT+z~yXJz+d^Bq6U&)skijPDCbF05O9!S%iX3r4lbW$<;Jj7hUjvgNPcSm-hJ8~zSW zMr_FQiiR*tjS|Xv!>us=i{0tVm(qMIgNZkUiXK<`?=3#tw;{S}b!;B^&7w27V15O7OZM3=dpNA_p@2ehxWF50~mA)RD-)pKfOsZ`FZ=DwG_ zFO`cqS|??XX{>MrOldUlUJlQmY=!EM1gNl-F0BdH&*$E;pWL-Ca0_v+!;J}XE`8~( z+ai0|fIkEk8#%S^D}lMqfLNIfN)}eA;gM?FG8rmuQ7FD=TjKYzW606&VL!UzxNOfO zM(#>M8#d6!H_fo-cPbn`vzuVTyR;NFzDSPh`*g1XG)51k%-8+`2e6e|3o3 za`esDw3&?Y7ujp`W>U4~cTc(Z*EEO^ym9TL-n=r9%e1w=wDr1b=uWANH^;8Tqs>u!gCO0&!ulDm%HwNp0D|kRT2&)Gn##icJ*D zR(!GrTWAc}@z%aH76`$W&YS(&sAlYNaDWadj*+?tPoNsh=F^5ad)Ye~(Qhp13Q* z&{mZp#rp$%BE4KKtJdhhZ}Mu^K6&ad-T9=tvK`qyT9LR!gV13?3CYNrwddYuYk|rB z;a8~|;*Y7JCi{vFp;V-pj8!m2WvH61_S%hh4&xS!*?7q*ik*eE!pk3bmz6lRv#f6S z=SlC9YEo=V+DOIg@%m&y;yMKFTO$wl;;l^1%V>@t&Qh)UE!X)@KkSG%c4+zZ&}#6K zx->VZ^mb=o=3#%eQj#CP0D0y~7vv)?(0gd&z2hn1iV1&eja?7h59Bh{laFaZlvk%iIDO)w);hC!pcK(t9` zPoo)dM3(Gg66p(aS?y9S8xn~4SD&s-Be3SCq$MOax4*fMd#n2y90t>tE!8aNH!5Vu zrL3RnpNJHC@Swj|j!hz(rzaZB3<7PIkd~OM_p{thNj9D46)3d&gUyot5lU(d5{^7s z=&`yG&&#F#-r8eXPV4$vnb|CO{*}+oDnxobG0eux)!1@}(e2>2iV{Wp{*v_<)EWJK zdQJHvCeVhtr_K^L-&Idm@@vRozY{pU_dMhaJGShH4zLn8w%QB-^7Y*tJC1VfN?Fnk zh+~b3$?5N`M~Y)@x742uOvB4FjlZD2MZP=6=P|2*34G+*zjZslw|bR-&KF8UHJRtn zY3zLfzc7YSk2iMyT2h&JFr}MgGgjZ#GaQy=TwDt|J0^mW9_D$!-iJPj8M|FnxV5#w z7Lw1k7O%QPfblLVEobiWtppPn_5feSs7vp4x0Jk=(w24bmRONaZDhUZzhQS_K$B5C&ykj)CB1Xee%~kQ74y`<1dhGF2*p92zw@c(vx7a zevuCIN^)AGI1~~g8@C#>$boRfh2HWas?qmjZiLZnyz4?daWZ-{BX2H$La4Em?wPo2 zgVD}~T3vnC0aU8}j7B&pcHEG?9&c~H&fja<6qczU_nGDQ-Wcf!v?gwr`ssj`nnZrpP?xwjh0=UpS z?b4l4CgCQd+0in~;Ig^VkA%6ysvhJH6$Aw?)p$3P1ZXrCJ=e=|C@aoMzD{KdXQN_M ze;9&8gmkmM+?`&-AwpKCoOyB6NqFcwOp5fA7=`+UhTChs_qWN`NKu7)h2cvG0RlDG zixRW40i}*P18sz^bLJT|s1^rPfmgQE$|$C3E?=*4bDpHRYrZOLQ6g6Gu69|x`=xqF zEZbUvv&kNU@w>gLN6)cEKDu59BA7kPf|RaCsnzH8$~O-)k55fLqCP8aGfPUfe!l|_ zCPtU4&|S-?%@uMkH%_2=4E*?FN#QSXTr@eZ;xmG~vtJbp)hn$<3gh3em*WN#QdZcZ z#5tc8724M#61-M&lZnF&AqxqaV9;xU%7b`Xqe=np@yCQN35EAB4lbgqmW0TS;q6-% z&BqGv%M33|cQxkQRn61|<-IICw0hi!4EYoM6>pnx_ym*owh|Y|NbEIv6Z(*C@7^5| zLpq~RFD+K|9~$%AB$0NE_g>Gjwg8h#6IGR|I3LXq8|cMbr2Fp4%y}i0m#T|*Ob`TF zHS-sSnI!KF6LuYB5tD5dPnRoS@i(vAG<2hXUw6{$+ucZC187+>^VQ`X9^+GCCq2ZK z*(4AvZ{|H`S{h7+m0M35OQEa+%Wa&3Mn{C4(fzfHVF3n^>d>M7rfn;UyUIh*Q)EWWfQK`=b1I%Y21Z~yT_BgGI+Pn?di3BrN>+?cbD#q2zKZ0^fdFq!jX?MOXWQ5j=8$~gD_vMxh}OePlUv6sjJcTV_N7y z-e+Ej`f^ux{cKe%G0(8N9O+aI1heUSf@#4sLdMlw!n(NQW_jpLe*JgbQhtHxd28;e 
z7K7`h6SwJUd_Q~wb79|w%HP;%a=5UEZ1)=LfV<9X-Xw`X@p;z-dai$hQ!1!bxv_#f z^CspT7-6ockecV6-!`!8Ok6v!ecx-{Q?wOIS6Sa{dl0Pg`F5aUpQ7XqjR>#NtlG!st?!RQlIx$n{wGfTbni=oU$a& zofE9%c{M9|kS}j(*Kf9!lIPiDdK&dju)pO-f4;e)^XY>auXyV1*X>p%V)kt2Gen)a zg|$Y36fF)&J*51tQsx>O3JGVJDfLR6=2<2W)^ryhnNm2Ilq@iM z-u)!NHu3*Bd+WF;x2}C$K|vJ6LIfog0g+Nt8Y~)=9$M)}x<^Ds1wq;&l@wtZx?w=1 z8IT-cD5YZ{1M?ZT8-lNa&Y zSelM$3!<0ZzL%?~dFCGB>c#fX-e*V~th8$5pgxVYaPn-&yLKE`vnLV9G^hJ6xE5j` zE6;m$a}4FBDe)vo2YW(N+oH{eJWrhY7E)RG+OSA)vX*n};g3|_An2QOLW9R&*%?0D zLT+zf46AOS$Id2tT|3OtdctL>M>V_IW-USU$oA=lNq?KNcp2NGR_}r$=|s$qeWmbL z*GErokTyw(O+sxwVe*Grhtn2k94>bu8@Fpg`!w4!^`fca(u0`3)1+9AI z;z21m={~=OqZ#knyD4~X{nbKpT7i!4uH2GNlp=qEwzA69&PY<$_jx1in*=p<+4MZVu!{fg-(`A+#Exmn1_pI4Q;Jcx|M027EZ306#N1w+ z8)OB{dr#;*F#%_qo_r)xjLObsh3<}{JMwT$E)fuY9RWLzL6IWCn%Kipt;-SPxU6t# zQt>boaS8<=vv!5E#oKWawUC6=QT>%v)g66D`e6g-S`!|xg|ns|Tw)9hZjNcoS~|1c zn+4C81~Zqpgh|jA4%dfT!<^_12p1D3>XRRgC;AF`10lPtK!>*+u9{mG*q8MCst zIt5{LB7@vA<~^S(ON!Md*H(lO)b^kK&rjYn`DpEF(;St?( zVVhM^nC(tc0HP3ZxA-rKQ}i(P8p(QkyZH1|}!qGH~XVj?&9g4M`!>;@v{o-pr2_4L69d||M(MAmuy)%o~}tz@6h z@Js<<)|Fu;vDn_jNQRv?RraYZ+OTnjG;|IT!zxZ5R)^`7mXWU zQFFi7#Oy4@MVOWkHEG*fz?tEUhvO!&{HL2{;e$q;DbCB%PNx$QFr)gQm`BjPbzwG! zps>#tHfaxfFvIvHdZOziI7*B*8okRLj<)%D`>09x4|kTlTrPRu^@`>B`x`$p?8aQ# zAJoGhJxIgU&Ug0ZnT`9f>m-Y(a<$>H%#<3NMdmlOvG+M!#vW{brf|d9rRe+IXfr4h zAC-%$Y&ile@Xp;jnxJlfmsmrGBacdLq`$il! zl`tk7_cCt=i)UfVcZ@zc`y`4UYGIg3(qQRcuFh*=w1ZBhcAXitQi(cK`$jH-Uaw{9 z;V4T}uAnCDyP-r3cGcUPv}ktPh_PN)dsq__9|Ykl89U?didS*m&Npv7P9wOOaKg7h zv25xE(nA-1FF0()O;-LBed=jSD4=a~OH_FaeFnvTF38Yywn^}h$tR3@=nHE}GR5>5 z8gSB{LijFf-|kx#MsgdORb|k-&MvynEPXYCkF`&{MVW7D35H`oAi^l@I3r?(9Cht) z7CH8{4l95QY8IDyGDniksGSS|QfBTNm+lDn+GDV(I+?QHqc z)))m+nGMk?Cs42J%}B^2)&zH?X$3#AgY@`W5#n*)!Gus@e)>>mc8$gpqpVHpn&Bbt zyedj9!geWJKbG4X^;`#RiOyN_TgRIgi4@v!dMqgh&>fi-g{3xsGf7bg7J1T%LZH)x zUdW+Ks2}&;a~G!s29#6|IMozn8A>SgVpu($qrI=lL_y&CVJAKhyX_S4*!xh zvK#iXYKm4OFLH-=GcP)y%05c8@dyL1VHX;%ue-GE=zUN5l}La-W1elz{Vd}v@-58(dKBS>PBAaWXF^U^ef-T(HfR(~ON8qBQ+E)3J@bhFI z)JHj)NY|)$TF`(^P7t2xDsJAM9|FttZD-vThH0pS4u`6HjHeoQbv4rrKVD$-uI)BO zx$LgyU6OG5iYOp=67u1XCc9o6b&W-SvdsZM)=h&bdFlM&sk2u{{v$Eoc`P2+R@r=}-Q!Q62` zWf>$GeYkTt*K6F8M#$w^I{lnkUImB2aCL$Hje1$(1Vo`2T^=E^%;Vj0D^Blz`Fd!k z7QT>1X@?b*ua@RNr%Wi_Xcf8qVkX2xXw}gG3u^ka;!B_AFv#KfZqZaK;6N6lUwYGc zo^UG=S=S0+=_qt=sx@EDvAu@*{&E&>bE|z#fJ2=}f9QRTL*T4eLf=l)^62QCjp5s*X>a&g`Ihco}tqAq!Z z$U{``R#4T1;T%zXJ>W|eNG;Os)@EH$S3tIWP9{BVaS4 z6TcFz-gmNPX>u%}kk|Q3fQBRv|JbZH*H4YVCfk&@5N)=`;F%(oSD)+?N86Dk&zg6M zf~7FevmcOVD#W$=^w&%SA}AZHLmR3ZRWK4EQk7U(s&gfBZ&zoP?v^=Gt8(nE%O10a zWxcGP_!7ifDYTxdhN+S)nF4x*nBw-ajaGg?SoOC`c10G-tdVIgntd=-Vnlv;}n_xHJW<9Cg zOr{b$O8mTE6DrZHOQ-06m_kV_=LR!-LkqP;HnAC(`p<`hl8JiQVuzWcR%4e0*@ zG0w_LKT^-wF=Y&>w^ry~_}phLn5Az@6cguKN7h~Q!EJpsV-QS8y3@D1dx0Q*nw=Xy z`N7U{hr@hrNzDxNirn!^7Xl_xHwNu1+va>sBI+eqN2@D`%WcfI1djq{X^GyIoW@It z-M^IeVmB+cD9wn54may|ml$6kM0lG6IvZi*?kyK&WhD2 zUwZj|F43P@zjxh0z3iIlEO`2IRx|yS!oPI3qzl!zQ469NrhGhB(oj!#CJ-g8YNFa6W-ehj zY&#gAUzVw}SX$C;Uo&B2jE_BW3rW7rKT-bH0q>y*KkK$@awQ*))_m~7Tg4f(YLI*n!;0-2pyw|uF?~5$ zKJIBToZ@d4jliC3@+dq1 zYY*d_V$;vw#q}=W1&8IpcneYWN0)#jbeG@=-GRVmI9bERNqBJ=9aCk~o8_V*$lVE% z*_#{BOq+(i!mI|qJv1`z#?0!q-PzDD_L%m1{q}W;qK3VFTV-2Gztc&+ECcCM7mqK2 z4kWt55+^!JYZPwK#9)mmD1KzAB~=0^qOOk)QB&cr!;0$xmr)i;N6_?r4*@hpXk%0j3m~&ZL`7L6l+gx47ZWpVZ>^wJJ@3r zk*T#x;GpokK3^)N;OAZZbeUK|5O^qM29WXg;T^YYI6*s1->%~KG6tV~pIO~E=tQ#eq7#vV-s`P$ zyqB^-j&LZgKjeXE%bQ!f>y8}LpIRAzX0`)vH@gnPh@;`zMClU>G1okZsvsI!5cV4BJ;#6tA{S=D+A71dDz=0&HZQrv zd$sFaM&#PQo=fkAMDV?`smgxHlfTEC>xiarXEZG<-~M7>f{k2Cgv2t~?7sR6;bx&A z@-eH~a-k}j%n~u~`sO=PvZ}rnTWX$-%RQQxEc5+V-@>kc2d*R>BPF7onnHEWu*f7K 
zHcvVH{B^YlrV8}j(RkVBcOgc`_me zBFaJT$IoO>F5{z*t>ROQbh-lvwd3jd6a;)0LMmj?NkcrP8U>#&oqpA%D9_iXs-*#U0yr${2- z?UA{*UFHh?BA4@z0V5Id%U7nhyfr;XZ#i43M=@zk2{h?I`X&DaasBk&g(sI9vCI-oX6HzBh9QVDOiFb8OPm z#xV?q(I#8V^$EBW7Ty9@TSJw8{tmHY88&h8JvW=@Gz}uHnpiH7;uq9tyVBVP%bfVa zq6=-gk-L>OKQ;zNLgX9DarWGWcBwfJDjZm2MCC+16f^40Y24<*~qOJhe%ThSkf4$sx> zc|D%_6^9#mcSI;zL8oWkzWN}ijt&)EEC|mtYbzC+wG%&Em%P0?zKy-lzF{c>U}zK1 zDB&U&GYYW05ehoKTKy~^TjR(b6Ycs?mxRAX>D+4mKs9yw@JRgKtQ;Z&QNR=1@4e=) z-zdwJ-(ZfE;CxVTcX;ykWx(W(eM$d$`zg8CYC=n{u(-3~a-LY(d&38D6KsZ>#KQiR z0MjaGz(RVGKD1RVzWa0PExXHQ`%YqyW1S(25$;Re{^B_7sj1jUJ5maa4Zp>DwrKZ9 zxK$~M6#CO}*OBOcXFe_W@r!IykDzmd+`)yoxyY3Jv-r8CWWdBY*ut~eO~d2*{W0&H z@`tDC<~upctwO{;XU}AXTW|-Trikz6FvFIugfsYT_r)`#q$dkI0(+xrFpwtZA;Y+* zv=&5W&R(E9%j1ila=F!FxN}*4!JsK}DR-*zvt8wmQoP&ewDb(9C_K&T#~uk0h4t#s zYX!q;aKg-NN#RAhHr?~GLT*#pI<8wQQ&&7X44PE|>$I7H*LbuoDQxPMHbsoV4aBlB zoHjiokI$>PP;{`Qf`$t8Vr9=_=ZI^Lt)DqIMrsCCDkt44?UqbDF32j^F@2ThmQ&YJ zoS@nHAFYF14U+~o0vQv^352^;h$uBQ*y1#puS`{rTSx?v4ZcolO|O*FF_I8lCEsLH zs57B>Zq=0wpX;xvS8Hz@xu-GF9?JYCK*_2zCj(;^>{Hq5)7{i#tyi8n-k7%4Ky8rb z7JYgfkRJ!AFuvPyzs7zcef)Vxz2szpguGK_9lXCWD6Xu-`mtofyb4^7IS&d)uiR`u zF_Qm$sp0`n?gMqq)UFQkPAV#veQ#&ZQOJEJf52AnYhNyeiA|kns5?#OhyXRa_FWAQ z4-|(g?_LqEWA@odjR^bDbrAXSIZKYvpw94WGRCWU8qF^hF}_Ge*{vTP+Z7cJi$<2FmfB2^$d% zlI>H9e#2tR(~q{99+wc+IUwsHFgnX;%eF;%iLa*BrN)~){xpup0B58(ogZZ4MHk#l z{e)h^=k^Q;TJW$XXX_xY1;q0NelUD(rzis zxt{=5je=$&kl=e|nWTcLG%c;PPAO6~9-fa;txMCB2HLEvlz>PuDzryq)flyq$n0F$ z`2M{)B+tngmI|{qYty-Epbc~2XOvea5@~+tqai`Rp2h$u!+D!?W_9&b^FcM^&5l~- z@MajpJ)3~>*-(tkbdx7=%FSwwE-pd$N^7V_;D;}A_KQy!UKcBM z3f*+JeE%ex&Sknuoq$i(T=-xcu$ZKx^ku%UOj-YuglysNN}e{9ShO>K*`L+(i>W1$-5WZ4wHJ*}!;eW-d>oqEm+IsRe*A?uOzR^KVPDZ;Ij3VT%G)wX^|XxR*r!fwg#B8L%QWBHBR zpvhCb@m`#4-1B2ALMo4zJW|8m3MHHl!AkM=(b!M7Ey@9R(pid*-j10wMnIsst0ks+ zC_m_9X4_k@jV0}Bdsn%D)VD1!-g(Tap^uA-E^5ks)4i5}D!uWys|&%UeyB&%ny1$z zPv{$5XMk>&lo)-5Q#HvZc{nSW+SnLPeY%-DgN4p_o&D@zIGw(3DcH-XsHc&Q(VxT@ z?l_e$uXBKY?+1l`Wa0^YL*;0!&3ZdjA}44kY^)V?y@82h79yckYe}v61cY5r{_crZ zuXc$9W)5aQ^vSEti&Rz!-*mQWDA0`J51T~hsOiUxqOxc^Dg}5e&47!qDGE2DF{o#1 zJ+;tF^A{HY`Iu$-7)pH!gA*AqwQ7>>>&(2l>ZeRH#{k+j?khu5}w6qf|#dTiXBa7_^dUL4lCsR`rn9KV!+ITFn4g-{K0w# zcg09)NFUdmD>5yZ z#SIkHHwA7MyMKl#ldfTzBj)#5*N3vwf@uTY=GLEtJ>-91Wop9|j_=uE`)vXFW|yA1 z56;xmo~;jh7n6l-?KYv%Zw;wAZWHu|XH+RcoTO-R|@MhAQJ2k$~XNY<{nNg^J-@4aw!i= z7hnelBM!HzhkyetzIK$TFTp?eS%|jMHE(j`A-T0dCeY=2GtYb>mZaUCJ$hUPN0KCE z0O~f8_u>7~GatRU^P|$Jsl_ez7(Y`l_1^P)Y1RhSi-b^5R^&P4&|t}nS0>$nt zOblXZhIv7*hHEIuq?|5z%1&H(a0B+ksmIW4t0)l2ymp%oW{4=pdB|`$t=i*=Cr3H~ z(L)gJcXU86?oz*z-d8{{>c91df$$7^bmdl5l_|8~c4nGXHt4nCgXiNqYMQjRE70fJ zwIKmuyt5_Ihe%|>{rf@g8_zah3u;4{7_7cWPTymn=7ddh>2k1Zz$+L(LDH;J*~K2M zfAgc1ar$^$SdN7JA{*_8agONI+Qg*!)|=~Rr!C_6l=Vtor>Bl@TcS(+tv6biJuFV$ zo{dx-|E}Gg8Cbe^hM$C?rgF3h_T>|SE4OJEg)GH;h7J?L`kX&x@pt{ubxv22`YI~yq*HV!O=%$^CV`;^hN9+i23Z<_;YA)rkYE7&it?u!O+& zgQC=M}oLUlfaF=&-gaNf@Z-Q89EbzBp58?00XAhr1wd?_+ z=G4N^eROB zNbXvu4}N-a>6Tjq$V*4E>B*f(`E--7dCt2Nf_~dhzrIx$;-Nsi>mdq8^xD8qH^cb~ zdroKfe0L*s&b+t!tgih-`x}^Q{g?VJ3(uX&GihS6iI|CqX+)!EjCiOsyS`vjeYxnp zmDc-^oFk{}YwMbJ{YX*RpqfF=idt*0*N-R0a_K}*&zr1c;w2ET4GrvC zZ4j&o)r5hyp8D;LCHX9Qho*E@S!!d7qDgj(3eqJA?uEDKT8V9~jq-_lqwHC_Ad}Af zJybF{hFCxE!RQ?oFEXJGw82t+<&NoI_>j)!-pbS=JBzw(!vGS5#Ruh?MBgDnNVS^B z>840rQ8XjA%eliNi~5wFr{Xi`5QT>u%}ZS)3&1r~$PgYqEje-p{Pa!<>P-50Z*!Rz z1M!CbCkX$E^VbZqdu;*wU#F%WM?lFa`UG=#(jBz^L}H0d|J2%4;=MQ8)u^N6Utc3L zPe9fTPFY9O?=0f!M8ij~8rXR1Fl?=}Ne9I`thY&^?uEt|#uN!~MOu8c-g*$YvqIqaqg$;B;<|y(&jy42mwcATlWCrEvqQdE`wv3r_>tY~(b8$8 zxd?n(g?$&SnF#?a9nCxZ2pM4>7-JAN4``g9E0nEU-p z@+2J+Ge7gsr!0I8fh_d5fmFdapNE8b-~Y+Lfxb+`BS6^)0^H8cU*(WTr$8(C4MySU 
zYbGXr0EWm{|GtXElk&n92J6jR*|1LpHW4kN5YR(!Db#0?G^3}*cL#ocl1|y$?@Lca z6#y`vLuGHF8^e|!#MK=>n$m||AW;5(I<)mfpY$&eOLFzhtFAR9Sd*uXSkL*tFN!e< ztf|Oemz*Ys*OE9}m#|_mQU>(!e;?dUkvCvLw8hgpC3%ya3|kZ5zBaW11MXZdLNa-E z5_keG{h`tPE`84+kIlA+cV}@L6BBTa^L=+zcq)qrplySpf_C(Ga?KYVXauLb=f2jI zUU@3{{%CKoKTY_=Ut8m=z;!Ae2L->zGMVc6*P?&9g%E&(a6F+i`6TJb*)!bbIFIuX zSqV-UAu5+&Ya%oPRC#*Vgfr zJ*A&KpEI8+Z|G<`tg=E$`0tGJgdWhyfy~Tvplp1{R|eb0R1j-jOvVhY^inBkB6jGe zG8ucrlftZ-B|o+XdMPJuFymIUU!t%WC@P2}6Hxj-hX7)S_fi%FzVCg(X&564+1#a0 zA2)We-DmLquMX*V_x4Zj5+IIfm@wwwe}GE>^yOCe7GBsPFn@@ixN8ZePak*Fm#^Cw z(C62yd_>ixCVsc5b8-FpOfD{OkAPo${_6|_I4S&4V=-D`A>kq8?bTZUM4Yf%#pP>1a9 zk0X!O)J~S<3}AaE@8#l47A*>wOiVy7-8b_})!JTWE;EGk*U?B;NdS(Ng|^q)|9I=M zTLdN{YjR(d(z%yE)9|mttGdwL;^z z&zV=;&M(e!spy`h8(@XbZE{+{FIf@D#f+JJWnw>#@iJYk_@Jc@u1YS^Tyx%Bs!8qd zBMK(k)*lIZ@jqr|yAc?FK$vFv_3qD&=ul)-j`vboExr5t&706Tq+M7NlwPjLq@BR( z{#0_4EP3s9)BD~pDl<#Dp|P?k&%fV(3nPz7$w7&_G6xdB~k!lpgfjv(J zxZmAJJC^U7N~+a`MD^nh@YDzL+z_8F{zlr0Hseywg zqDm6~c`^QT+E)Nq6xYi@RQZLU>u8WUOksOXKR5sDSE`6lpFRc79dF*J&ucIyYHVzD z7+W~pS7gR2g@RdQzR?i`T?+^mDSN%we}1zN5H9te`Bg>Tk6Q?T81wsr!6ofS1k4I3 znm6}FP&bwcsN1mO#M%@|U-QN|R!XT>>PgdNG~uiph&&vYTBbW4etdl>FSPtP^M!{71A{0Dq<)db_W_rKZk|77uB*g-mN^zqewN3mS7llCCX zXB#}#$|Z;1fU4MLBsp!y9cyL@ln3)5*gjhyS1!WS^UeM^#9h)f20b2 zk!f}vh^E+yVCA0N1f*G-CbYl3r9XK;f{1OO>HW=LcJfai2D4TT2z>Wvr~&`dib<;T zfvzKgBe@<_M`BKQ{@KfaN(f^dpfpr6GmJja_YV^&4F$9lf!iMDt(iu0Fe&yS7@!=` zf}Id~{-213e%T(8VzeJgoo+Y1{70+*^_NE;f;lzyU|RHP|Njxm{2SwR ziU#o7GL5#V97qBJ2sRju+MvF|@(&O9-@N;&q{oS5z~CjX=5_nAg9mru>OnN%p~K1q zO&k=6inqS-JH7F*-v8^WpFRU9t)G8@kRL?Pp##WWQW~~ibfIDj{XH=^2%Rl@4{#wQVrgV+<|Kln2-sS(S9-^U0 zEC2e(J&bmP0oha(-^Dmx#W9CThR@px?Ch2RCMQI~yrevPa8H{bK?lq6FL zv%=^Eq@6xK<8l6?)tBQKwL*VG3y_d8M5c^^g692`G>j&45ut_3E@4UaLH;R&qV5jO z)178tIy79YlIPSlW&HfDn%gji&7o8oJYM*nY318rA{=V?(K4`(sK#{FG~?DUwM%Qm zP&2pTIJyNhu~^|1{e@D%>DgKx>So4h8a;1Y9=sI+i=kRr#-Y|sH!D`uc~lu z{!l5FawkfEX|RXE&$7%ZJyZp$#7Tfoo-s)LG}abnpv27F2%v`JvnUB* zmLg>+zS)SVvHpza=e6MxMFz&ts=*^z|M9#2UKu-k=I5k??= z@qtOnKvS0Ydc#9Vu^Z&>r*gbBai4+DqvC04Ae~;zSjr_#5FO9NIGe+S!bDRq)<*1^ zFNa3{E51*voh_yFgKNeZZsgHU^5s9g#gf9ulR-EMqRH60B6Gv#-jI$IGo%xlHNAw# zC#T$J=16kJYzQ&T|KuD~^kN^^u-%TV_{O$8z^r}vTKBcdhN?+^BgQVDwK**T%B;LL z%{|!~=la&DN{{h-Q!{)H&2NH1Ae%aH`8?ha_*fn7EAR~FvtBI5BV6}0_(H0e%z6%KQ~Ptx^~vW3UBSOL7A-Ou7bBJywk$@nA~Y?M~x z(qQe2ikfKSX4i)*t~_l1s+rmhQtchO%i7(an+OQVX|vYY5D!J2=8rYdREWfTGSC(k zwH{i_+_YKbiqj|PlBdy)5xi+vb)8ep_k4D|r6re1bcDW{keum_1=7c+o&4$?*Ci%@ zN`?qxQp)FI3vORWd>n=cT^qTP*mtJE1(cE1E!3}fH#@6~doQp7L7M9=kyeVA+u}cG zZkT5~+l;uzl%~wwI4`j(2P9Bp@Z-rp)Q8U*0m$P?PCLO#lNr-L_O2Det7EwA7qQC; zS_Vx@*IQq%nE=Rs>%7OM`Mz=4bj`F<*y@+{*SW^E*Lg*)zvVd$?+r4W?TRz;&kE)} zlU3Hc0d0*HGT!)tzAC_SztC&V58#+O)h9Qt5Rt1k+OJJtxAlW2O0)R;G3>>KcL9)_ZGB9^rGxxNdAQb-wC*l5tHPR^G3);3^ACe-;-3%4TAh6*8SS z+0^GasXOXT;WpFOKOQjFA$nWN0rg(3h+ zdgYGPeI1;iUfJ6FCji^|M;@;40wF!Jof_Il6hlDqt_^QNO%*Y&)0bn4yl{?iL~d?( zYs}5suC2KM#8`T~H_ARjZmn^ovI1ql_mRnXr)=-Yi~hGmD)XkWTQ1Mtg>5@BavP5G zbvwAjI(TfpTwr@JqOvTCPdLlE3k;k%ClcE`Es*Y7#F}CYpJBPSTAxtVXj2sKs)(+d zAVKvv(6YM~Ri$j0Ouy;YI5cwv?L;@Va`z&j8mOdM`!2l~p}(U_WPz0n=5y03b+Wd4 z_Q;Cg+BR2pOFNaWciVVNpaU?~h6!y}`p-nOXaW=}mAw^rjn6=dRWKjuXW3EnaK>Jx zL}z{Xi1#oAzr}Di#~uxb^M0wSLieG2YJ)D}+fJltt?;^s#^34t(`EvDFgnNlaxl_UPgth(+t2ep;PDwTZ*UkGFVX~3j*4^Dw zPJ1dw#MPDnWHzrtL=qg0&&4;vmqjjiNor7-cPrEsTr9lN=fQt* zG5@vEc<%Zc)9uWX6XRjZ$aluG!vx4aJX+#PX^WWWnruvv-grYuQWguA(a7^Fdf1B< zXFYIDVrM&p1`l{FU(uEC%;H3nEoN6;)R%9W^ue$Kq$y|^*S3e_ zmX=qs9l17Us%;4-miLqo7uUX_iWtx+9QJKnB|ycL2&Bnz@ji9=o)axJ%eo1C07*`k z-2I(%*qb{b3dyVk?Po#jdPnei6c-XY4U|Fw0Jymv9rgZ^0aO&9c6aedD7&UoyK~2f 
z1yC@L$g0?crMAY4nVAR=O=duN#d>#xE(&!SoA%rncEybzC!g<{ziF{6Blo}=;nUXW zzqj$Vn^Jl9<5>o)_PhOswvT2wb;}hdINa_8=^S1Z9JG(i&#Q|fV0KXYL8f>v>!Wvu zK|f?x=J;6y7#GfFv$spcZR*`s<~RYwEPwO)XpWIh+Dqb=vCfC9iU8Z{csx`++%^sq zGh*S572lw~4IQ=k_Rul}Q7HQ1%%+w7bgqBdpU--z4yPU`G|mh- zC39Jhtq+k}#Z;Ncl_&>usg!xB3wdt_s(G%T2AXBeQk8cy%gZS1Yi(UaYOq8|ymta` zCjebtqnPR9tGRPOD5Tp8T{h9~OK9aaE0r**Q-s=7D#Y-k`3Gr?%NPBEjjP_Bw)JgH zpG0UNUbElO=H0f0hK7ZW3O7D?&ee>o(e?BOhW~F45fe z#^UJ6On%gT$}o%_oUzOW7xI#Gx3MF$vrBp_MW4_ugN>h=K5w7Zmu66qt7G8qPLr2K zsfIAyL@ax-4{h>3wwEXY5azL{VVC!F4MY91Vw~Z-GXn1c(Z@}VfQew&@QxI@#um0w z#T%Y83azDc`8`If8QjhMCTBA_PW>C0D<*6EOvy5J=m~5Eefw>@9IBJD?1wbd0GfRlDZ<`#qK4FhpU|Bb zLAUP7JGVVbUf9jiQ+l%UP0{Xi0C}{8vJXNL={ExNPv3KS_fuH5YOJ}NN0gAg%Z$qq*k%(({`H?4%D{SDr#lKs&( z0|j~o3{1+tQ7RY;BZ?_7!@(i8xxGGnHOd{Ss+w!99`2^36P6L4nSm&?ws`&m{^rGt zj%m#%i_8j#a!Af#YFk^pKq)@Z$bK80?xB> z`0t5c;hzz7R?PDYRE`&Bi?9-KG6`}~Umvd8hN&vN92JbLU^CK>e_UsI15VPd6eFg5 z^3pByT*bY7n=-OD-Nz|rdP3)wop1OSs9OoTm^@o`g@Jowi78A-3q%DMS+dwm$1zGG;JXe1~qJs7Ni{DIudQ`d(RQ zXIpWuSs9BKu?5^;On-L9${JgSYn0Y7UEmzvWrW6wPFsJm>D4sMGo8~faEqU^QV$@B zM*hV+u?Biayig^a&;Li!`TvM@PK5&3|IUwPQAdKd5{5>8jk;#~%#6J|lN&%>)&>^U zjL;l9*KW4IP&hR0=S*9oSU{}D&sgM9!Qyb8O_nfo*sfAs`gnb9G@~v>UAF_a+r*@Y zox22uo0U~$zqa}9yM!XYNW(g?P%0>G4eb;_q_hO0YLl&1gA{j+QmTYiB-belZSQhb z!9KgA0#pGbpTuYyu+9othZpr^ zhGCJr^8#=Gc#FkB012O`0|s)xaA2NBH(=wvTj1*tPI?&q+R- z6lIn_7gf7#`pVR9Uc&8pCKN|(MI+ca_9RK6pfD~z(hk~d`O=P%91wVtX|;+g+8v{H zIZ0^y2gK+;>>oB>t<0?HR{5f+?8@%d1=Cs*MKO4+pJwxh$mR_Y8VoS; zxFyJacJD!!TS*`lPL&`|)qQtbsDe1AKIp;~SB>)SOdoA};fIP&rKZgBH_d#*nn&%m z4_9jofw?{QEBlV(Zi}XZhAlR@(sZR@G~G>ylHvmr8|k33Q{7Z|0}$90v8*_w;jU1j zCqKB+ZcuQyb4kWEGnSsJ`HYB{^n1ij<<2Emciy>QdCdxKmoL5ebJYC*8Ls{o=|_ZF z)PJS8>s<1jRbiajB_^G99p4uJ#`Gj)lFGjXuRVyq_riZsIRP9=y2^x2F!Khn| zNEMJ<)hyE@=EY7R0OI?kBkwXl!1-3Yz_}aEQ8qiQeWk=gJOwm2 zx`T_ZupuJ*3KMIeA2}LJ=F4@2EJr$MCN{5|v_JQkLg+?^Rc6K3o_ymP)cMG6_IqZA zFJ4n$U;DGDJ+cd~jxxT0o(_o2eD~z6Y9U;FO)wiW+kcNZ#}58Le+Hd?xo9{#^s=_! 
zXa?HqI*6e1GX8u&3HdUGcde#8I);w0juhkeIZZ|Cuuvs=OzWTt==hG+bGcYqu3!NZ zx5(6umO#=Bry6O7@%O?*@yVd&b;-JoE77Ls)3Cz%iq+5NsgrD-X3fAd41C7GLM1ll zD7dZAlyTxz-FV42~yYTEDp2xoT+S!K8&SvHzM%6kbqjb=v z2#dT)IS>PI9sAnj0W^Tlp)9)Ia@S#T_hMNqn;!M~iY)^k2Ycx90Bo>d{c^+MpS#($ zD?G^{IIW((@y$^F#S0x2r0L&;r&CI^#^< zU+wI-UG{P`38A;fZG$56G9kx_cl`3yas)LwnI)!DswV61J$;-;C5R#sIGENZ|s1 zOoh(p2V4(Xf@m)5dE%p#k?TB}7pUwi?$y1ey#hj^`<4CWwYWn5VyEgrw!~m<7wpEJ z2~J&(1fSiMOW8!W0q24ublAhAB7^zDJ2dQbBF{qO!a-swYKQb>^kn?*E#g$?QW7uA z5wn@Z>Qq(Ma&sQx1Tj}75YyCfc!IQqL+yAS+#+wJ#)30P%ikU{+dok`4G zEWLL@cqfBlEt~z2L5|%ER4d)nM=#HNlQcoaH(Kzzq}zI_X@_*nE~7~FkV+KGYP{QA zdL?o$xIT8^(BB*t79En)7O=5?iQIXV!QtsxV7)t`Ltw`la74#-Bw8?qd212J&kERD z6#5UkXa^5%o|QAqOsfc8-1q{4r2eeq0SeF!U#t3YYx*`>MX)hv1erbl2GP1Q^hTth|aXOh6U<6 z*k+a-`Tun$|FHT1!UPbomwZu=h`V1Vo}kLlJsTVWvW5B2oJgBDmcPrsgiA#-`IEnE zmX9jTJ>^O$-;fUAroa>!izNRPm$#N@&gxA)+u*<8R6A3^18qMO7PtBgWR(bIpGa;m z)RDA!Fp-89AN@u=RsD=zd394J=C*CdgI0JGD4-Z;MM!znelD<;LTNX)dU^foH!1=| z5;=aM&e~S&cNTgQ3QN=j0C$MG`~>W2x3a{ZT7UN~#F;rx$?aSaU;Y(M{Jxy8q9zKgtTU$H<&-zJfa={)7rv1sNUuc ztNE&DK7_wn@%kL*1;{0HJ+K~UFVSe7m@nYJmG9BrP>p zr{=GJ)FvDH5-mkhm2cGVdgl%{eanPdbe2tRp5uNvz+t+#OH6HyR>X}-ePzA41>XKrCof~_9L5sv9Sf+*pWHXnm;rqBfmg=b6ICn&1eS( zKFb=hyA^)|GXR^9sE~B#lc>i7eQ?}Gj4^ML`?%02q4(l{T(}`zs%f@&AcEh=GbdkF zB%PkReKZAdRfmcw+rB=Y>pghGER&aE$L333yB+Fk*XMQHt^)m!Ohx2`Vx1{0D$TGIkyV<)TjO z6KSPSsw2 zbLbGCfb3lIB5RxHtnC>erC6Y{b1MO0l-7jylP842$+iT z9(cI?%mKtllCcJ$X_D%s zA?Pj+aQD3Kd^?D==jKU*c>E`n(#-5w58Xk2ghz=WLLoUqqlJg$4`9X*9lmVD%T06; z7-xe$NTKq2ojuUUp8~4y%9EUZAS^lQLFA9|-D!g6KIRKWr3xJ=R)<&gOcIkNvB zXWtnP=i0TsBS;V>L_{ZuM2+4{swg4RJJGwSql-vY6T)DWBzo_CjHsiH-rIy>bfdSy ze7C*#^S;OX9LM)O>_3kAw_L&=GK2ZrFX8%d(c%pdy&4Q znw9c5Slzt*Pqx*hT+l`cLKYu_L2{Z}{1dzQZYE@Zv7+YR+h@J@4{8Vo%W3*gzMJKH zgOwce+Uz_t#Vx{rW>Bx?@Uuq!Dw{@G!M$G`9)Cw_k!P1H)_Ke5_N__IcDOx>VVLg!rW|HXz?R4k*bs-IT= z!O~eMFnDwhd)o*W3`!lF@-|}+*xC9D5}5AMW%fQ z!`~jwNyA?_`UKL{*f`f3PVc(fOhlAW`b^Yu>SknVhW$TqK9t75X7JA*Wmwpvvs<4i z4K*Yqi_M>aRaES2+e*}F9Ucxv@hcv=F9DY?2)u)!jzjxcFT7)ao!q!eE^1A|k9+g0 z2rKcmS=dvO`<=8$SpGP*(iyUWoopR>1B0~@^ z9VUy#cw2o{UjY`3kXFdWE^%*aK56)l#aM<0`55T;zW5rp81FF6DyAc&XN&|4bhyDW zJDI&C3}pC^`Pfqx@R6Efgg63<_EMw5ho9}Z?^B~ZO^(&?oeOBE6q@$AkDK6?H2P8 zvxx1A&)dM#qbqt~qg+>T``}Kt?jSAFm-cbv0|tKhkSPhDn)vPGDFO(a0cT7HI7S*6 zb$u8Lw%9C*>7FXS(2=t*p9rps!kgg!O2G(og*FxLK?aDa8(M|d@)-`q;hZcj$ZJ!; zV^9Z7sWTtv=?X_#zfLSoF5-3hr1#JI|Q;@VS6Ec8==32R+uYbO>dELQtVezx-ON2y;X=$y8 z;H$wG-Wgs38X~Y^14Qa_!uExH-e(p0U_|_}U(25e=WE@Bzi*N+ktk>R+aJN${U~(~ z6X9)gGT7}xt?V@~nzsltV$b#9UrjgK%Dst1Cn$?2p5Aou9HDBk0zEXm2wg#$2}CE< zOzpICQMF&SV1K3pr(&#dcKlfKNj-2?b`oRh&Y)>&N@1i$dt!UeZ(qX#qk6PgO6^*j zV$##&Qfq4dl^(!b&36nf%77|m%hetB9P~IWmo;4vHm+BDZ=QVX$gV`Kv>xr(j zgSORPoBfO<<~|P>q9ru$w8!-+bf?s_TX!daHK=rgmqC1Cay3ZKf?!1Prb@iv`vYqP zS9^5km-xU<6WP+AELMuBv@ufgn-rG&GrU4z7mmeqDn&yl(nsnSn#z{xc z#rCeS0$x**h$QqBQ{m<9t%qa@%|d*R&YbU`?|ez7P$Z#5$9JKC@|fa20Hm0?L=|3F zXEu!zLfowO!Jo}PC=c_*BMwFiOu*eT*!BV$EW`F@nyK5F1Mttixz$g?p`xp4@~|ly zD3N_l=ho?*wamRusv&E9zrM_ASXW8<R?AmDy!3V5+q9sUT z#S4}q2%pQ*i76%D-HD?PgWV%dsQ8)+S9=CQho?VC(x;@PyKRZgTG~=kfvL5ObShz- zlGttg+4=lOSyzJ|DRcm{#vVU{Lx%apUhT9GytSC=r$(PvZS3rBeg)}^MUQkqmk$Ul zosqG?cxdfbwPZdIJI$}-L%Gjhzb?`QrqWs_ualKMtSP9Ttag=j8p2394oe>%K`<6g z9WChNs`dek&*e@@1}eka?u=`v=KYl?VYO=mEg`xqKR*MfX-0@Ya*9=&kQ05K^4;sa zYoE_7<2jigTEIB|th^&;?)W{4dfoJddZDL>t!90MSLSn6m2(g5WNYRd%L+25F2Ze3 z)uI8NHQZi8@F0p;`6k%Is<608AwqwprhJOm;36?M=FW%Fh()IB7FQgkI`i*+j+fON8I|ArD5Oi{x%if zt_xALRPaxgAbs%mUPBDGPI`-!(OhAD{X+J68le5{U1+dvM(J8V8~sG1|*S%573e#O>4IaA?7s|KUcC) z@G|EG35D{)1`L^_zWB7rY){&$;FBI$@LVRN6r<73>T!f-zG0u{UF7K+xq9|P3Ao5Y 
zQtn8BVrfOv{Oflqh_<(gm-*4a1X#%cqQ+|HDLL1jszIkg=~>%|3o(L}U*8Zyg!E`t z|3o*BcZ~OUwSE$d()e!?W!q~_4ZT=jJq%@MXCD;kA=W;qsMI*{!~`K#%F(}nb4U2P z!!_O_o+EQOc@vX(OdtQymOy!WU5OB{yuB{r}Ra_M$b)oaHgl)6=4yj^F%#h3g> z%|{rA;NL81tYdpF?g#Il>M!W5y|YumI{&0JGoH8xyNBZgpq+=NZv@vU!vH4r!C=_1 z_I_E3_~$fPKtPOL^E6=&ag4eMz8=Y0=1iNxnRy(E-!%4@=hy(kYy*Lc+}g{JkB;ZY1fP+rR6m2lYz_4wFS{iy!4kTjy8Th~$3K z4yk~+0ka6~xoaYdmnM?UH4XBk@?tVQ$c}`B9p=Qxs(mAOjf6Tnd%$11*9;)#>2l1D zk{%#;^DB@wD!MF_nq{iPC9nxcEp-|H?pA0-(Kb?B%kpGw(Al&y4j)pgJG88Ej8$e* zlRk~h{#LMJ2su5rn|Hs^_!jwD>ig7?699;1nuzC^ZUbKDPFb#&y0zX9qx{yq=Z{jh zGt?+ekQ;wk+APZT|L|q*`@^~!eXjv6F(CU5?+`DKVjroFSsUgnoUyL6PGL%V5*%v) zJJydaG`21Pt&M8iX?r)Vk~0ZB9d@3a+cJz%nxCI4VC<>}n&c#p4V^+>e}6Dp!3&8i z1*VBc^H=z0Cz~e>fT)xdj=anO&SA{%`WfTOr(Fbu^FviGIUoRPbgbAZEWuz zVVf8M;VhQ?s90p&7u9UXe$*P)o|;@8lyantx6;b%hHzFAae@nUjhrT{W)^cCL+@;9 z*Xg523vS4`n>yb!+T!T*7PUK!umNoGB>}{I+rO-bGvzLi24|Sy6ge>4SJY|TjNG!K zk4O|gC}i?0b+`xqcmVf9j$Bze*Q`?erl*ts8q{yBAFZkK7SY}FJmDt$HVwbq`Y`ct znpkqVTTXbpJ`ED13=`$)Csl4xFsvNcHr~lE%?-36+y@_#((yqmoZPDOW2tij=jRvl zi=fXUWijo$UntzBBu4Y)9&~xQ7ZSE{u1(o|^(^A9Vmp{*Cn{d{(Ol zeL}eq6(ko44=lwbK*slxpFyh#81Jg5{eB?N>ai3Q>WsTY+g}f+zU$YJIMUHjisp@4 z!hJ|)Q|$p<2LQ6az=`LgNoe?$P_b+CU1M(7P39v|-y>T)Q3Tr%rJ}XkT^On+x#)5= z(hryFzDCW|AGXtKlic-1w$87{uDIIw>~`;gBQ?M_4kwrcqWCa-Vhc$)pBmlk+&f1w zi6PL&>oI7H$lYbX!aCfn8wb!U9)vFPfIDfoO#1I}z=tK?Mk=i==>&b2FH++*6FxuI9%8lK8L^%<~w2qyKUnq=-N=O}|lPY1PeWllV3aONz8 zSpsp_3VB#KryYab?W73-0-Oe~cnvj#raQsx;T)a5^Ke158#v;`Ge1GGNm8V%*sW;H z<B9edt;D zQsGu9)R&&nteh@!tleP}A9Nvau)0DZbs=o-be0N=1$!M{!PobYv~^*n&?>5a2!Q2U z1x|(Ckvi?TEs|V2JIGDvP(tnUHQXA=7Fk|tShr_FZL%QYtyDBdHn==l-m%ikU(vDW zTn`5DiNcZ7(W#05O{R{Mwa8#W?<0qW5zp2AaxNL`?r$%6~~;H%08e;yTM-^i^$A%^zdx!y-n^}%~>Z-Bo(feT3&26Ea1 z9W0~-&c$qLF@jgOeQrx4;Q_ryEMk%X#P%6)7>86kDK--i??K(MGmEdbb~K)(GCB;0 z%s@s8zN9Hex52r!J4ZijB-OgDg8i1fX$7)UBwz+b5;gY9QmzY&w}5j?aXXBgdRTG) zBli-gF+6y^2we{DIFxs5JHp6yG-r#^Wo@))?9|6F(?uB^@_SFFhEdM-(>Jf9dm4yp ztKf_XWgvaKv3eokUUmIy;pnPz(O9@f7@q>Q!!XP8o9U^vE^dCvsS^^$TbWms!T4h6 zzT5sZrai)_HkIMb;OOWWCA{8HyYry8_$r;IVogAp*xeMTD1=D>hkmubLBsK(#oizh z-+qmhSE|)z3DD`=+6fh}Y9qkYHJ0{#B7B@`2RJr`$Q1TYo!Sqf<&k-MC2?HkHOx~0 z>gyWnsP1#jmDS^CYgL7!_JByNR2w@p^O96y#Je^fn4^Y{z^?w`)@DUM5DW0=O_DCP8cZlV?$NFN(kdo#v8}GQ+I$WoSjV zE>%tDt59t{Pmsbi&F9=7>T$-*@Op_o|D#a66vt_^ z90BH|?IQnihc(mEf*<`cIR-eZDbH#rW_O!`hiB-3tlVrbCU>Sj@j=npO|yTSA^Cs# zzkk=$Z&en6f(L96^g^JTNvlW?+xum%S)eKZMV|dH?MK~iG`umk58P7o5_5Xs_C;gF zhUB!p${z)W-c|aaq|Vfh&q_bh6UdI$QHc;$)RelcvM)et6zk z1zpyVX?`u5!2xD)U*=<=s9pt2oBINTL&Vy6f5fE3v@&vi+93C<&Rre-j@W=FTUVs* zhJ#MRKEz&$Jdi&Niz!iygb)vz<(k2#MPm){3pF{*UjwLohRb=2(>|4^rF$=7hO|oq zsnGzLHrO{meNwLs2Qb(2P;P|V*R^KrqH5i*^6}E1?`S<6;GLGU-!{0)zak#>X!+IU z?W~No7)Im}7oOyS!QeSWv~oubABPsV4n@kf8P)}kPMyvwMU5xiSDp21C*!&j>PPXR zYt|UciHgICE$x%>j8KkFZN=W>C$5LCMXAasgeLx{pCwd3nY!g>gwL;i@Oa!*ZOJu6 zk^h!{VDH(wp`i8n$Ee%DxzSWQJE#7P=t-#efQu?<9UwkneN+3z7!R7Z@JLg)T`uZ$ zUpT1i#a@KfK-O6k9k8hX+=sM=`>GELTlzi}P@ zTrzITca+-y29TL^=fry*T4OaQaGHzSqLLW}U z%9aM3G@U)K*{!*+^**)J&Ua9KA~u>Q#-AwCLOi)3D(8!s@DS}$IL5}I+`HWueCE@g|ZDK7t6SM-R%14NEJjK91Ak5tu zZm84VmB?FcD`HNr0nM6jOrk}RbKNnIx&gumfPJ3TOk+Wev19;0sI>y)1*j;u$qd$+ zSD0O(bKh$?Q5mo)fS0w{jjFbi&M%p)F(=dcUA_12n3dEHH8X{*XO_UW~%% z@_5ZI)7^=B?1R>8%n}D0z?~_e>)cSYuD*Grnad1`C z{)uHi;W2;+5Vw#g8>M*%7D=nqrD}M~R5SM4qaAoM zuTP4J%bgOt2weXosF_2)1?_c^Om{6=*s2O20R$d2&!>Uw!lUD5Fa2#t=8b?^HK4@3 z{3P4ouE%XZX4jI2cTRRIk)5%Q6P7?C?Rw(#Nm{S!%Qr9dy=?VtXm!CWG{7)_CX8E) zZO(aivI(V)YLbHxJyA=bV>fZaX-6>1v7YLcKI_Z~XL!|5e#dX_s)v2A_fa!ByT(&F zs;J~jo9t{n|E)nQokOZGesp_eu-(x23FKTB8 zj?`$lXT3yO-y$S+<UzCz4i%`J+%*gwGgo=s!(0se@Qgj?;Ol&@o5P{P= 
ze8x3AZZ~QOAEHJO(0W~FCW{nl|JX&b!PvjJDxBUefWqW4sq0PzopRg9 z@?9)ucq^I*h zMxCLu#;@MD)K@m5S+^z!TGD{230CWSrQHoc&dIyhnS`p_aqJP9t(aZ0OgHJ_Lzij8 zk_Fu&R>vw$fC>^_24=W8Y1e>HHlwEc!)KAZg$N)Cgx9pj78sfnq=5A56Tj#w0r#M> z4d$(Ux1CV^S^vzm65#ec_(XEveH4)pBQk zN4AE48?qN`i~FpBDzOJhVEgQ?r{1|8E@#E2`VOdz#Iw^kf6&U=6&)NMOqNg3i#a_* zHZ&P=5l@hRyQig22%XT1;#6}=5qxEtALUSK;<+9L!jiiXJ-FFM!@?ZS%xbHiS?pYE zjz#RtDfIwz6wn9=$S~CFw%WR325~u|D4RW<=Ww;1a+YZ+5ZHGxLs}hRUdFL+T@s{s zKr7qH#|PVFZizr7j+gOx3&i~id=PNzgjN6EgcE807tdO4MAyu$CAa1A+>(_XP#iJ- z1b7VVq+Fks;)+WEPizgTY&9s-gSRE~=DM#8swVqpRAv2`a%T)bc^#rz^tN_4pT$*$ zOVpmZNB_YI`3F+U{A-MQ16#h-(FU0mXa{z3w#N`Hb)p6cZzjx;#QEIbPer4Hu@AHThX==_X z`y#jHcYTz8yCG%Ey;B?%44jw6J))#^RG0dylWzD5_7clZ(;gfHLHh8u(ib+bm7Oee zF&1a0rT!n`H@I|rHK~S40ODLUMn~?%ue7To=VaNX`KFE5d#AA-Ua(ztsa951dLhI-hX@Vno zT&@lhT9I@7icr8pL@c3mBwmA>?M{;=!HZ&c6F)ygwF+Ym0VW<8k|fpImoyr{?C(r^ z6dRadX7P+!cK`H|LfZnQ{@b^E8f~$=+|OTl0;J;4AEu5jOWCZu3}dCrB^HYF7&ZmQ zOU|_&h1w(m`AUvLPVP7^b1g=vp}b-n0_$4a*M7@g)g`+tYg=Q>k%%P$?Kk?l)PTdc z?+n;;ity1>U3!J*pnkjjdz;m_NpmL|OSL~>xP zPg49E6J=fJ;sz`5PA=4<5!B30!Q_x(=9U=vigbQ=ZLEcu`N~*np;6uAWV8ZL&no|&(`I|R8DRlYNa9YbvWi#3DV^Fw!%f=1UR+M^bEFF!e&Lxj z2wb^QjJjh%pJaW8Uh{_C#(`_1+4*%YH9}3QQ>5ltyFqK?fIMq4sy5Pm{Kd~L^klSDp zh;wjVTh@|zm0w#_)P{TSN+zlU)P3A7hr@Oq=vJ(cfGO#&r;RObA3jC9>DS5VZKa(> zPMa97i!!U<&D^8|3`oVq91Yc653r%y#KXTW91;l0M0!C$FGakB^$&;XBQ1xT8Cra4 z^{@e1LEJI_f3W9p2YezOeYeFdn>DO&Q2 zHMG8_xx^)kB;e%ZOIKdtt#-8l0~=py)eK?ACG+vahmPyH<1Vi^Vyfb9`m_9giN`om z@dco<<0b4ZS7fkcZO(xV-)`JfZ_{{kIow4o5 z)9sTe>pyP_Ip-^W&rtnYE#2HAzk1F0z&J$=XhBt@yvk$v3^*#v<-I)@o2Mt#*`f@d z-w(z`BXQ#Fa>qNNfX~=W8`=C+9Sh_=H-O4Ywd-zzN{UDi8@h}ALe&~+jW?#H)&XKx zXBVpcI}P!lNs2e}KPg#86SHJF`7b(A26)pOYhoLq-R&>F@Pjv+`~E*^*P+;Ozobz- zHuBN5G&ArS`a*3Hf35BfCrO-XDUf)noqp*0CmqC*G(hjxX37_ubG3K=-+l3Gn*iWU z!6Arb9EbOXg6ZGSk=A9A9!-1zJ-&?BBz+(g2MknhPL=QYmfgbdroFk>&2mpY;|4!} zp%+7V!Szq^^18Zl$5eD&zyQpsZEICUo5kHYk6%+4Qat&)vo^1n#fL!$w^Z$iv!@#Wjx>{N+6e1H}2;#9vxDq$iW_*9iq?zWioRM-xa>V;f^bR3?hL%{udlG2C2 zVI($L5`i;6ql6XJg}`psYVGgcGC73VA2&8SA@>gjF7;6N;x=t2wpaFDwkVB_71=Fw zG-n|Kp%z`=hIYI=iA8Y5&YUx7@#`5>7l11)ukf#ma)t?|vix?zlW?6EdC+&CC_42c zd3!p^g7i9L+(z&{ijfAZh03nqke9A#A`1SMNW#rX#>0U_LF~mOTJ#3pDR_7o0uGcn z9lb2AmH~7#6W#qHsAbo#bN>Yt3Y}}434WJA)qDEt7ME-=VmWgLZRowF7t_)-_d95c zlFsMkmFYa=MGQZZ1|U)3fe(%U2&$sl=sElZNK30bNrrPC2 zZcsT4e1TpWa_YbPKYSPW&Bw1chsvMAL!NbLX3zb0uH~3JD)*udR%JF0dUsVcQz@xc zlaZX1=&Kyrs-IV}x7!62^eY&@kz16?Veh+ItLF7jfkBN=ls*AKe0AzD_1*ia2%t(d z^Mmr)Cjg!6P~J0~qqRTzpto}H@nZ)Ve38Nb(pvJ{mO#r<_ULLh^A~S63T8Z_>(b$o z-UFG|SDUT;(^XN9@7JCb7}Zg(0asXw_pK+=Uqlo5vxf3+CbTDhbGSeL_ydRgm>l=> zU$=>_PFg4492&dw+xL>Kx+l2i>fN7Ncl&Oowq||i5U}Iv8x`?1|JgcS+e+zM_?aUi ztFX>~l_1>aw&(Th_>Y(B!2T(;Thi+#@24!e4V(CDg%%Pe7P>@A4|n#vxtFEBi!>u1Uyk|0T&HayGzHJP{>c2O?4H)RhQ~;De;VO+7Wp@S zT*3qX?LYsJPawMg_?2_i(7N@`o!&Wo{D+W9ZhLk5g6{nolU@m*9b=g-L0nX- z__lLbTcKIU9Fej=dO5Fju>IP9e)#`>K#(NOvtR@PBLQT^a2yXm;maF&8!8{Y=dW|M zPGFKA>r7KROG)01EkDp0=5lcuy^!FY2;4e0!?io7MpdHH?)Lds^G6UN1@NBt1aA?f zrp}6Z;o9;nWqq)Oc{6jZLs-qt-7r2QUyEc=`wO?&@8YeIj8rHXeUjtd?OLvtZ`F#I z&mpis*CZS#Pv^kbM&saABxd>wFZ{)KKD-~i1{GmIgP2HTW_kM2&8%Z=M<|nwPWW3@ zBcX|$yMu{SE-os`;(V=POkW-E?$&O3D3N+RIG2)ucZSQekc#GxPF|gU$E&Gl3T6zX z<}SCnp5iYcEPLqiEC!RCALNhS`+RzG^aNv^+q^nfib}>ZhX7+#!3i*<+8A?roQK(3 zfc;~LsH0i0FD`t48ppyo(2M0S?<3~309|(u6(1-3;JKR z|7Us98UDE@LpjW=a9A#Ux^_){zNG-RIUFZpuN?y~ynETicfI&o4(=3%V~U`6Iy9)0 z-TQH6o#vzEsm|8HJfCkX(0}9l{K-^p>vJ6(M-6Bt)*f50z!aJ5TqRQH)ONBUTJ|k^ z{OUWW;;}m}n}^ms|Fc~AU=qNV8sA2I)foiI4|SM}9>hY{2`eU#IXA&1-342h zSD_1+1OghHxgt$o5{hcx_ev!d`?OM6C2pyj(;jfJX)Rzj*H*QBWMDQ8^_Dm_D5$$V z%-kzimn>*$cZ{vWHA9@qL%_z~GLH6Hz~Xe@M+5i3-(fwipMI@XW;Y~A$|rX^Sh_j= 
zn>^JQ%vX$@u2pyt5Kt2;g{hNsszqV6M!&LlM*k`aFwrW{D)h*yVMhHo2l21feRmCF z-yc$JL3ez$%dNC1@*XXk5%69fRLYbEdaISS@Cw{FhzG}BH2eA*#T2 z-a`!jC{iZgpjdf9)de?I`D8-Wv0wgR9m_~bbw{;hS4!`cAa)mtqUt{Rxmcn_h zD261h0o~|F4dJ%VNt%voK!?07E-&rhl2Lv{5RCKNvn@tw8?=_$4w!$O+P_Dot2(H& zHpRVOzcRc-VCXxr=7M_^8S70;T8&rW-#hxen-XYUgZi!~V-Q;48r#4>Ju)Gd1b zDtn0RWD4do`T(2hm?H}QsmIC}JCDsFJNw=w8CvD@elPXe_r8sIi=d_&GFEfE0*PM! z<*$Q@)~;hb-EC8+cO1^sbLZmulqR{p6E9>wS+ywuT8FBe$9jJ4LRyy(nQeV*pVuCA z9LllWSs&xhP)&*Lt}i=`9X5%($2eWCyl_jxPNgeBCbw~Bo*>wu!hzObdyNqpMoT4R zrS0eZK3e++s-L7gB*hV?=O`GchXvh>DuOt0GkAsc2#Zd)z5vO zJMmbWl#ul3h*XTuXgwf8b&{&{dz&~K@hh{q=kf;9(WHQ^YGQ=CYwsF+`%Ght}=808onH-t+<90fk zm=dttiF0sU2elvW%WixU-yTGnM~91s#Y|K@hqLb+y3Utxo?c;o@^ZgmGgFR%$>4N{ z^DF-AX!FKq#m#1+9{3t)I@(xgS7Bv1e>M)u%mi_+aG|qPUFYWGi@>x-L|$R`m)6)k zDBh`=^Wz9^;IBV=%HTFPQ@k#DVwN7GtpvZ*fPyD`Niz8yXGc{&!IfiM97jxIDw19w zNeC>~;bNE7_yUTiYg`6~0u?84?)|izNOIY*1!i9sZDa3wn`ytz(mP2cG<-^Ml@wW- z3Z!GosQtcCzyCJ|4cAE#v1vMTnLP_1we}O_>rP~U#cJfv$y%k(vFAwAZMk~$3W*F= z$jwOEO^4&Pb>B`+IaNXqj4eV$J2F!CP`I*k5+Ne47c0LQ6o10(y%@BVGqF~qqd!vv z`;r93BckZ-{Y4MuQ@>hNfW z%5+jX$nGAbnvbaHdLQl33Ro!&_A+oWUywTXVI==BSNSH6D#1 z38_HJ?=G1>7ZNx3lDX@YA9+wP@1CBqH#b=LK4IkX3tl5KZ*-J zX5RPnNp+EXzvu=scs5O_yVEnXzO>;td)7(t?op;zpfc$JvSof`nj%vARHeYa&SN-Y z!r73)L3VS;4V4d?M(kRui5@igxNM7n)4_0yw+PG$=rwd_r>U>vIb`*VdVTbAZ~f>} zWif`SwnZRNJWtc4J=ET*hL@@BU0F0+EiUjF*;H&%bodPw5rSNWnztJ$C&}#rz8#!f zz&{kLIS<3FPxQ+K94}cfVR=Qq6Vh~2UX{U?QJH@TVN>S6o$Zn>(*o(0c;4$=P`9W;50#7FbY+*%%%k8032pjSN!0}m9T z8y(!6Exj|g8gw=?2XDmZ-uU;g^nAD$4o~*GLm0TDxTBx0z;2I$HqHLp1~( zthy6olK4s*&<;((k+Ro&q;X~$N6s#*u$747&WL)Y+9j2 zM>h|k>VO$lJ<-f#rmtOPou{F~&t&dW$tO$ss?xQcp0HkdnybyrMx=-{%aj>-<0x2> z(STl`ssR81LL-&^#VMi^+2WF!7^6Z2p)yGt=HoEo0*t?`R z)*dV!rYUN;V%ydO+fBD+eUI!B8D&P!thIwI{q?4KrWD7n4b6OyaA-F-(9`YuRb%AcC;ByJ`IV$?bW)tYMof8*u{zdl zR!GfGW0^ZVz=^mY@RShl!n%7S;6y{;u@rMuxTwm6vCmq#IQFeF8ifm!f_GL+bGof4e5i?>HXHSAQqjXV0Yi6`-*z3<2)!T3}@+2+kt9 z3V=}q87Ng5?rHD+dXCzG?VG&%&8hoSF@&$8UvKFxm_*TQxSrOLN#A}NL6@|mthzVh zwl99v_GqAD>tUAMX&*VNrjuU@8{NAdS_n%{*4aaK8x(|=WAm>^XR3U@94UUMRV_|; zY{Sq*^p>G9izIEC(#w6XMP^VMs<8HOuL{$kFtFXq;BdN>K@X>_4N6NNX1$%ToUL}RL8dsdA9J>~sN z{s+eLMTfqjl5)AKMTfxEC5&ov*oRJPW4|}^#!Ag5OfWBV^j!tBEL!GFR-p=AaRNdh z=k{oJX~;VpxSyD)CvyL{p-%f@<=AFr_1@2z?Jk#r=l^Su- z?bw~;RXOXo2!4K<(M0_8=wI%xKsaVg4m=#Xb+9M3*#}Q)>C!9h-`fhX2uAcAucTLP z^mBJ!v+aX=LpK|vZLvoCvJ{omE+C)jxM0Lb#wrtrghLb8M4#^W0@mG=JJ7kvb)(w3 z=?2S@$y-3Y_NQKnLfN6D>80}(S$6no5*gboWnKE`4tAYsp5Wq&g0fZJ(jB1TZsJ+vG*3Q>&wWLX=r+40IQg`Duthx(Cn3vMq zlc8TVW0R#o#@VM1MTg9|{nwKKo>AJhuseh!SAGGho3Y@HdQdD&6K%uYr(Z@6ikl*pxg3EEXH8@AU zPMQ!>2-|7gIgGx7+u7!K?Wvzr1YlzNiK69-)OxjZ`?`LuF#ypbCUCdAB@YkW`Oln% zpTuge>%K?NnpiWPzfNQWa{xo2`1&_jxQ(IgU%>m>g@N}Oi9~StccNlM2qBmI6AiS) zBnB35Go`=%#gW2dK1=X9wlN;k#MN@#SzIg<3UO;yxvuj0Dr2?@%O=z}`QmdT5?swk77#h-| z!}hc_;3GqPUHTvf=8tWzt3<|CBzaG_Y;LVMpF!o02^@x8;y&4UU}2fkzNpkX(>6M$ zK;6hHO43Oe5YihDEF8esU)ML%b&MlR`JiQ8PJtzxPhE!6Y0}mrWp*8o=W|v%!}^Mj ziFrErwHRZL^fM)W1!K?a(7A4&aZ`YdNQTVR02x(|Iz(5k_Jm%ph9~)GL$}uhJ6E9q z^w?bV2R&I(eZSdn@7(;S#~!%#7Ez6`P>t$9L{10llmTfI5I}F%OC84Y&jIChwZ(Q0; zX<(i^O5(xcb#D12E%_k>#Q4Z0x8mYN9euJ7)nxY;M;*b8{T1}iwQTEkxF3eNestkl2;waL-tHFX zFp|q&#`aucZeQc@-2%k-bl+*pY~y1}GjoKL#Dt+!3+hZn9VDWhLE|(jK6go#>#-^O zC^gO$z2%DGe1n$DpsAZiwVsL-D?+1_Kw|8598{mV5&&3=_|~X~89GldeXnwh*ZsbHD_{rR5@-eWR|y=L7|?W32#wQkv3<4h0CA^|BLB7a$$UP!5!F)iiv)6tfB7 ztjE~Bwb@ZNa-ew#_hek`pea%aa^BATl zo23Y`P@}*)|8X&g(T#(XVooq|_b{kPf`}Gpl97YqdNMVT0~4F}QGkkWtQ}Z&RlXd| zw?E>UaIDp;L2?(?N-+7Xw(m$Ez8nZav;(I0+A?n>FlkPh$?IU#YwmNVx%M4-s1vn({4n0E-EwlL&ctxk=fdhuZ&ryz!(>ez80!^5F?EW_m&%s1o5D*Fa> z#kf9{R*2?G3N~+C>ePFIcgbHd>!cFPOsUxGrWXN-7(GAVqL-OWA$9BunG{9$q%bQE 
zZKUx_0J8;{KQg=qGXUQldTVWRFO-_0nUC%URQgvB!H1SQM_m<1%%37e_4+KCg%oe4p zej=jcjG@y&HlQsw9V^BKyJgmzb#2pQTz&Nbf~d@9S{ih6maPwTAG)2vfsqQH2K4F5 zPAGi=RG9SY!W|x$E%XGY87())?wE}+b)!6xS=gaM=}b5onJJ{JEg&E?oJrmR?9`d1 z3hE~ZdC`$Q#;vNpt0w3Zv}CTAI(<;@oD9=(t-m=^#xpdH&a>V8cm0(X5wS_SRG|l+ z@DR?TGEA;Q&(P2B8Y9=bW-0ty9<~U0&sN^bkm}60afyy} zl03sJ1)I1#)eH-8CNp8`a1UZeWR^x#&GwBv!#+pfffokWn!Mk)Y)Y#iv&G(R>*)zs z#%prEb_|+4uBJ8;edymqi(%--*VE7Q+tXx?4 zGN=A$D9m3woa%Jv<6`}=e)WdEy5j*v0NLWk54{08K?Mm8eKZL`e-)1N>+vUVeO`&w zTOV|WZdlNLfdS>~ zN;ML9?evt<$bFi&csCZJpuXvYYl`F&vEDP=9rA(8t?L@fNDx`n@1YIG3MMffdnwTR zCh}=H&uJHI8)lRhq_%Y$RlQz?W!@b6^tF0#F6}JKh+QcNvslYa@nz+!IZjYN5vk5X z$lJil7|rqn{8R10d;)IhE_a+sB!gD8W3{!!^{kHlq4F$i_oZ>R?PlutkWlmEDglA@ zi9YHaP0J)==ize~6L$aQCqM@^#zo%SR(;hP3o2FBkGkT8#9AZhnM)VCvR5qar2|W? z5I+_6o%(8?Lk`dof!&f30E9ioB-fb@=f!UtWkl^Zz7Ms7;>KYD-hoDvp!Ro=33wQ7 z{7ERyB9qf1buOf~iis=P`ulBP29c zyfy5ID?=rrXi%ZBTT(fx>yPsqe3si8>0MpKvK4YfcDG=Ah)4d7d^82cRY%Ig>~^P; z8l(=E?641TFaryWB4#!!DWd60J;qZWIl|cw{%h^+Z`|<08c%UOSL1;~16qUMBj_fc$?FOQ1*yaGI}Rrf&J2 z318%?ZUP_P$X`h0PzQq*0^o({%f#(-@;?dh&MICM4o;N(op*=^!9e}aU$HQ5WpHF) z%VZ@l&bd#6>U@;m_dSBBNu7m8JZgx2%5WHdLcy<^6=#Ms!l(TT=Bdl(&kQV!=k#5I zWwzIe;1Ql-BOE*)Qz^d9NN)FyO!UeBtnR=3NGFK$)60BF4N=amx?S{^B(Hk!s4vH3 z<-oN~LBM)1kJ_*aHBuq(j}I}Zw57Mt6JbGj$5TC$!+oz{Zty#|@~+^$MR>fiQ^I?0 z^gV0At}olIGEV%JgP)YoV~s!ttg{8)?sbPfQn3=SlhipMn)UWf){D2!68{@Iub`~> zSM&bB%hxaeEHcbQ0SHQ9d4=D6dG0g!FAd-3Bg(85Q@4NznTyrSbT17J8B79BUv?!) z)xww%lIMIQOVKsFP;OD0tD^kDKMaU?z<9Yk%TxqOHBVj8$i>2sL}L74=jvBXIeb9) z8+hS{Z0kIRNV^#N3E6oyNvCO@dqa+&y=H+#RL7v36hGE2=qAFTIbp?#0s#?GOL3ol zS8V&jeLYbkUa2ZHb~|(5%j4dQi3T(jYIUYL;ZI0Fy6!6gqy6beN&p3kU_Ik`7uYL| zuj$}KhiT^{68
9jtasxwF9`5EZhARG&RLZqqiy{bw)Lxqmn93Dn#S3s6ne9lMP z;Nx+LioFG!lHk+T%I9i3{wz|iv>7QdFLM}DYO5};FQRq`RE+bUo~#xyZyk&i4sZap z<4ZubQFYEY=*r?p0cQLF-gedt;C1N5u6Ax$|JN(zHK<+`S7+A=V9$%u|8fgO48(sI z%)m{cm9OVz1pcy2VcMV;YKRoEs93z1Rq8p{`nL~>eH7HvU_}i9H|bpXwTzdjA&~Cc z5{>hE`M=)TnGXy_>s-s-HNDuUCPV=9#0I=|JeB^7L;AmuHLa#TLrjN%)`~Y>vEP`& z&YMU6rG}uVyey-WStgN)^Y(iD^?Y3ce0+#b*#7Fh^IGlS>a+`DA2rnN75Fn2It@yd zOI%FA_^}2-WoB*nxV1bXYgkrT8;m@xyzT`Yz@u zC~TU~T<9#=-2MXU0X|y$Mv(BRVO*ivcF0zv4CjSJ9MpQZsLHI=)Xm&aZ13@Z+z9{v zT+{d0pS?vSkpP*t@vG^oD;M*ry;lkPL(Wnqu|^$pFQ%e!?X%tV zHwj6<#^-oZUo)?7>e7P7Ye3H*@%oCN_vKk0LBo#YPhTxK^%eMk)Xlcc1#p8K0A9b# z;)kS)qkACqLK$kIid3zwiDRY9=3|m3=D=(G03ZymIJYM{drE5DoYUcVmrrwB*b$~a&Vz!80bLz zrA}bg6?fBwoW4J;38+c)?~2f$-@@1Ecq+2lqU1s6hrU2I2TwA;z?ir{&p-bt& z$MZMd<993yIInzJihdH8ISq&o>)QVcIXXy&{1J5L_Qw2|NVD!$z2_E{>6O`LZnoq) z=iX%B4TG5K1cwr&Ja{YB*noXc7y2vhGs>(hYbM-MYG_;mz@=R}#nt+Dgy%&2Z+;PT z_7>uf`bsQUsMo7y?q9-tu53CDmDp^_WEReUj&rUA5FzM>gHx1qPsX{mGmSqyWs3UO zZW185q2S^t==H*U+U&ir0fU2farHx%b7Mn!mg~>(KxBZ_g$aTpuhb9RsL->DcH7y1Pq%X5pq*9PBCXcVd*dQ%{<;qB9H2T^smpyVe; z=DEB;8wJvjzL3nX5KR?RRmz9v5vUuWmX|N`Ch%%!`g&eF(KZaSBoeK(8{(=M-T7GD zPCgQ%CWg9SS}RkD4map6L8a3 ziI?O(fqnFoxE&H2+R2z%>`9Q|UL>*2J5RmK_**%9%DK_uy!*3=foM>86snCJnkB!2 zq2;c0)cDc*mVg8`{V_QtD40-3JfqmdpuTFU(PSji*dN4k*TT(~QFTt{?WMSyxYB+CaPjtJbO5uAue-@I zPc%@yqA|vTpx?Y{${AlfeZsh8lN&Z&D&ORJix6oB(f@mZ`2R)|8)7^WB2jW(YlQgn zm%5)ox9Ad^YLX}$;mJNKPTbq|#?3i}lv5nl2~3+Onj%E5VFvf1kRS1l@!Z)5*qXKyE(QCCuwm?m@q#s z2ON?+-knysO>1JH@W?N+ynLTyAv@gqx`VO$kjS~0s~drzL+{`;P@`q&HQoo*uWB`C zl5hbxKuFx9FTOZUq<`Rmi{n_p&PixZ^!1RN8dA)qm^4V z6E)BeTtMz^D!nxrS}u?;k;D3hRFs@vfQzqgJwgyv;autJP`vb^c-vF1haeFd0D3*? zf<#%i4wwBX_KOsm3T(&Pq4W}5i>iyho6iCJ?u8*`i^RE?!Lm^&?-7is#raGe3=PXY z=R819xVUVV2ZjR*43vBIxfGVW7cvkb!|v#fOYK2rNHO2-GuRW(ehW!Kvcl-4Sgm^E z4eaEBR#|=4%13uz$oL<1#YubU^XQjl{XfpWJFKbg+xA!xg`-#~N>x#iE}}s?>QMv) zRGLT?1Vl=tcMv^TXi<@7Xo`UJPUsOOASFb4Pm~fs2oVB=kdV9;J?FmPyWjWTP2T^6 zz4uyk%`(QAbM+@j$=`Oq_-Ue!xAk>ipTm#UBYb=H#|ng+u5JHftvchpgpLNNY0`*) zcC=})MW3BF$q8+rAUa{+Oyf{=THH7zW777n3!QM*XJJ(8&Fs18vMI^dPtr|EdO7;_ z`D)H9u|D^ZUc63CZ>3VCCv~$I990te-MYKC;>Do*If;$lCl(~z*1{VVG(o?Tn|}&! 
[GIT binary patch literal data omitted: base85-encoded PNG content, not reproducible as text]
literal 0
HcmV?d00001

diff --git a/docs/pytorch/assets/img2.png b/docs/pytorch/assets/img2.png
new file mode 100644
index 0000000000000000000000000000000000000000..6e86649dd8bdc443ec2c44251a667a8b8aff4f67
GIT binary patch
literal 164383

[GIT binary patch literal data omitted: base85-encoded PNG content, not reproducible as text]
zNRCVpt28Ir*5DQ?{zTO6!&p9}!5<7N=`!bX^{QU!BAw-Nsv4_wEb&}nyxe(hSiXu( z!>T;^?63G_U^eHv9rxb&CgDQ4^)bZnUHt7BR()seeBy-y-&z}BtF!%4Omv@1V$SWX z)J(LO2}WoSG07gHx$n7_WWb?BM;@Ahz{I*wL}kyigjynY-N9DO93Aefz_lr>ybk>= zk{q5d)R0H8%2Vvlq9!w>VV6q`R~)5N9u_nOk)g^)!R_fn_~|-PaJ#Ue+(%Na9GAYyhOhwYo+n6=Zf?lGMm_7U^Xd{Ug2b#bY7{TR=lV#Vwa$De~@cQWEmb;K*$ z0?~c5)uZE57qVVusLqX-kF@jOmsg5~ebvqo*S^29vVtFW`x7P*X<?4rhY6DHA-+cpi*CrYGo9OLvYO6qOyCEr3rcz zs_zi;y=T!v}6jP*kez(_7#? zecYpg>Oko*MlnsBYSgZ@bR{fu_BGR8;q<1v@|KiA_cPsfuJcY9K9?{+@Z}K@I?XpZ z;XB(|-Hm5$6u+qS^7Qr8KhE73KJ{OJ6n;(m zREg{Zb?EGz(z!Rn^x`EM)hrp4X-4Z~bwV@LTlEcj6ol&PIdD;(2GV4~VMcm~`@Wns zw-NwL=oY`dTuP;G?zR3RYZl7E+i&wA^|>`K)>Lf7+*?IFksXipNd^j)S<0;kAS9hh zVVh#fNBFtKs5jp{c;*B7 zkN{x|ueAI6Iaz;48l`t8SXHy$#T2Eoh{|KlQOeKC+frJK1>>Ut^1Fb7Go)T(vMX1( zu{bV9?b}Pd{}f?9EEWH#MD0N`zmvq3KuTUWU6o8J*5<(L(e1M=rzS=n<}(W}HeI>* ze8iisx52S_vCRI-Id~|QP&l9(bb#I0TUB<1!4C7RpYyg7W~RQRe+_^5?mCN#RJ)g| zsEyc^pYYI)xzx)`UdPj^`QP=~fWZ(W>NhQihOriZXGdU?D5I&F;%+O zSV-QvX|3ACPj_pX6S=k6QjoUN+A#B4lb)0%R14W_X^E%P3Q9ZEJ$#wRVd1XpO!xfrGh%I9?Gj*5^_BZKwf_Ab2acoT6e|HAEQW{`mGutmRR5FR>se-`pW99 zM{f0xp(GijnF>(y8d$n{Ha&QZyYtqt4>?*tjU~r(zk^$Le&PGYXAsHBiHGx-EJLba zr+D4D3DHhwpZnY>FxnG2zl6~%%V#d^;W3)i)k0WC0i86uEA4xZt1SMZ$wgJ=y9cI< zqZv-_LwSScgl3Z~NtqsTHk)=>z8$rpaR03mo}5NkzP;^!^{AF}4+>L{`>WQK4}REB zeZRW4Xoyb(cBCr&_l5ZPHTl7Y{~#ymtfKo00{@lks8aLL2MacT7k=-#du{JOiB&i* z_m|dBwBBmJ9lJQ5uK&_tW4?_xvc&_2+9Bs3=hdLpW=t!)gNLSnPW5pAs+hW$6-3>@ zv~@v23DwX+FKqkPLhh!J3}7;3jY$n#pU>k zj8@L>|A9;w|Asd5dbN8{zB+!l7Vpx~{*=H}?bDI-V5^*A1dnRVl9g{ABWe2A z--nAa&r2rsLhBDmpHlO0fLM5uk!^?ian5{xqrzM&d^PpUs|Ukgn@gkO?NN-mu!GJG zGW2RI>UZu1cbYys(3c|6v)&CCbe%ox^(JS5_^8n$vyi{bc(x8b27NV3k;HruwI3@M zH@ZRAace8(P%4U}3jVawZ3A3D;jO){n+(czZZE6825-nw2*|C^cTp~1ZlG;HvOC2iND&o|TtAUaLqpEoH@Q&A+fCY4 zrXki^=95SLv5iFPfg~1OVZcU55+4?&!U5;9KdpF{JSI<ARWq7q9;Zd}4UJ|239ALflrMC`SiGYj1t4oa4#7ZX2$!C}~ z^13jGE&KH~9_bl#DlDGnBYt zvi_Ktj(A)Q$7{p`reSp;r9Ko5h3g&5nsZxToqBBWrZ0q}lEf*NmW+lt2UR}ZI&VLZ7kbi%ny9D zhxNo>I=p;lJkA5sEs^02+o{UG>bS5!|^V4kRqN@Vbn2X=2E}%&!YF+eo@r@GCIZ0@4FAgz5mHpd!B^xg|HSp#T1o)=! 
zJ3A%8M3UIgQ*~QhUR1d!n+ID|f|?-en@Zz3hx)fm#A1?5j#G`i9&HFzT^o$^mD?z8 zDM5-*qRrE4=>)|tc=~eQ7(6E#w6F5}Xo-Vd0#iOd>+xMgZr5(wJb;QNzEcMwXWR{) zTBr2Cmjlzwtoh+qfkK(1v(i(sS%+^)S@JA(%br~GxGpqOz~-w1*tqBS_b+!_fS1AU zQ&x9N==t002mFQd3DVQNEs5?D`?7@(Su&m_QP?jHTjge<#p*n77Rj&`rCuH~&5>!m zkiV6AwerJZM*46}cOJ9yskbu1!9cE}18gDLEMd~WF<6z>ZhU&D-WOk@eMi0*O z#rsy%q_?zXu?U?VVKVNFt@ixZ3KU{`klRu{M`Bjnxw>`1o6CzMBV5E^9|TGX10;VdUPOiK;E)vsUQG=D0EVBd%5aF}^35*G=^oR=d6 z&(segE7#+Mb>tDKrf=L&b1WyTB-s+`n*z6d)EG4+(j?y9*(ah52ch;`*QLcy8QrXZ zeaTsCe$#LKOR?KBJCUR75*D-hHwe5xJ2TE&i+a)*Nl06E6^FL9a$}78> z|HT-SWi``3%QOcq=xE`0)bw%v_JBHx{YsscW|dTZQ|(l;t<9W|xs;mtbI``tAZ5|X zjDhaT-L@S^xD}^D=K*snLwafZA*wZ?bG0hn#>R&M&zHCBBik>I;NPiW%voS5O`Fy( zGC*3SGoD{UL-HBC642$^>kiM)0Kz#)$z;rii{LYz$?}skIA}$A3T=}e?Rz`Y!7qku zUz>+e@(H|UVt>3A^ADwpNz*62xG@}4dCG;-c6rK6U?!n3@R@R=ddtE)XZ=nQ%7-+X zI#tHC@fs^{t5crv9&n0Q!%%Qp2SsYvgC7AHttTK)X3dl#t$+)Gdz)(hqHZ)g!F#2C+`kWuDuRDVj-1~R{rc)Pdgoi&E(f4C zklm^CbqW$+o{_mvtf}PhkScDVLCwan=_&|)_QvVPc>vP_Eo|8;j!CIQ?V(26oJJ>% z-A6l%m|gq{nBGLCxI}PLMvU*?^RrGwRDuzcJJbmLvCdXO_aixNH|J=A7}f+0ofra= z&-BxNMCEl<0JV~@@+GD;oXv$bj(@lx-aj(mKD4BQw3D6A+}z#@51f_J(H~dize3saHi9R@vnK@(>aJ)^pp%vV75>niaL~f|6Nt9 z{`m(zO3lnsdTM&5A?-F=w@p*~-FEdV_tiSVmJ1F_;*e??tEB}29qs{jt(+Xurv{e{ zyl?3j8=o;K>vBmixbrG2&nW?I*%&m%J9R_tr4vZ)sdD!bL@`B}(IPx>S~9b|?=nS_ z7IB41z|Hw%9|lG=oqP(5f5jggRKb7QDNIwI^TxV^kWL?*z}LAEK9s>&_y8#vxz4Yb)HNB z3H3}oN!^d&gj@PupBUr=_$iO)_+`@=F%PUU)!3q;=Tei9WzFKIyPmuq+}4xPIu)yr z`Dd3Irtg7?=6dUg#{N17MgRTRL+w+%O~K@<6M0vd9v6^yi9o9sw>EVGD7f?PDMqWQ zKWc#tnY(Q9TZ`06o^@Y>F!!FK*4A=f-8o*YP7M(!G1|P*H?zAzw!Sz7`vZAbN%n`C zD9Lbh4A1~+m*0xB9kl28=)($skV5@w9Lwhd<&Qur|2}pCySWnk2had_JZcNC>I3pBb@pQD`(c zF|J*I5el4Itt1gAVluG8gbAktB8Ua@g|j08LbHQ$rLWo%U_X!)$gVca&*!li$`&fv zV+(J7@FZA0@BsK)m7F{z5Jp^tWYX5N(wVA*X4tcZURn<(qDh`RHYa{J;Zy#nFD>3G zv86rRi|&Jg)sm2^#3upQnrYgr9LO>(Qbb<4u1~aHjYx%?XRg1Re&@rp<59g86%N&(Z1^XF6CG`ZJ>MNndNj5z4^k4*R`Mp3#b9;hb*& zxkZ6DC@+1oeHBn!u!3Sb642K1mP&910T5MbZ}Q}uc-S|62mV#1OE=B zFT6p9R$Jc_y&8PtV}fGp_Gto8GkqlTuaG#Yw89m)-;XXn$Z_{hVVNX9$pUHB}n zN#R3%+2-9pJ;2l<$y24BBSD=y&Xc@0?#uaHU%RNj^ zxUGL3;#Eytis<#YhWj5MU`GQiW82@i-0eQutif^J!CAnC1KSrC|M&o_h2Wz*JEQAK zC*MZ!xKEyfOz`TDFW-m%@c}0E!ADnDOK17g|7{t+Zc@Dc2yVii?~Pvy{_z27>cK}R zCQUkA{?T5p_~7b+i!3DVs#bi>dfAl)D>AOj2?Ll3NH ze(_)H;MI4X?32CsnHSeZIP=W&z3)#V*7-E*tzPHUE#co=rE@yS-wi#Pn+(?a)f->M zlFZ{$ZlauNpb?OE?zH_~iO*?1yt~5#4D`;&yVlW%p+N%e;L3$u-`CMY#L zIjV8XYB28l`2b8FUjdBYmCU$*w0XL*`wBbTD_Dh5+j6-4%degbf;``$Vv1sBu7MgwCrp${mp%$u>KZk- zJNjWeQDR=Se(aEpx>xhtD^L6t=0mYpQ^(`vOKd*0qJM%u+v^trPatWK4OtqL_)JIb zU2nXwl3MN?93NU-X)%7VgT6pP1W2xO=jiC9fm?rNqj08J0~1qLG*@@szVWBUtAr=1 zBVg#H*+naebOWFx@m>hJZmR00$_;*s9J2K>*ALo_w!?$(7*1Y_l5-b_4lh=nSL9VF!_$G&j) z#;+XynklFh(_ur5T@!eu-#lmD#anJR;C81FHcyA03juil;nkY>8x_PTiC1!hx*EBSHX}(V#i_IZvCb)Tr^Hi@%&WQOG$zCG)X&wF4}8 z<@|-@BhxhTp%g0ul zt~}LSDnx}U1+1)kH55ay8zL4za)`eXNKxQn(5TC4Xav%N?m{8grRo!WW0eil&mKzZ z5P$lyDMjW9^QM7ITUe1s=23maYXBim)tuljQg7oM@Zip%ed^7kws~E5io0NJ)D6t0 z5eGUhSC9A{jgpJ2JwBdB z?lRWC^+s{|)@>&{wM!&4m}w^To>0^-7ZUr3wyJiSV7WYNPxG!UW;P=svvT#-d{CVarm721NnG| zV;xbs$hVbQ>*Dq4`tcw6jp-zh7<<_iNj|A2wLiz}G1@amvR#6XjhYN-SIIxCtlbmJ z3F}ORo{xuI?;1Eeu0I$j*x6sDud{?9Cls+;OH& z=GCi2$|FyuQDxxuQ=t7TIrQz|?x@SeAgRxrdbfHMy^qEyWR!w2hKit0#K#P`X~2(z z?X{XW+&g(Wa);J$MHU6{=PbEoTXz-tW7eHvRwdLkgY`#-5pT}xN$}pSjpwkK57Z^f z5q}_HrWlWolA}bri=_i~h0sN_=zJM3 z(wjBHmX_sY zzmoqrX2J!03eM?ablKXEA|4!rdK`QxSyydoD3hp;O_BrJ0vYV^Zx(>&Xyyw6*-_I- z{$R@%)kPuv&G8ov$FP|51W5kG^UWF0Hx>&{zkA@`zXG6sR~#bRHRIR}hq89%`=0mW z`TDjyPM1L1N*C7})-(hIg=uEqK!z3^X1dg8DJ%4bj#Yb5i(&_{1G^LGSnr4IHBs}Z zVjm^ZK_zAgZ<6RCcEB>mVHuTdmHJLM`F^uxsqKwPE@^i;y8}Cm) 
z$CE6l`&LhemHI0VrS-P&?B(Sv87bcIqR;NR1RxLIG(jHQ)5!=?=UaLjt!LC7XXJtg z^8jj(&5==Omx&y}<{rnbmDEGlE4YRc!&?or8<{SVyDkD}szp<8%UIpwa+cL~DlWto zje~&LyEB!x1N-oekKduQTxubf-cUhU0nR%hge_f z5p$!fZ6nX|4Or@LF4AjNwU_{u?SyNRCe1e${fU|h6svHkRW(i5AM)%#Ag-H1kk899 zL*D`{XQOwLSXRRZZf(bTUZbek5BCtNl>)4~eah7$u=i`ShE^sfj!wG)(fJVM<&O_x zn`dpZr4~z?%~Tc$uY)8JEA4`hYq_u!+fEtJ`iYtT#^dSZy(H6G}_1O+laakNM2Sx8PUjBcm4lMEBJ zcTa~OX+ufk&`h1PiyX*`ymOrPfpFDEP^=rM-}C0&*Hq%$D=v$@f)1dD`)e~^OsT@! z4Cyxl4Ey!!o|=zaheOu5C#MNL#`_YUsTQqGmnECkyEeTO(7k_@#NW4DHeF)Q+DwJ1 zb{`yPyEt!YXkWGiTJMv+z*Afcn)Dl^ZWmz6Y|hQ6t?sA5aYP-2UxCRzZQ)T7;-^52 zRWaA|V;{{V@flvLx!6vfR^H&?``ujc<@6XVKM5P08466p^g)%^f2>>UgD5c$&mFt? z(HVh?Xjo*|+Q7U1S$E}BZHD-bcD$aKV=ZM7R!%1QoDJzODG^kyG)U^;#|lX@c?XG0 zKs7VgiyO*aSbEg~m#2Y6^BaYmy3>i%{M6v>w`fGc2?@eMjQE#Hic66So$QnG21`6s z#achY@WAK-iZ_Q=!n!~-?t5u1nB*YxjXDhg%(nH{t3F;ivz_6k@gCymVm{QT8F^4G z_Q7{Phc3mIaPM#*=54k!z3MW$zdz-zku0Uk~NF_|o`llppSpn;t>7!Cs_YLthbJZdWcFwl>DB z_lCe?-KoJ21s}1p$fe@&xOPv(BW>!Kd=ENL231sRu$Nw4T{9#I-{=n*KM*m*rS_b( zxW1g#7xx%gKS%s)PLFvCHELZ*R)1_>=o3z6rrC9o^(To$Z@ORWJpbq)3^{SN5jiX* zd#E_MGvk7u=x}={bfUq?V##v0^*IvHShnjwdNupVjj3o~E!=_fHO1P!+6AjA}7Pv!=i3w`LqG%~jt&u%H9p_Tgs3Dq$>Y zI4}R>@!xKKsjT4K?aL)5u?N5?#Mb!AdBly{hal#^#^(;YKl&3r(o33F1_JmsnhyUU zDyH~`(?J&&+6*?;`(zrq2>e!>$cV?Xuq)-P1@qLJB$eCyC8jw3T`3@lQ2Iv2)*6rX zH|s%;W6L=ju=DB7UpdYUI9U{66c}*4*+Bu(Mx79evVDTCp>eqVyN5bkq$GqLM}CV!(A$1t6^9I_k=ve zdcRaE`D4X$WJP15i~$ac;@kGOKD#_k*5}Rda~+x;5=|r$l#Pxq(4}o$N9*tjMT&K$ zSoJb;U;f~(ST$k|&yY);oTq6BHlOnJXAqMuUwz>!2gO$2lRyCrro>4YMnk{|xoN+> zt+XQ*FA;c`;gL}zLuwWIr~+<;Fhmpw<`dKf<)1b843%KvP6Os9mvYrJf^KjSU7Y>wHCiRSZ2N*MJ{?1?6J!O^|xFX|PZ z!MTjRCx=KzT-AZV5!o4{Ynpx((42`2{FH@l|Bb1o{a8E~J(aI56P}%dI*FeQANR-P zG2KoFM2s&UB7y?Kc)jJJihg8)>Nv%7_^pxohhSDHVTJ80CI&zkm(6~B-!W^bGDMPl zFuuei4U?yyL=F&cr6-265)QT|_Sfh6L9^u)A%5fzB#=nBT#)RwqlUxm-Z6*ak^WO} zx{C{cES)-`%q-ZYmn2Jss0s4|;nt%>zss2aRktq6)pD%v!)PI*|BM55O60X-h$6aL zYK(rBjFU<(;m;yu*&+)*8{C3wR|`%@wuH-XGM^YdFdeQ7IFMLTgDG2@KUuO;^dD!(L`KH>6q=p(kxnX>XP_+~U`XK%<3t!YMT_VQ8JKOT9{HOel_jf>1l zq?if6MF;wh8CU9etCDk@c{_7P{|;F5vbh76rjQ>9WGKIrUvzsXJLUk1~o(K_=@ar&upouu7#2@#aEq+T>ZwMo=6?_ zkLW{R3Fn@~%bD(I&(FroR^HBeJy@38HiFkS@jS`6B^03}e0WGUr%}Ji&-P(n#CZ;q zdTTUuwpQh53Ar#Vc1eD{$F&24+5LcBjV$~V;m?tU`K|aqFW-BT;cmy3ThY8B z%x#zvz?;-DDBqfXJ&<&#HIfgxACkq2JEENZDzzm@9gdx@wDBpWsy}7^GAxA{iP_@% zbgCZ_Xq9s&b~+AfJF8TgQu;?GR7&!Cl8l_0A(AhbFRW)HX3L1K-868iw8~;#Xmou2 zfv-*RrH|ReAS-vA&O)I^q!Ad5e|nsQolH@JaJ8s^PK}(>QW|vwl^Jy-{xd0azAzC| zS7l1EycNH5`Q9=?*T9i;YeUYacw{zuTPj-OZg1>6o#5o#XLV$TD<5=~)6{Q@q6*pd z(o1^JsQYuaep(l7JS?$1?~x8?_~;LttbdA%o2;Du=DIag5J5Oyx%I9wuzwO?~Yw&=YuDzP&*c zJt`)7z}E6+uj$P3XrfJoY-dC0`N8It^=!{?X{q~E04ejOt(*%A@OJhJNj!V4>FnV6 z-be*@U!}E)N;xO1h0XsC2$YQu)@m4E>(x_~D|d`dF1*o7)5%^H!yY6X$ zDK6>CTOQELrZzGkT9fFi4wLQaFeEILHf$KpJIUF8JLT9dXsSMMrFdwRdWFN~$0c`p zV@vv8QC!$1g)`IZ#NZIgs`y^_7P=bDT<5l5v(fmz!W7)NBLjK8fO@~Sd9I(X8ya?L zV?mobMz%mZH{8W3Jvifd)|Tb;=VgKPm!^pn5EPZ&DG^oqdC5#-idA#IbogLvDOK>XfvM-R4q^$Y^haYPP|BY5}lpr@~l3$S}naVn`)zP~*uHyMG|CkQzS z7Z3AI9{?55?#!f-%M>66N2&ayDhms*Iy9rtNEx&h2Pp1f6<8<%w;(R&JP>wmL%E8` z51OB|eflsk?ReX3mv=O7NRZ|cqK8VLbEtu&Lr$&06}nWcQugp|QkJIgJwZE_G?;2w z4Rkq-9)-j`AH00cLt*|fl09hP1ehZk6E;(p{hn1~AI#vwsj%I62_su#vWX1S;#hC* zaTtjIN@2<9*K8EU(F=--{^6tg%|v?Lw1)pj>16rdWVx z!?_Ei#+U~evwLjb8 zz&#=O`iV!quCN0l|5f7B@`~bWI!t4rWfPbpl~X0$>qGf+IB_Tuw||E2GTrSN%j?-2 z(f6$t)#;`u$nZTON_TB+Ce-4mhcMsXku(kZvY7pyVwC?@tCFHWx6c&!$=cmSrnB+R z>dp}KXA*{|wAVDe8K8>(d*^jV1%(IKpRTle9JJyp6gT9+a$M0T6-kSDpp}MfKG9>o zh1AKLzvrVr-#3FC#|)E*PtZpSapHOya(5`#l}2a6SbNrFk6b+`$erE#p>RAIu1K(% z?A0W&Mh>k}yNepB6kTr3hlKs;uz~-B?=tXg%W78gA+b0CT2Q{)i4(bKPz=ACI2n||BgVqAu8>*lrd 
z=jXtzsMF?e*6=eZkc;v=5IkBkA|(`J42DDccxe!`FX(6_Ot2kt#Km5Q=NW>STMmQi zxAnE@DJV%(aG6HgE2Ir8|1ka6jgW zPG;pSHAL3;#;;+LmyzZ?H;{L@r)I7rE#y;l@Bc7Stja3492B?aP*XC%ZjtecI>P^1 z_S52hr3GU!QfoG12i)TlYp;wu^xZpea9JSQ@EG5MIi8Kw0MMWbNfi1CYI*N=UpDtU zJXy7ngr+RYC@%Lc(?DL&5w0`iB73p)j}E6Q*8(|JxG88KCA%ijK6$aoj~fujQ&{BL zPqr4cWr72L-^QrJ=kIVJf@AtRJN!q9;i}A2H({z^jyX4GEEf&L%nNK*FvwPJwiTd1eIsi5a>|Ixda6E%ytSUIleoA%Gi}$0 z%K$QYgKGS30@Zh@?J88zg-ugl=$IEM-=af#!8aT+ZZfb;973 zKHg|sn)kumcn`?@vTwkDeu5P(80Af>G^~Pe;;2?2I?fVjk7s;kA;$GK?khuY?c~!7 z+KZK%TfL*gqQ0-%`=7rNrS=}Pg~3L0@LONr44hHh!i&@kO%|-Y)`WQeINzIJn+B4* zBonD9-v&7y+~QN%M0eurUZW^kVVUhO z+KOKDsp%aKIR}tIR)?lfI;Z$&s{Y0Z!;hL0bc=AKD6^NRGT(BP(vjv-tmt6EJX;!s z7h|9N%;}^GvOZK)M=H5u0-xCg2ep|cqLq_nHC2))=bP`W$MX7jda4Yi)5vghE z8H7CPS@fBgU+V@aom6xGUKfgzCsP9b?nDsx!ehExQ%$M&Z!KBMc%cO`CZ@5SwA`)a~X2mQoc<$>r6VUdyd6wW1>L3>>e-_IL5%Zd(w)zJvuyJU*M) zdw%gouj(Dw&VfZ{|JG=(KNYW=IIG$dHJOtm$DwYoUYsh7WGtm6`-NnPv_P^I_aXT- zDR|Go;}dZM7<$wIJw`)Ll zk)frW#+4}mK3PmpUaT*6v3YB*o2!f)kVCoXWerSYU6yBs%w=UQIEnVQf}3DJ0kM*% zl@Z0bQ(=~_DX*5aAb22>o_cd6)pxUR#5`kp9XciLu$iJs;TTsVu&Z)LF0iKYC(LM2nVqPP5XTKOs zSBr~{KoZj1Sup`Jb{JAQ=u_>wrZ_mg?|cU4g0#3VE6ZEABkuUDOv z&qI`qJ%|uSP_lPEQV))&7pFzZRdW?pm9t)@YRWczt-*`pwU`Sut2XlBygE`H0d%}$s&@K3@bFYgi1x(>7{Jc3{U2ra#FH4~-Vw%r%8uk%IEGZi8 zu-J$fqN%rFOuU8Ns!<)1>~uD5=wqx%I%FJt+6%C0jEH! zK=)cc<2UkDrq{)N!a1#QFIc_yYPg=hk{UC&`P5*RfvmNno+6(w-}RfD`s=C%eD1DV z-h6cuF>q6q!OX&)0`hpUp{4Bho(Tf6eAL_p@23|IwF-s}FZQP0ToxPkV35N5Gj2~U zxp4?7ndi_)5zb}0e(aiQ^fE`eNw@RGf=|Zy3bviq8*C~u6PdpoF1bPyzo8J$BxzrA zE^KuudvtBmX_LW|ir->pv+s(x?9~`D44Ly&{|q09i^;v7A+Yi_t8(M8wqjAL`^Ii> zXc$g>jW%1036YakmJgim5MiXOdYIH^tU4O#r~z2yjV_v7Z-w^@&6+6sA(DfYZqt9h zfcL7>80!DCq@jlfh0Ewe2QT0Be&A-cbIcVApqR(Nnlv66Ws(?}LgvNp(MYgjlnWO> zs(qW;c+6H`_ij~C8pe1)NX=Ks_>37kt^X0|gN3sJpq$w%a(4D}`MV&o4fA2VX<|2< z;9sj+sJ-dcjra4x#vcO50DloTmo#itw{?s0RU4-dUefK~j$eO2snZ2uO52iFS7}Ww ztL%Ln*JrBq`thPz6I2UWxvfmUe=jLrt-bI!m5|b(-UQiF_K{uZA0f^iATcK1`FoV%t~be3^nhw}AxbaH*sy^$PSj5)3D zDZP&-`D(=dlvOrfjKpZFX)L<0~`aD zC%?c+ef?Mp)GrpCyLzDId|$I>D4q){SE)Kszd|OCl1hr|T;YlyAF3I~o?}<62gZ6v zOJ^@Y-0o3giB_CU-8ASyd1Nmaa#s*K_E6aP|`DG}4o3o*D(d-8Gu` z_D0I!UyU>oF~!$*{d}Q}-feD`#30gqDU6%j(>J#beuf@Ws-=5#oY(8u_uW{`Zke286!a1)gb~WJQtB!8`hOtBO0EJa?kj=M6Q*h{nM4PCZccZ(BMGdA6c(&4GinKiR)Q$f-k+Dn_v>N3J04Yr>pDC%1 z26;Gm`=UnvCx&EbVVr3+ZXKjLExPDnZF-!sn>yZ8<)=_!R5{wU@A|o`GSI+Yt z$_x1!oY@c_@0U;0GqV~`UJ1T#6sMB_$Rxa|WZ1LMr0{;Xr&=GCOg!R5h3B zI@dfF<+a5DtO*dbi{qElxvw^kwca&raF`5N`Xs(U92Of`5iMi-O?!$(T?(1&Drs^` z%>j3yyYVEa^_og&svU#T?%M-@ocFF@)J`?w2K1kmCI69GnDIEvilb*~>fD})l< zBTL1685e-DLRTgitPR}Vn)0f5;5FA-eWTwTzQic5Nt^<^cJ4lM=CULkU49P*YzCN( zxbW&XnX6nTm(6IzG>LHAy=#~=ZGS}OnaoTmHaDx$t%aikHm*y#skl`*4bls^W17c9-R*BFmXRA$*P1}YP{1T!`<%OCw zSl0w@z8K!#$Z@%~IYGEB;Z*q3Qa$j#o{GCXe;o*BU-@F{Jnqz<*g* z_}Qyl7tT_!Ta~7arlFtv{yZVULz$f-@zaCvA=J!&bPWX#y4A5Okiuq(VG~4Wmm48v ze#)REU3y!|ZSCEet!Kxk-dn)>*@4`zHJ7LNOTvNf8tcso-y)qqbORd6LI8_IUda75zlof!iEU1IXaPCKGJO14Y_ogfja8(&FPZFx4j7yC$TO- z4^o(Pw&`!Vkj*H*Gb!jXxnA^*0 zy`R)9$kjg^@H*QbPkP;au7%5ea->upGGK6D&?gveW0c&qIhSZ`d3NE2uIF{FmqOo* z<1h?y;Dk8{l$$ezj%+bVUJy6IX-R)OjQ^h1fXP9@J)@99I>}3z7PVeIGA(!nhFx*<|l(e46y;QRs%`cQk;ajcmcYfub zEMDkUS#|M2MEz~Pk9r+OZx8#JZNiUEpf;y{I?;q}G{=S=0R>p}JvwARp09jWg(N&k zskYfkYb9=**ull(G~QLz*L#S%myEQzzQNXNmV)GT2{PF|JH`_wNfyA0^voTtGqG05 z90x%$FZP{nmro8lR+61idnd1s(V4=HeI8@I9lpPyTY(Q+w)$1#yFk0e_uhD?5okTx zn@!n0G_R(Fk*XsoUf0KT_ilpTVN0?c*|D z>#nmd+nLB&o1|wkR_D8Q;`rYEKmLOML!<0P4CKgo5AG9X|4vc$KS%O|0Qf!AN{j6O zRf>BD7q}$g#8-B0{@bs=e50DSDW#8^>)wn18@+TQ{3SE|XSXw2p)p1T%CvtXpf-*mqOWoy{2qMEc zHx&6u6~zAYLHhfvxUxX+s-JtBlu#=*@6BOm;)%8*yeG_F+7tt5qJ4*xg;4LqPuQ=kt*Q*xIL~maR8F 
z#HPwwUtNjrb(KQQTeez|GKilmP4l!1qq&mRx^ok4Sqfy#HFj@D<7u8m>m6P^0Do8n zX#C_CcqEojgw&8wv3A-qXb)nzEkOw4+{*+6+lGz%Gk|9lwCs}mPhn{VgcW)c=`lb` ztA}SQ|B={MhoHADCR8X!i!2)_=hucHX5EV&3RG5$GJpEG|8sq~;&i;C;!+XrkCrUd zt!1g8sK6f=cpl#ZI$ot=DhzKk&D@=jyGCv58`3|gikVMkjd~NW416&hYLF#z-a{0m z>(EZaQ<99t_X)2rq|D9~h*AhQ;>OX4I5W0=_IN-SNHUl$F)*1E8C~cDMQI-42MLzF#r6pI zBGWh)>^89imfi@s?OxW1^_YB8<9q$3y?>M>MIWas%K_Q~%HnX`*Fm1Zr)&kD8i&&m z)b!3m`Udu&6UOu#Md4ojHcnAP+{+IIQUiFVRIUC&;{7)!}m zORBwxQ){C5b&<$#0k<&W&A&QdPmOuzOv?b&`b51Rywg{%^I!x@e2YTTEtiVI%ejA3Sav$u`KM;G$8L#-LTIvZq?TD<)z7|Ezf<`L{>(Fny{Hrzb9u)H z-3Dbud=bj@U)-gaPWX&1DPNOUa-riw5f~J#8MG=2<-&~1%4R-mfb2_9k-e{}5}T&k z@9_vwC`YWQ;k~g$UT00s_s2ih7eKEd;Vz;_?`henC)FQcoNbIcA!*V|+kAAPCj6PL zL9@1YJDaAUt}^oz%4gPjYLBJ1zgTFSUZ()8&%=uxU3Xx28}K4`{1*b9<@qS-vO3)m z4D1Bb25l$Li(W`MoiEUVEYN~C@~nGDV4U4uY#+V#5^W8dc1r;O;{g3fVQIH@?w&hQ z>}$U@qI)CvMy7(W{2RWa8~Kjw=P&zjI$ktmt=Vp}jN~f452cR&V)(ebRWdYalzUDD+@4?|dFm_N|5zzad~k}G zH&NB{4tm_Gw8S2zaSaiSM|P-HpzKUNLeAr3Lp;Wb=ux|zYiVM%<<(VoSB^p|E9l;d z8!t{v=k?)te2I%hk*0?$ph19s}^tMgj|Go{Ug;zgIdnyNIsWnKLRYjvx*XwiQ&Ozw}TVNP^*U zgb;`S+xKc_7aRVtZ!i_t(2S?DN6o(r7M%A~l)5DjNOLB9G$j7FYR#A`(FuOT1;MIo zHFF2JFpf>j=n~@p(fY#)nzbP$WDmZdEDqdUOE4UxKG&&sL9C?K8g^);gGL))b;uGJp}p(zgOK>r`1N%I-TW{kC0A{Qqt ziSa%b3WIK5+eF=lSw@woi|gSh!dewu2Ap&8$9rD)57yFN=07Mv2Q*L%iH=#%nw3qO zCgs9ZpS@N;U z5c-C%oQxK2(8d4kZ~Et}K9OLyD-I{f+@lA{mIm#g9-v(vBG)y9hoLsk4x6d{AzjRd z@+?r|kHIEh)dna{KD)CFL6HFj4M`8BJ1VA_rFF)t4GTsudciWo90%%*ToT5KLh@c|7TJ}50j?&?$9Y9Ff zr`Rh&Oj0L{e~rIf%1hHjT6U`y-Bwv6$6|hFtZ-aBig)g zq^)A5DCgyT0bcdzTBD=LJwJ9O2$ET+roCDBAx&E->s*0OR#mS?vG}`FX1nTSv`mk? zKi~GiZkfys{Xu*|(cyXd1YA(Aei>W#>4RsJ238t|H7wrnKv-Jok7(b5$SkT zk|XY?I5AUwE@CWye#qSi54o%F=bcRy_u^tdl3jf&JmZRkOIqp?B`v7MGh7v183d*p z%e}jxIyB3>Le)j5s^Q~XU>`==fkJV4W{2V$227xW6;;JTNT zNl*$R2ik^pI0(XhhBBq;4k5f;_P==JZEhR}jeWwk?@JJP9IWL~JPSW|?~kieM6hU^ z&G`9Zyb#{85%ydY4@mqf_Ez7`+#4ygM+DyuuBHiUw3-eF;?Zj+ovR>K2d2hQbQqg9 z`ISYpQie5Nh#2_EH|b-_dIdpR@)IC*9?i4a6j8Wn#HNFhDfNB|9)o#rIl+^iu_leuEa@dcw9oD-4Nh-7 zieu6$4V}gQ^D-&^)$gB|HDPDOU3_^#Ah_N4$bOAUQiW25@J}<-a}&&VCfCa*sHQj{ z!uM&S#_CE z8AcrX^Qyz9%TIcC9KBvxr2Ub4xxJA&E}x#SjY)I>Tb{FkwLi2dwT0lc4g&3n{aBZh zl*zdTZqKfqRCGn6!l5wy%D}x_^9ymA9&nCZ5VsseD;qe-r4F0=xf0K4l0_6)VxP5_ zinMU}yhC^m;6Y*~NV1*d)0coQQcBI5wLUjf?J=Ln?$`90eK=sb?5mv@`hhKCxMS4= zL68jUX^Ou+j~SEmZNiuZH@%OsZr(BQ5H^g_vO$1KH2UCs?<~T%JkF$5Pl}?~%m@WS z?N@*08R)GThf*uX{(!q1bA>J%t(|NC`QlOmW3v=?>*`TT3pJFO({$28BW)6e%{5TT zo*FmB0wUXQv4)%|xNiZSFAbu~q^2iEK#x@bFQj-77YT+TW&pLZ99fB6JJ_Vpl=Ywk zePxk=?3=K5TywDT=2sIEKDwV}^&nShp*`Zg?IQBV3)&S1O;~uHYAPi{`vtZ-*cz@R zy7o>`;vqHo_pBz$Q_D@X^^z-9mNa+8ecy$&ujpg_>K4PsRI4ecH)MJbogK6J7b6y5 zXTB=Tqbcbd6m&qi>u8)K@(ehtlQZS=%H9m}5+QDX6XM6a8H`?UK-Y)y5HKHv7l-UQx|3R0%~+zn=D)b6K}h_`^z#7w_`6X{E5F;zGBc7<_0P4MXawYLXyk}BKswQYYBw#inu2TY;~VYaJMZR8{1o**!nn*E9^O2D$!3+oba)koH_nk7=p1W=oX#Kht`1xhn+wO zhkJ`jJ;8E27u!NxFm0}KmLeql;(IRVh4o%8Ph3O3Hm{a4jPDW=jZi8Ay=FGq^5=xP z?-)@^59@RYA>*_y-6!c-!Pp>5zHgeQKJkrC7hXxp1fv?@ad@{}?b}fi5E3wQ)olSc zZ)fk6(^Y7ckn^tifbJlfgra{`;9i!(NFEkmO3>nZBM#~*=bW8JwX+BW?t-brj%)xoSg^tgD zzj_@g-y0ZDGTj7;|6zv&muRooV)J>sL!OxRc6K3jcc8 ze#tarFCL4I=^*sVD+6gjjQB5V(+Kb647569#Xly79tV?+sC8EW$?ZAs{)IC_`zO8~ zDbz*-Bx(E=wsDPFZm}(Zmi6LXOU;#EKETy73`PD7{9Y=AG`UyROHeBrN|H_5?bfH2YT^#zXE&n z+9|uID5X;Q?Y8H#3grj(Mk*NX0c4_!Mj5{0z!2j*#5OmyWLoTWB$0Ul6z-z73H(67!uzDT; z=E|sj>$QWMKB8oxxUx^XlMA|eCAP{W)?=$Z8gygAe%kpIR=4s)6~{3-f2z4TPHncl zvx3T*G7t8QUkzOz{9@RVDF-y%h`$a6;rtX3cJDibxFOlN%8fU#Zw3(jYwAj709Dm# zMtLA1Dmk>to4>r|sZw1I;UFf05aVt>sS|b2RVfVG-$jkpZp@y>A06B?fl5g`^ns}s z>3}h(IttHNRJy*;h?t%z!{|!v<5l-0dRqO>`0!P@(%^$0py66p<_=Su+SJzT`<}Y7 zDVog&J%0}j}!fa9yQA@xE`kyNch 
z=*%iH6SV|Zz_-XA=iRj}&+q;_3oypwx}nOKw*$yIvqryBjWoNC{jv=Qa2K^^0az;> z*C+E@GET#J6bN%61*O2Lp-D4UKlf(lWRO?suM63KQ44I6y%b}k+5Eu6tWl<420W{; zlyi=8K9e4|Os3t8^suaRUs~mqjg0y0WrsJTUA|*dF~_O@)d~?J@vi(_0)C&}4C(zs zTrQA7lm)}rTa~G8n5kVc6R{|_b`4iJACo^(Tq8G1r5T-8f$*nkj~KU}&9QNg z&7a#(B)lC#FZ~FZqDfR{^tJ2Hqesk#G`F(Va##n`UP3xp2S)wXtIRg8q2?rNK*oKB za->VlRH59S#4Ro}6>768RedJy-2d#V0IKL-$hSv1cFT`&ot zxp+}$&pQUYZ#m+V%YFH^Vm}9nIs>XQQpWlO&`e<^EnC#b&HPB`*bO5Vf%eYFnJn@F z<&kkTQL%8kzyGUvHZkHLgA@XU2+|XGCy3qn+#t5#oHO*slFaqV*A%TvbPSBUAU5)8 z&Jh%(3V5b4p@^F%LVL(Lzk@j=U!S^?cAs;OvPC}G${i%dL>&14USf$!4Y)53@PDyy zqzyRfX}J=kfn$8bg@2pJhIqbUT%K)nLvZVF)t%lSz7I-)bgdiQZjX#B^Ka2g;MaEp zZ$>u&Dec!9E#pBsb)jl*)b?D;m%l9=fRtQs#i@xV=+;#F=U;ta&q( z=sFaV!lgOibq7Rb2aonK-S+DAQ3t)MPBfoYo^-g*I ziZe?N2zy-TSCjbGrYPWW4S8nLEX0HOaQbU8bZIP60CZKgzM&2=V^1eQ7?IqlInfr| zeL4tdpU=JyQ76??L_6(`SvsFDye!@n;1yV`sfQ#-gt@6I>O^x}$^_gL@|#xNczPk| zKK8CqT=Ebr-MF8bS&-V2XAGlscFVfb{11Wm>#XfF+$gI=C}-=vC$8@6Z244A`j9-E zzR_h6)>iXdrXDly{{&!oA}vWF9;eq=hVX6~F5}qRR*wcAr~cN(|Dmq_Gq=@L3&7CB z!sI5;^^;uX%rKHY;~I3K>j4d;$kycI!H!_q@3fKs>>1*BFp(Kc(r1#JSbIh8AStqH zZUdlhSO41(0aAFrW0WMNBBHY*zmqlpo7C|<0|QGk&`?_cF+}sPk7y|-hiF|P>`>va z%j*x18e#@au1r@)d;A}>12MDEr*-hbe|UQP7y7qy-+zCRxD0qjzU-FlWc?S?5=8sd z0vzf4`BmqOf4}*+bJrJTFmq+NS=5>SX<5d+$^V~wS?4YlTVJ~L7*|?cROMg3`G4N} z{|8^aE4ei7D?9JFQQV*iQiFbt z=BeqslP4wrmyZ-L$gx`fO21&S4QgFEjC)e7zvsOs;G6NbbS2*9@WwVy-7o4;RvW(lNN&)Y$_WEK69H4offrAi+9Cc@OTrs>-V1I;ABI0NktTCjKYs`&5=e;$1WKbOaJpRE%#+Vu&h~FhgP5Obxcjo_`izsCO54Kuj=wnU{Y%Z2oc*-4=T%ZxiyTzLt!dOQJe)5n1RD z4>qP?MrB!n0+iXB4)N8GzeldgGbf95ClL{a7C%sVGZn?C1M{)kJFG(C5K*fh`8+8= z|AIP+^oya)LK%6B3j#@)7Ye!E>E#krSsP9y^6vJRpwc@OhNiJ)r!BR8?Tln5&LNB< zzhI@wA{HRyU1|7jWt=^AqAsDzXL^DS(~)xWIAQa`wQzLLSB41spnF+?OWlmZ=iZtP zchZ3QsAlRb14LOeltwMT%+km#G$}PvWht3xa+{gA+_-dRveM#RGcd0U&>;A+rO6vwMo$wo>O1$OB`P8e`cc){rFHvv*D+*VihvI0(Pzrsr5~- z^^%<5n4j0Iw5yN|e%n&<#hNI*u~9G0Wjx?IB95!(nVy}f&W^!LF6U{fr5@+nJlV8< z@Kk5YffAuhpwR_9Oz7Eos?-*2ydPUbj5!gG8=HHjVqfhj#@qkeikqk4-Rv>9C4$Pm zb`yt=_u`)INtNa3bRlx8_Ts}80Y|8H^Kp~=5|_tF?e5;*FhKYKM^8f;7F)n$tPr1( zP_;auFy%4Yt)nNs&r)Nw_wI?_tl)u%uzH2&3qRl5tqqqO*jKJRb47|%+MJ)Q5mw+K zs2#=^rS*Q_{X#5N4-PZ3pl*zVOLS(UYzNe6N7D-%Yx5f=uU4!SyPmbC*zEssG(%t;;a~_sp6rGi6mF9-Z(dnpnZmFq3$BmOr$sA zofTBP@vwnjA>Onbq0o`yXf~6UE96wL?`r;BZ!||0f^aQg9ncVP+HkDm!QM;a+iTz} zODj}synNy^a~M#l*Bvl%&wgZ}8Fr>fHzkyz#cKJZ9Hr@(M)bSh8O#UQW)!r;sL44l z3S`jda5h6ig{`$6p^-FhP1mPqD^W!R9v$dH_+cXC?cp8PcOUDU^f_*ctAh2DQJJBjNqWM=KX^iE6hHig|E8)$iCKBR0) zNb7Y7=3YkvGAPqz*>fn}q(A|xd1%2s`j>>Zne#W#CL}3e>gbLf7*dwlWsY;LV9p= zwzqMawc!h;u0-FH^<)vR%20(Vg>E%Nfgxm;YqR9+60&M%%cUM!X;P`)aUz_Px_kJ@ zuCI)_re6%hHzJgcX6Tt6j=m3yM{99er^!?6mO3D2oO^ZGQ-1vT7%-X90aW-7BTVIG z)Gm@e);9Io>5JH{kEuo6%-Uu6pf=CTD^e#Q2NeF?2au$2P=ZtgqzVemFDow#yZmhwI<*HR5{&3Iq;E zxXlVC4i1GNON=T>+tWJlrc8lpbqR>tZclf0jJ2PVyEiFh z$zL8N8q4ik)^xa`0Kpt46)NG(&o(K}%fpNlHHGhm2D(%eaA<2_=PsuASt~E!w-_Z{ ztlyLZyzH|a$128YJXkUtzX8Q!lxIHEX|HM25_l|js!6-&@)XoXGF*Twc@eyE%=KEf zki&HI6Op5nSm?ZNee=Z*pWz-n>5ZG@K!`!N+al zNWi*x1ilIN&~0ZzO|^y=m=rC+5idz5qzR39A)oQ*M!ZJwoU9c5MB;51WPUmZC$l+9 z0ES_h!~35-TPM;byb4u9c*VUoQv8m7xa??@aKOf}A|c@epLCe($_`C3)!&?X8qw&H zo|jtJn=o_ytX#Q!6Pic#+ty4;xpd08jmiXWQ_gSOQh}8F{`gX=6KnA0lN4S-ZXIjV zwahT#p!iN)%K&akrgD)P#yIAFcVb365V!7yJ|w&#PK#ls;NbOMi%ZT|P1nFy%ZU3g z6x6#ES%(j;EFoA$&9ZV_*#Ka1O^qCt_SGxU3KbX;HBz>=x44^-3kG?E4-4 zq&!ffJo_(liAhCKd@i?ml0{n5*lVau@+wmoy52Zh>5kS>UQ2r_kH}X;>}fLdrrH!K zM5Gq%hsM_>)k<#`shcvH+Lv@vNzIlD1RY+Wv|UOeF@(RYqF8;Nx*QkZF;!+@B?5U_ z8-Pz88?SH*DILFc%>oWnJcOgE2TVCs(=)bZu`Hy)H(4WS^IWOKbg1gj8SCo>k7~V7?P)K`2{M|A&J>;J*1BxC5h<7 z^CVW`-3(RnH-?7H)=#Hf_q^+etPLZP-ly#L>+j*fJiNaDcX!BX#qaIcuTSjfdQUTV 
zJaK*G1BUB9|D^t3SgDJgx@7QqwBQ&vyzcqGFnDM9o89EUd?>$P0ujgbvCY4?o!_JQ zFOUAe{&SY(pSR+C83JslML4js^N@po=U~6}_KZv_ZC@hwX@?uE?5%jx`aDE6+ckzJ+IGm=OQw!_GGV|Gs4skGxYe>=oe17cuj1KJ2V1XdJL-#kKC7zSH1X zx%_{4tfGn^cTnSDnyuSPua0;Onv_5eaFURgmKJi-S8ENu(LedLYX{g{ckCiGViN^y zhWD|{E+~)gcc(2UC-J2B)0j3qSLNKV&Gc@3j87+9mGzK@)}~8`dA}yOacCxP#AUKL zV#;r(k8fjhGaugjC|8L}8+Ay-Rq*0*;QDx@0z-}Ckj?R-p;=(%M({LM^eO(sXU>iQ zUsbOXw;FE*>O4f+c`^swxvyp0UhXPl$6>~?RMJP<5`n~)03IH4YusdV8D66b7MUYD zxmQq_^w2Nx(@5Q^kA3eGIDKW0$e8ORW8R8a;;f=caoXQ(&5>T!O~AaihS-B7uW5hS zvbO)4rRAG1l4HZcP6JR%KX6KGcTiSACinqyC0N-|S30Yy8P(MmD33If@8)^^gNf2s%+j9Hid{Mnc& z^ngAM_qF+2VbujYDQ`phydY-Tr!OrFER9bpmTOXLqu#BC>#icy3Quq@n;IW=gK3fa{+L({_pjJ>n zDO49$%DcuvRz2m_|JPIf$Tb;aaDRAl@wUz(RLAj|O;Yk>`{Ot7k`Ug7l|%E?ejD`v zk(>OFxc$T*J(4v_DY;YcwUH5Wt9EASXRv>0T&cF@&Q90a`k1$H;ALft+ztYW27|QS zr|ERWGe}x#Zj)qv$IMKxWcB@@ZD!w=&rSR4IY=E&29!K6{37mqQ2R2~iA&3G=kj_E z=dpiZu*;83$>{arYAXSf3)NL4<6daqN=fz$r!3|6Y!QfJO$5tF8(c1Fll1G37~5*7 z2NMv4i}5i_!R&1(L(pShuQA#_ZZo-ymv~7aKvF?{J>R}|W^T!O2O7lIrDwO&Uj7lL zMUJkp>B@`LFTWoROf!D&pOppvGC`C5CxZmXX7iX(%;ex{N=@XjWu z<$l43kPw#*jW2m6Gz7pU_J*$c_a9F>hVOyqo?#!_Z3EB^$~}z0f>YOc(!j`6*d=&- z5oyuITx?jv($eg-?|Xdx;>EtT!fWdi=)faL$T$f$0*(>NJ_*OmuqahhxYn8wuEK z@+=8DX+!K+UNA@{J(Pvx27*KQNoQtHlwaF*eZSHxBD9|xo2<*j|=6i938` z8(ZWqS7*!|*8&%UhRjQ}yvFmk9&)E;C+8I4rW{_`w3OIg9k)rjQN|g|{9G`2Ov#vWk$hLbi^^caal-4qKN)DDCcqx4P%u_p5dU1$MTu zjESRJZ$?kCsKYe-Guq$td<-={(ZS^FH!U&10j7Wnap+n&nNohVt*DVO z*1y5+Gf!k>5{#~Ld7@(99okC%iBX_C4I$wUrP102%*xbb~IQghe!39 zjsveXLyBI}({kE(iff>dHb;#JYxLg<63lQ(SZ~;k?!_G)D6=Fk+lJbx>45$Z09iUL zQ@&%$X)>vWTmHu>MJ^|3s8#d5L_8{0!b?>s-D!;yTe`>u0dYomXRqILkBzw))u%CU ztW_ti@?CfSPn%==?i3X!HY#ei$@9PUuvn9pZ|)--T#$M3Q^R{S2=7w+`bS<`OH7Vf z>*u<2Z%G^rXs(JqO%K?w+7&HoCnsId3yB|JSv4GT3b|^*Zu{d4m7|!Qx(z2SkICxJ zp=Nn`ND6*RM-x~A>Eyg(nFw4M{RnbgB2TjP2sLW46OymXljPJU>Ur=!fNwgILD&wT z)+ml0F{=*zS8lEL7b@Byr@5%UFnYB!nWJXiL3g|L;3$NL1JAL9m#$)6s#)OGIJGs| zdk;>f+M{3u;$zZKQ6V&4FGkGW+Doo7Jl97d8MEg)&yMq%S4n;3)2Zu23hB}<9Uc@a z;&K?~E?xR46<AUrYXxuW|d*sqsL^Tp<-=&=5Bz|st z8KG_3qy&V1susBpSEsT==}{#Cxz-z)%4NRc1Tu{cck(RrN4_z^D}$WByGx)zivjyhQ{94Vh>NgSpr;c*wOKz z;hn%I=G(V7NcS?jGze)kmmGG-C8NrUcv;pV&1BbGOc@HC5RNrCJ;GqUpN&v~-m(&M za>S;`qyS_%yco%;Z|4XIzy9kVjlrsP6Ra899mgsiA}ezj%v@bNb^(3^@WJNlF3_OT zaH|73vGl>|03zh*VBeBx=8|i$9xtij$*__t>dN2$%Amrs|EbN>3Q>!dXZP4)> z?R4Z{M@^lWNd=u2v?e{kITx8?Ri2`_hRW^vsC()j)toe)Z0S8%YE-VkgBI4)3eA!G zph8c5-83!<0us6N%<%NI0yDIfN7@NHWVG0HYSOw4{x+V4S;CC|)Q0T)1o-mPfoHoT zPg~;cC>9VR`4=pfl~4V`7rl)@pE#lRdjFJp2uODPIiLUu_Fwb2J8kFB{Nr*7H2Xxd z2d5P?00s>Pcx+cxWGY|ysf(C(0;$YJ`k$r0yStwx;`S);Na(i9vAL%&qJalAvg;h* z4^G?fGiT`lM9M2C<=G^7TCjZm`eaJfaCkm}`PBWoEk}L@W!2u+6EA;S(Qsv}8pN!c z^fTDg_sjCcL;Ujo=>EXziMrb5zvaOG%!two)K%A}X(PgyJ*%Bi` z9Wd6}%>y`F4&wK3e++3I!`}~!|1)szLT>#k@!`2k$%jZa9RvpQZ0K%>U?&*sT6{J;-fLqTuaVYmQ%iZ4 zH-ytMQBhxBUd{)2vSw3*kH_|oUF)w~4d`VH!cuR%9&}?8GPc>_egNz*57u4Sb7>$X zDKo0V%OX1f(*|8^trVm0-w)iYP=MV{J)x=qct1I%V-YB1l{`lNHGlpF8vc)H_eHo? 
z*K%(Kq@{)Ry5CmR*@ z`%jh>@wWp2eotxzoPRb!+-Ez-Uc`P5od-+-hbj!TT2sTdK!-PpEAybxXz}d@Bl&Fm z0Z#SCdaskgDi_|Q&XM8Tz)6!ve^p9GA(F`*kL3o&21PF5fI@cmTp29u(4{-8K8SF` zhJqWtBAUsxN%xo6#U@Q^4!)^aaH#g@@fb}(Ty~*4YT5zXptx6`%Hx5cQFeHt-sWa} zKId&Sslx+|XVoUk{y3HPq3zrkUX4D%PDVmrk1(SdJ&+$( zH|Zp&$fWRa*t*TY%zHBA$$Xsso={tIi@aScEYfjoQqE|(Uf8YrfsjlFM)V3p5k+#f z{m!0Ze2^z-2#_`mMTem!+Vg)DXlDI6+;4_ZB-hx2rWwqaJ~%v+`g@4`k1#0GVZmow zT)-a5P?#d-qP82Ma)ugJ7jpDU_Xz;BOS#Qn-7hf0^&9Fyi(vpjLBms4u>U!nhh)LI z4JvFwu>(^teSR{XH>j)f!3cw{09o4XTQPlnR_xkF;+d7nI(OfLMAA}T;RJ`-^UBsV z<5Ud7h4NLf4TrQKKs8tm^86tVJ!-J2s-)gRGj*TIWO|2cP?SsT<65hFLqJb#QimrG zd?cer9wxJx7-r#v zMs=V+N*kFVX{Nl~WTRJYGCW%NAaeribcAn$H#N$hKnNW|RS?iZyb8Qlc9;C;Bj`2u zDXmXESBVAAIm$!o7(SPd5Fys943~P_QU?r@xxRif_n$&Y` z$#ijnt~25Db5UW}ZI6^NZ#14AV`{!4Nwt{GBXuXC_1tugYeH&}+B{h&~ z$wM!d^@=^;)}?!FPI+MzIZ_1vNyR*va4`k1JlaNgo5!fi5PDFj>_yABbl#pR-HRd@te&EQ(-IV0boiWuq6Ag*e$kd48_z4B|z&?oVw8 z``G)+OWo+1ntX88(E%flTd^P}^bLM{sGwdy>|2P-r3sq|9OJ$A$!WAf2dLO<0w{8& zPCWi>!$~JQO7`=4lx;)~f~+!VwIN9PTL+o3TFRqbNk2S#>vOiJqd_k# z1SS)c#T%2Ct1%lJhDo^$SFb+3@-#fADK`rx-H+diWGr@SJu_>aJ47J<(VHJ<^VoJex7kVKvHVYqyv6(yv!iFRanofu z#J+3*yjr>#5b2co-9O((X#A7y64`T4tD-iL2>iWbU@K0JvJ<*08a2VRX_9`yFRftu zBVOx$H*`4T1~;L5b`k_i#t3kg$e<=)HTJCXGV;mVXnDEC$mZ7O*bJPffzs`@XGwCV zD$d?}e>+y(T6%4~vCVff>PGj7-|^8oCy&)mon#YV0x=Uf*6qLxE0fY^>zt zjO7Xk*kFcZ)wuW8Y$h@gu$p9GG57zpr4#$65&6jKKdhv4g9kpYe+Gc^^h~$4QAG~( ziz)st$tE|UXJ&&Sm^c#lh}BQ4I&Um14%%1%1v20I{Y3BWh5HBa-Jfsd6r6U2n2&!U zjciA=cz|f0#bCbINW?uoc#i2vwT0I~-SKPFQnPyTaY2{j_{G}wL%$W9@1tTe2j}(c zyoINgQ*QcEF}_y|asI{LbSmfX$o*`8tWhiA=N;KNfGcP1pC!3=*QO#pQ~g9A^-Zo` zi{c682s}Kb@<31P;U9+ZQ$L5S4Ree(k!%OPCCd(8!8MXzZXI@-z`3v6n+V4QA-m$~-SsGq(Qj5orow^mucK&=8m`D+u9&nyls0Y=nf=12ip!`yN(~v$ zkWEz|9bEFo)l6Do1*7!oZgY%eCT5QuOlY3u z(&KPTA4Er)rMWjLE{x?{x7-kg&SkYswNWsC0P}Sj*m?s-xIf_TPs8dLHwN-lgHjRe z^2@gli;46!A)2}RKZ5=K27x zjQ=Q#$n8WPJb#|Re=RhwL=bLdW+Z6$`WMcA6`xy#`q}7*FPV@vdT;^arYcqw;4qfM zLJt58J=*&^SGU8;|7sX>!XG_4QFfZ%s!x-2xN-hD19M(c@vI50h|5&4SPY$*GqrB< z*p)TW`&v1DLoX_RFrrlGB#d>c7MokU^wb+o2VW5BxWeY{wHRu6fl-(wV~#5$)`9Ps z@}60BjEXw#)eG427P#Yi2>*MVKihGc0!&QV0hpx%lSbI|UAfnvLcZluV6qYkGl6?u zv7u*gxA=EH$kTMPnUN(&%TugI@{tFS)5TT1#QE;l8_slrz?z~ca8^Vw?c+na3$vAP z_ni((E>Kv3@1^+Nf%$Rgh6;K_9{VfqOdQ~-W=uZ+Uo_?aPSba(0^D$sgWbRwRtalW zdW*)+Y-s9N-eg+0zyyksw{Nu{dtKl+Hh%(yySby4#L0|PbR_9ARkbfbuDU*u2!f0a zW%MMRQ;Tq+MCT-_!q7SJo|{LS8$-n%aprsz5OjnjksDi|ScZ%R_s?8yr$_WDUDP>R z>^rxTwl6ELFvR`J;N>=4JB@A&Q~*gKE_>IudL&%`v=C^3wbzX$ZvD5bmCJSht)<#( zVnqc-x^;{P0TXCKHO$ns^-vv(&6rvKgm)238P2mG z9*%ysQ~qk|3Y1M+LYmU=k_8g1IN*3D%fWojx`CDHu@UiuMG#Owv+F2z8(seZF6E8y z2uWt7(Imdby(=F#J9TZf^Ws9kki8CWTnD{au>;=}dyfRHWIvXXX9HjB;jRx68N48|ex~N8;nRK6i`fMUajRGQSiej%hs0go zYjZk4(0+(guatCb?G8)Id)I-S-x{3ENFI&m^YCqT*xC3pQyU5q2O;~9?#p}$^@j1` zJ}Z_%-Eth*ema%r8c5fTl^Qo5Hx)OhCHA2xO`7{oKy6b0t7V<3=VD^ich{CrLB6#; z01?kH53Lqnjz5+=bMwZ`tDz}EE?2Ed^<^3LWa(^VQW?25vfdLK=s=pjczdo7ByjbG zuHG#F73I3uTjK3a=LjBhKwGu*PKwU2)o@Q~Ai?eU<%QE2tjgZB?j73q3udeL;wq4w z4P$pYcm?W@`s_0^yKA2`**+PJFiU!#RS7sK-dv{@ClbCFRj^$I6+t|uBA-bUxmd_I zpiHwu(BA2=gJz4<#0_I5(x8i*$RY4uHN?^f)||gkEB9r z9EL4=r-92&GQAmWAJf34*ViTiA-JGioayci1xhDQZ2gJmfT!<=T%Ne!;-$o;3CkMX zr$Q3{u9Xf>)V(cXz>s&3u1^A0LtYp|@cBR2L>qsbL66Uw_)+!AaVv4?$tFg<6&DM> z+&? 
zu`dw3FRAtTt+uoPVe@348NmF`G)RsRb>C?4se;Wv$DDwLT-g9Z>lk?y>b1$wG@*RY|hw+J#0RqMb;v9Uk7#o z-$6%Ow5b{DC7+Wbh3MWmT@!@yWYz84)}`Ht8mc;qI?SLtdg=a#KQi&MQ>}H2wZk(G zrZ0DWsFHPghXX>w6F&^W(49?eYH`;c;(JDlGU3b2 z+Wncc!i96>>#Zgg>@PT{GAHcr*M~{{X1isxCL{VzSo_OG_hhtU4Z|I)JCeVOw@YdM zGh7N0nA94vW9IGfv_aCNrs_hKgcyMFn0kvKK`9`pq#jnlQfo}a*pE|rOtKc?Wa z-u)IOB^^tWGI*`uX4>FUg$(DJMwm9J(%e@A0eB!A9*is@wnycMGoWOKK3U;h!ztVj z%sh{R?^m9^O-x7z2dm$#w9Y&=$^IgeTzp4pc&5o+K~FP6+dTUZlDR_c1`OLWI+QwHk?UKtya+9^Lf0Tms zsV(cqyJbwq?5l9j4=sK@0TN}}5DR~u63=V=kw&&EZySY_msek_1y`(%FCPy4ADDcdON~znVsvqj&`E))}}n3Rg`?0-X$Pb3tQ(fi=~HcLRmZwBCC< z)|yW2C02e=#z+2lABD_G{p-rkUp}Yd=}Fm?l}tCr+*+gJj@mATQ4wc<$T3P0Xd}`e zkrY1fisuliut8?kOlh>p3S(DaE9;fY@3J-Q&Og0dpj-so*k0P#r_wR}jme72a4X{oA|((w#jlm-IfpzP!FNm~|hf%NYu^3`C!Z1rRQa;|VwT z)uty#?@*(33iT-~5dD@tk_g^L*WCjW3mUW2@67E{fy;eFsu_PhY2S@Vx2zXOkJ@8c zB!P`U1V9_-eikBJe=pdrO;?I&F3Y*&lCp|sMxG*`fRj82^u!Y$+5{%38tq|a3(O`M zyf!7j&)D?bdDTik*>sKI?`bYhCUN@hzS(NJrf$148+hx-5aq4tKC zP7MlNbF=OW-dP>^{b#-{0bwbYCJl|?P84Xaw`WUv1fk~|cKi^d76Z_f?S;;dP!zOi z#HG0_zRXz4c}>4uy-t?yr~2`_g!e@NP`dlfcuju{1(CUKa)wFbS2zQ0Z(3tO`a|pP zgaXs5JN!A=$FrSPqFzUDC8pC}nYn}3eh2?>^1_GTOkDAN?||C!Jh(kC>bB;6J)LK7 z?i!oYheGkXhcme>w+cGx z(;`vFOS>wI^tI6Af6nHGE1c*YvZqr{eZ_y5aj$vBGg4;41T?6Ji8uK4D1elV>%Qp6 zYlu)}(K*O5;6bsz+PBXq=3$?mE9<1uxNUYTM>Up_>^Dc>GTfKvA^=pIw}&uW_%W<1 zKC+ZTtiq<1Hb|;91TAEALv6CokM^WbH{;yd+aNl5=e!+^G8CsNt#;IAW?nwJ*b&X9 zbI3gBLi2*x5KcW&y?NPAuVzdaM3A7^DYjYdXFLuQ| zS$a-)Roe8?WUY^~G;Ol5OKaqW*L&|#-ax4j3>?lM0xaiCC7~}d)$5tNmr7cCXG((K z{{)1y?>`GGWihu3C$#dF-TWS=^57h;r zeI&rD$j@Si3xYuufA3vaG~)EL;E&eU@5Qbw#2_n+PYWHhCUtM87m3W!=0ZfT_>>7@ zzuJkc@VW;o=jB)B-oRRaTv9gj-u6?L%NsLYt4GPu(cmLFdD#u8C&};S=^j0Lh3PTj zHfZ9^3b{0Rw3MOR{E{qus=V&yJrrk z?wdF+qA2=WtW111HgQCSNrXCmp{G!9N~3qlh0`CHD%Yr-p)*V`fFFGbL?F|n|EfIl z9di|2g!i317O-*BE@L+owkwb>bBRy0Z?6pyPM-q!luoSfKS1pLN2d^jQ*{ttMVk~t z-A%DJ!34Or)uz+^2-R6Hq3hGYQfMpi3tr>4%dLg|Jywk8em7G+o^tR?%5W+Zw|@$T z^*MvItjns1_xZcr?kM`mKAb`H${}F>o%G#e@b?c&7^GibkwT`^ z((;+e;|R#+Z!S+Hl=sjkxp#7WUnSBLwhw|ruSzaBj7ba1kvS4^=(p6ZpH?vSU@Q_i zR(DAOC6J;+p=PuY+N%SxC2v%LvH9&VTwim4W;F(Db>ATXW(X|PF;%a)^P%P57&s&3 zaM^m^KjU!opwguY=+=XAPxOs7zcr=wg4&bkE=OFO{A(I`5(#jQ71yKQG^-bz&iDR6 zG$!l4m=_!DFA!P1w7);GQr82GjK}(@s@ZOlURfV;B9k(Nj9dRUvpmH{$f&(3j~y^z z|8-V~&&*O#@2z-YlHoN`W<_alv)-EFIvXg{Yr5t5^=&cBJvlpI`t^O6sXh!z&hXxVX=My5&rfE z^A?nt#m_%dvw#2DFIUpiU%kB^b}l5su%=74hjHwI!+?HcDS(5nHf;)D@Y-hQe9Oy` zUty5S5+Ke^_Vu!>nDh4Kkl3`+osVmn$gs^C~*7Qz0eJl#V5gO9^<)Hka znqHTOTkfI~L=PQv|27t8Qb+$DsD#uV*jY+dq3ks81QRxa^0W8WgS{(WEKJYOCS6{Y zeDxIB#@_Nn%s~#Z9`rH@(dS+@=uvC{ zbFjRrSRTJ$mlGtYVA6b^>?^RyX)pQ)JZsHmX2~1ZRzlvtkUR*$<=OP6WhTK4qsiG7 zRHV}1+f%>pco4&q2poWE2!hPNC>aXWh8(`DCz}((u*!?5&ZqwnO0*>@7$kp;o==3j ztTrSP8nxrW7ZM-cN>u?ZMx{K?BfY%#k z)V)8ifI<||m1+LwwMT0baD=ppazI8sluUz>3zBsh@0W%w*2TLAS&LfyzPHesC}$R@ z&7*|N3W)v$fV^`%4G@LM)W=%P(#gEP9%KR;DA{uU*pxsjCr`KSrFZz~mCf1xOYykL z5LNhUrttc5K{P&*Kc_9XE57A`*fr*}HsS~4Y#|X~UmcPpQh46%tRHBL1%Me|JPVkO z_jcg_n&2?kK%Ajpa|b9M5536aA7_1>mPDhar!VlDg#&L5(t4Qcx>2@$>vk=KC=45N zem`y>_>B}|FU$EN%yVz$;qwG>Bjh=+pZYXwLLeRbiasgauzK#-m9};XYWp`zpelJx}iEA($7~V zL-4ruYfgL1>)POQcUy!Rv?WhRd4~XUFNSFDLCE{5w=z>18dz_I>0J@9O(^eJEkd{$ zX%`u*y3itM9E3-AXwrkxQo33#P$hMnZw@jtqtLC33-n=qrNGN-rGKpVmo|Z+^pKRI zjY`^SBqR!!uZCS5>H<{d0hzc!3j&MzAXqHMMty03LD)JAQgoAIV?+xPKSyPCQI2>6 zWZGhVK_jX+btl6gJsrO}yDr)udD!*)j#BY|5_?vo`lz>(5a)2o`xyY%Zzn1`Gu`{B z2b${Q{CIN-30U2}n`LjX_n+y03|kJ7xF*JJP)*N&A-S$&D-EdmX1>F8zzDAW6|uhV z-swfiu4$5c=sW(eh(*R(@)tpFA1q$&;MDMN0yC#z9oJ^#NcM zuHGdaW@yC3{ZsIQiS*IVrSU5J(qLy~o}|?bqBpG#UB&R#AL*Oi^?9ahkRxmuBksAG z9XvhdP-!y#Y)jH7Y_Wc_$(^d5F-WM96XTu$g({lO*D*@iB?X!;ip!!cv@?^+pubTD 
zZucB6VH)F@SP6%57VXla?MZ9`mV27>?j63ue~VkUeNSXn%0wzGdp_KsmmfdxxEucf zGHeVyF>EX04k6VAn&l~!PM#a%;oDZqV~!%z?Z%cW6CQmnO!SyAJ-RHFCB|rJl!QIi ztw&F=)l4UyA;y|4a+t5?JcQcW-z4X&(B4;1f|q%FyQ_Y^Yh@q@R$TGQLV+UAwD41$ zdZ|-UUV%}I@&(1#PHnQ`#?W*s(1J2MHkLOxQ9mCJxsFoYd*It%^W&)2nNjWSK+<#^ z<|`HYZta&d%1PNQ|Abp4m%p*3*cSTd-!o%RUhF5dc_JWQZ z1Hcxh+U2piC3XC(&|bSJY`5f?d8z$Xp{AJCHvooZJCwPU+syo9 z5gSN>0VW}t=arCHWwDS|#HuaoM*X0jF5rkT}={;((T)=z+GW2J=yw(h5puC*sPvLXYnKIkmhkkN zd}TtB?@wLflJ8b0wr^gu`-S5(hM(P)G11?wlFGOwElUVY4k?X=&h|GranMGz^ZqTI zKGp^mZ78`8N89s4Sg_@a*gk8a_5*IyCUV4K(*>`MX_=!2AN5B2rx@n83zW>^6f=h} zB)?qMyvcR{m>gD!?W2b z{LkwE^%~I^ocwueWkmr>x&^m>s|Wn5`cU{jj{kZ3AX`ZTEk(k`8+ zeNE>=W)mNt`*i*2vX#<}k%=%g2U+wNScV+zeORXyT_83ex*S&z6#A=0de+x;-e#4d zUgb2_Lk2Q2R{vt-)h$>R%JiZGAqi^F>M*w zESNK!hbdJyNs7z8rJ?(TQZR`Wq~k{=$RUp;ZqtMqRXeqOjJEMGBV+vOvzR3Y7uVQL z6?LjEeI7<9Kq|!-!)zMMCEI+bu$fbdP*{pCN8vR_X+P{pHin(aX zPoj9kT=V*V!)@_z4!Lp?`(R>oU`0FCjGe5v7xn8D)v-#Tk&zkEsMD33kNIKhby(XE zcm70Z>NVOqhKlxe)KE0&kQ8WZ92KWE-aymE&Dah6#Gz)O7V(5eVGGqQdp)cG;>qDr z^y8f-!iBSSZh-}nteJ`5JX7Hl@8jkB*d`X5l=|RCha1u6PeT3o=}tO`Mz$R;o$_E@ zJ>7JMYB1^1?7O?|Id6C8&Jo(}Afg<{&t%6-jJ$RnYy%dEtVqCZ$l!g1H)MYBp1IT? z!3B5*8>u`k+nt%5BaSZ)(|}a5bJqU?c>c>9k@fhsjLG#3afzHdU;E~m#Njzy*bzV9 z=-T~xNLA{4_FPWgkb^mcB(a>RPj?C}A5tlB1o*-@vj7nO`RQSZqq|PppEk&t@QYDf z$H{}^E9%2^En^+q#Xg}A1YW%1Mt^T9DDWff(2aYn0invyNz0p$#>|q8PrP*UKM!P*?S)y)Jc_8W~jcs&3cfO z6D(p3W;d>h1fkwGX7lEP%wZ*NV#WqhNEy=Z^=F$#yKDqyJrgzEohW{b*}5yMta3#p z)+2$%{IkE59rdh`W4|tigZ=(I!nJtXL_#*WK4ut@Bigre288uD<*(@$TjAbs3=sJ2X``=MkMV^ z+?I3Q)o|Y&yYP zQ9Wiv4Zm+RcFR*>@n< zP+YX=cV~t+n*u#@4m6X&+MD!PA^M5#GPg)ys+%tbihutn6C)osE*Uuq=}{8SXsLxJ0(7nte|V9MRZE=J>P@)Z3=zJ*(s7~!r*b`^ITWTxWNX@0i< zINW)dN+DoJDVsySwPF@`rr$ul#g}Dd;!AmI2*x( z!TFMr!FSvOq=XZdOB@TSea$RxDq^d;%GcgqSml`mr!6gjV4B$dhnKHSGw!AtoGOIp3 z!n>{YU6w|s>)}5)QD0ViY+Iq~ds`)}e-yvX(h6DJoRJlx(0*lF&CLjAz-F12 z6-++mwRPnsEb}&Yp(8xF%0c$1|Bc|jBpaeE%=-N>5aZUE^N^4f$tmZ_(PLe4yS~*C zsCl0oyQ9cZw$v5lLzmmwA!G7nv+C&e@zF&L-pDgLN;P;1#P9bw=JyGa9aev@>422; z(@25eKn~rKlADhFA%M-pQU#H7iB|gJWJ5U=H^s>r5HmpSmE|cinCEtjk+?mmYu|Ub zfQj-nBzc9m+@kFl-%Qz17k|wN;J!q7KIPLv-jN$sw9@k_6Ng8N6$o5lSUT6vycpk; zT&S10To_F=M}bZfDh|y$%A>z3Q83*{Y5>l{6=xHKCRKl( z$0B`L;>sw(cCPo~nvwDYD@yc2t+bP2Nl}0-IUy%KK%n`Wf!PxsRHOGF+1@{F?g5Gd z%@T*df|$YXTi$wv``?7ajOtcRDSU4MI(%OE%J%}4{w7pB%Y=9;5A?;oNC$G2i;8|*ItwID3`{O~wa2)!7PqW*`p57ymI}H*P1h!MIr_t~rY&cC zOWgmt-b!Mmg{kmc@%?~%2-@{z<7Gah;nkrfB7q%nN*YJ3-+`EcB)a|8E!(>hOh>Xh z%z_t^bC(pNg*{N-#baBkEdp(!c*?-ZubO%qiRai@bE5VWKgE|>3#6p%KVCNy3F-E` zt8(=C2=Mw$S~-UN&FyF!FeejGVHyZ)n@R;;M2~1%cOyrv|G*utf@r0?h0oloN=$36 zw)}dInqne>h`HZ8R?H+S0}jLw?f^z&hVx?9836RkhETF`de<2T-NDAN`S3lwo3NMX zMiJmxXh|8-C@4gTL_qDs8gQh{VgG=Qs7Ss_i=<}!% zYmvlC_v1{t(SoJLNdlHH%3L0ayom0WQ%I`m+U{W<#$>u7@i-Mq^c~-_b!o(YfbEQn z`zg-#WqP2P5Zq6GCr*6OF3O9B?OPmMEgF&vWpPB%J4v z7YP0!IL%Pz7OrXHjvZ!_{5Frdj1If4T#6jSbqd#6kMZh=Y3{S~d;!Zf`0@F70D;)xDrrtZv*t$VBiwlWF(`oG*P&qIEdhhd`d)Paf@*-OI7 z;XgHCO+d)@rPh9)!~xObahdZyU<|H8ljr!wzWws8TT7+FUB|P`l6qO%wDdgxu|0qg zsMseiuMfXP&Gf{l4^=0|vMT2<_ZB9vx#b8m*Q{>tXXR#qsR_ z481nb{<eEPjVpg@me`toya^291A5$MVo>H)#vyfzG&UinjZU1nX+W^b!jC&H zIgX9tCk&vae;xm2nVoD$V!ilIZueC-tf=Cp-oAK?5Q-<6YS_8$A%`v4pgyxgGlF@z ztgmGhc%hsJ4rYTGlYR4B1pY@fPv@8j6;{!~z8{)^mo~bSi|n3>Qv}dZ?~7-lkTe8L z746nYt6NgUj8d7@B-^QO%{RjKla}0O7KxR-IT%ho_+4MG^@93tvxess65VJrBA6g`-6RCXW4+-6kw&IL`R2vG702q$JFc6cgNh^UXWoI{z`v zmBkquz7Rp?$pgA;pPvS)0bZrN1A-S0^@hH`!Cyy>O5{2KETCU&B%!W=ErekcW4QB> z^PM=RV@NjZJz12r%PnsmUaCYs)o}m?-|qbmsI)s#G5ms7-e8Y(L5ysRwLV`pFj7z& zYn52^5Teu-RVd;-RY7TYf1(?C;K!Q7Sqp|3D=eDIG{=S%*@n7$X`QZXi0sdRe0Bvs z;kCwZmS-y5%Vt}86dj(HmiYC>ue(!a`SdVc+q%R~#eN7^03(RVfo)HBh)zv`e`i?z 
z=PN|$O_lm*titHmY#wvW2oqpvLFXWWBEX^Q$)%NXW_H4GsmNsq=lFhi;xEw@GFz%tp=U+y+Mr3`V2 zwXbFSXYlr1?!nRL898|YDt8Ixb!pJOly@vuJt@C)bF`-3RK&Mg`x)ewtqLbbw~)nS zv4KM^UH;wDdU3rdIlP$R*yNSqPOkrj#mZ;yH%hzkCQbN(EP;a1_Qr@6yb)CY=>Kup ze$tCgUj(!Hes2c2r(*~wO($9)BAx#nKq)K4{KO*#6C|w~O5r{`d*$@-3OJ5xRsI=^ z#IqpvY~A%&i}q2-qbUhu79a}xrfMS$quf&9?9$`ayB53+*HSoo8mzf8>_Du?~Z>h}1^{-wk=@xeKDj zDlIp-?XKnbXkuTL^mYA;RC8O_X6bog;&TtP_Dh^bjL800u+P6#&xLX`|nIBQl9ByU%RGQwfpraP@smSqF{km-mDgL$n=zLH5 zM}X#|qts-7ov^JUSz2g&QcGd0{5B{IVD3Z!RQszlRqxyL@zgJriu4;zA5hIpT+vjw zIX}S9`~qOKC`RvNsP&G(G=lb%zj-3@0qh;Tx+Lk&W?S8#h1F|2OrnoQmmli!RAW#~ zU;_OZgmTwQ$i9QAn-87u0Bfsz^<&>;4t+Dc`%z8X=-O1%)_FTbk^3Ty`SjV8ZOmFQ-G3n zLF~}{(2eP+1Fz7eu*7ce{o=a^#yGg24)t`wRAGn6L#j!Zz@evl7@949uoFv)*;{{Lh*t}5}Y zx|GKFn8>6Whg=r8-=d-v&G{4f>-~OB|MFqrfj6h){$(5n)dzhYf2K?cN$S1TN8Opw z^5OR)Yz_e3gqz|QQy)=&q-Og@*gRPGqEVInVMDT=6_+%jCRNtYNCHtr8JhxyfS?AY zdhdPm8@vXkJY_Qtx&AXFaty)+I!vqDEB94Oi=3l%FTw-m5F&vIWmr&#m%l{%N=hdv zjHC9pK_i7$vZL1S+=`Zuc6TwfON`v?net1he{jW5r!{rbi_AM$QNJQ^?Ci7H+;6~S zWG=)>dj4Bd$l4)3`nFdD)!bmkeJS;>fg1(A59|nzjgALV_EO-FIJdQq zR_w0rmm72~rk$>Ub-BXAVr39~&$!O{9I%vvNyirpG@S$dQH$Ez>{?kD^~_E)_-Jm|iyBr9eIa zmp0&N-HptHTNl;MkH#rxR{K-}BL$0%E@?tIRMOVRDU4PEeOnRIFdg0tP_r)V&Fdaa ztf&WHiIj{3Z#LRE*PQ!n{{b@!m|vIl#VF{_+bXh^xts^FKDXJvCkr|W2Ts>1Pt9}L ziz2TB$JRGO&zOY*KLuLjBG3va;K0nq8(?WDgoA^tK^`(_CZGr#6-g|{LxjyaUYuT`Z6{M@EC{?<25fu?CCZ+{EdKKWnQ|6reS z5R&(O%FI3Y+%uDfNl6y{O@Ingq%=)n6x!62?2D{K*{hjqp?ndBi?=euvCk5FW> zRKpQi7RV(@SDcqRXwKJ*T1`K1=-6Q7*u{5d*C+|auE$E4U0#KM#F-(g-}+AM zc7|8~bnnCI%r$FV3&V9qsuLrIRohL|9wn3p>Mr*=LAQLw!RWU-oIRCr)Ec$ZxBhKeqUSR!A@AT7%Gcaetb{x+A4p$u!L7quaNXEGvvJrZ-f%H#QsP8x(z< z-m0To!RO~->Kf5%N3<*O#rjKli93c{mM?}RGClV(OXNh)LhSDEHEt8`8S2$sA1e|4 zXq~;X{k32tK98mQyReN)NEMg-kDGPWk6o7`XfP$$FPoBN# zQ4xoZQwT>==ydPLt>XFa42A`C;F`S9L`+a@;jLFbc~ShmxSGea9jY2-?pR`QxKyGR)midwe&(wYQH#O&01c(W@=9tQ~zHjV}3UMVxepON#dToI9tTZ-RfJpa9O~?s$fQI``t{rNHFuq&mB!b&?0m?wT303Q=2`1 zH9m~cGfD4s<*KE;636Qs7ZK)dfG}tdv1)H<&br@~abD0K;pAd7${>m~M@c=)H>~RH z2_Ll`@c&KYDhC0x*c8MG`EfJitD|l3AYctFMar85rnTX$^yHIS{sWOp43q@4^{rc^ z=fN@NgxQseD1PX>B`fK7*3*G=g{m4q0HdO6vk=P(yAPB)L-=KQjj6>*=7gL~9<0tq z?~X;i_D)E{fl2jyf&BK-#7CVz`1*-glDr zt@oK`x=4J)W7-oHT(UGH&n~^_d#mxO+)*Mk=Ht(+5e#U;@PiM_s^pdZ$3KMPG%q=y zr!F6yT)Uwd^8{mn(3YkGX?JH!tt~dM-6JcIuh*UEe7mnZw`6d%Ur)O0w2@PHwqaoJ z{4q1^bY^Y!a#rEv9Czqn+m*-z6ZH z<3{1EWSG`ypIi&U-yI$g>%;6koGf=~P^~4<^xTQCR7ouNiy0jDXtOk>PG&? 
zkJxUaT8$TTUgwp+l*y^Izo%C7c06;`$k8kCs+GZjNG0rgs1hMe`3x)*?w5ggzIff2 z)PHW6_ZmsJojHMkl3<&5V`AH;FFSB($5|-w053#5xIONkoHuOsSx)&sauxqmtr|ud z9j$gbdG88fkVc}+4@B92{cFc<@OiNkwpTo0_{i6Zynp$DpJ%}M5Yn-ci*kr{AX*5U znb3EK7t%XTi_h0W{^fsPAF{g+q_jbHw7d}q`Mkd$_va!-M7A-c2tR)ik@}wj6(T?a zKJ!Q<0P*-BGy8YF%JK$9wdb*PL;A!&2z~$CBFMR%Uy(fsSd^OObq`G_K>o;6!h`&Tjb2n#q- z_SmXP_Rs(JTCdo^N9~-Eu9^RBVgBjg>?z7K)QsV3_}?r2KUCCze@9su(CCVwbt={W zZC~G@|Ce$X<)xMX35hE7GUmdp< zbB3|y@k6pMMgjB$a@>|x38|cY&)e!Pdr3=W%W3X%_54r}E^1ew&eG(1yUsEPNwrKuq@JiP*hc#MIqu`?yiy8;4OB~A% zsOx!Jr2RAwWerI0om&1FGY1y!-xh?2Vqwl5z8@Mu&?r zhg(H@#4Euh z)5eCfvBMO@K~1x=+`(210?MY~MCUTel!)`4#Kq|lP^%@nSo)Vn)sbcAPuBQZ9lWHD zG^(CQk$Bu4YK-{%ME%o|d$SBK`l*mX>wfdy_cEJ;UWWRW&h$Nak$_&#&=)7Gveh4; zoh=KWoPoLR)izj2)+C23`1JOk;kABVNv`@s&z=E`@v>tqX93@#H zsUcqScP{|lw}Ni)8y403Kd<^<+ByWXjlF)6NhNS6R)Z5W^W~HDvwhj8D3knL!m=uj zpi8x3HOg1A&2L`Z8h4WKNiDX;xqOkUkpWeQ$y$hD^m#(>qWE1$4+NV8EDkd;0sd0$l`~6!HetFh&=s=j5RzUYOHf#cyQvv z^YPiw-xGua4EtJ80sydC4fC7=3{(L26zx}#pSfeRek9h`Q}$Pf|GgDBL{GVy6q;!4 z%fcme)KcUVPS3O(eSPa!D!Z>Rr5U4sdqsKI^{&g(A;U`7a|&9g3}AHPW?cGZt|xd6 zs{@NC7^qg&FFiYD_N3F^V}p@tT;gS;)GEVAKN}SQR65;Kz7ZKk>ICg|y*F2S^xc}T zrkOYJzr!+(c--zYw{y$`{Sxw}yU2bt=TfFUPvM!Y9Ea&UJ%AMdioAy2OB7sk_bGa^ zZ@Yj@6;kgay=&p*?=q_NZfec^D#$3UEDQ*I%2aE;`5ifDLo#cufBHQ_z|W9VBTX&W zd=wz&%bavnvu^lc7V$S}QgC4dAh7R~8ObR>#rKSSF#4yQ-1o|0WjZpMgPpjI?lNYo zrMzxyFcma;-{fR}0)ul=jl(**S=Aqig1#+xTyP+bNSc&j@O}eDNfb5ILf7v5b8NmNH{W!f5DFY` z>0z|tGN?jab!$Blh|kpp`nCDMQy*Wc=A>g{vJa`@ezc6pa$sYGa{_F1B#A4Nm(ME) z$MVKUVh8H* z{`J#VoS)+fF8qLrddk3goXZ@*dE zgZI8_>~KiWN-_r>FYsxbE8}BLL6NWQRs%(&K+8i&>=7po_2<`RdJ}hObZ3~5&`p=I z@hMKRCvHIAK~B3=Xzsyt7IC*X>1BXuOOraYRC)k74l`9JDB!fZ6qw zKf417tL+W#-Mc6ID7bDgDArb!QC9u&mBCakuifn(R$&}-RhWN`?Ry-p2Xnc^^R1>)~N7Sruy%X=npx}`S;v)oxBVtRK8Y3@J;|@ zu3c^sOkMbL^h1}K$^JJstFp$+o0Y0?SB<=k?0^)8{3kuSWiEtjDHZu|Aa^krJl_Pm za1=tj%p`YB9$k95RciUBjlK(yxuEg9K_pt-wXUZXZ$9WJ{3@;kPrgkjLWw0e{{@d&?qkr+M0 zUG(k;Ki71cM&#OKRV^6}bz1VOleOt2j94|`@iPg%6!C4k6v+~L>hDedM<;%qqDcJB z(0Peru+nV$%A-THHF=V6eNKr=xA_WlGvHezU(kCERo6|kQK~Y%OQzio5F?}Gi1?bh zsZeh=Csk6N3a-QD=c`zbl{y!Py6G&>-@SFPTl3>SNl4EV2$5Y-VetS z#~|n#sC4&6AD1lK<0L20DoxO0q}_dP8Y^ki?g_fR_M??(`BPN<+`8Cjt%}GTr>z(J z8qku(9&>Gaz80VGVbCe7vkWLJRvUR`uhG&B&GNV<0fjTq(9fG4t86wxe-wd6J)xqw zwp_jRjST(jK*JkNd-(c&zobtbk21|Z`HSr*kPAR#x3xTKgfwRqtBEi551jit<2wKr z#1_~&QFhwDNAs$9j~E{I6Hc`WzU zJgz@^2awwQNm;hp+iF?Y3^RTxP3woBEa)#Z_PI-nLDJ9;Ryj`(=RB1Zv{jX3EmcpQ zcTKu4AZI)I6TZ2M^agch43+cR9Jer7KhbwWgv55Xpgi~|Nbq!~010ARKB@JPFDb+{ zO4Iz?7YC+j71g{Bo%a^&NUuLjyteiW5TKLFtKE9bm+f&KV(YViC~15`9c!-Ni8v{5 znfmx*9RSetQ=t36bKnNTw53E3+Z@pG>Xr-{&;EZ=~48nImai=B5L;GCat0=Hy%g*HV0C$ z9L-*@b|?4!wz?UWq$7bFr4gd~37TN^RC>iL>6OX<5@Sb!HG04G5H|vDZr%|-K-ZhJ zEx+k~%vQb$I(f`)4Wx+S_ulS=j$KM6gYqt?h#1c$z`ail5*b59Tfz9n-{=iHEFP({ z%W||wiy7uq-+s-%f%H1FXy{AZ?L+HYeOtz$(v|SD=?o|2wXpruD`{=c`azLW@l=Oq zbZg7UOxR!a=r&zNRwZv$oyNsTW1DV;5-wDx`=TYBrkN;y3bb#V$i8M|EWe+Ct~h;^ zL)P0jmAFol^(h*iHBWwU)Sk=`NghD_ibeVP1Vs7*(IPf5i?Lu%8G~4}Ak`Y>Gvwx! zEQ<~A2J~RdW(L3DPm16Q1qQxF(`!EU)%s)x z8WzzEg;>qD-gOv%J{1#mnXg?thBGuNj~Vy*u}_h^lY>xYcAH<*f%$0X?a4trl^ln>A^nvomgWMb*B0LlO z?maNZS9CgQoTsAHLlvB+w%+?E*$pkr+|s@RLNrgGV;I~|0&<2(jPm+^=L>&ah9G_C zGv5v8R1F@ZV=Pv)EdZ?lAuu4kX{#%{{+ywHy{&vLqpK`}_@gm2Cxk`%(L=&E1wcV! 
z-ClIWWrBtTRn#S>x1RnQn7^b@)?CfEq=NaRqvIN%MvXl_%eLmzT={}O$Ar?;!}~;L z8skt6-SyKSH;jOqD?%Trdg|hiDz2N0z&$^Tr=R5UoAVdfF{hpv+bVtIGIbK2y}-T` zs|Pp&K9|8G%x9S;JdbeWc7Vk0j|DkjovT-Rk9DS{=L+b<)1G!sP593)bW_fWf489< z09GOeXw?h3Kv!4lj=fDM);&-m3>*3Zr_?Wlf}KqQ}~_@*h$*&ehyZ*v@Hdu z+nN@(0*F9ZbK32;tkGj!u_SxODp$X;9M6TRcS``iJsWPoPpeGuw6MMh!<6R{T}{Wt;xuhrE6p zvWI?t-4dWQjT+Xtk10Qe$l$1(L3if4^eLkURfGp=JZ@{)s9|m@c`Fdym29%6U(Tx0 z>#hyETo`L;MR4dFdY3R(cfByq`qbP}V8<=r6JH#F$WMaS(V`O#Kf~+&FFjk7R?%W@ zL4!Pb=6>GJ1Yc(BA|1UjVlwYc#GGuJrwA`DmD0R3qBH^}r5sVz{TdQgo zb5dm|G&pTTqLX~AXY zm%aY(y`ykj?|I!_BK>dXb6m=CN;3fQt`LK8A6?k>W{>{jPJZW6;Bv429+n&lnoV7@ z*&Le)al8f?S#&F~X$3F0I3(9c0j=jO&!>pl5)$~z-!?8zsBb6@%0A2X==75~u~VQS zKmp5O1~sK-o5QuQBu|9bnXnM>ch zO`kSCrC&~q7q%64LGk!G^UAJ99+rRK4KkVPx!SY@_ARejjv2D)jC~Y8=WnYvpd%0^ zh1NjH-r8+wIPtfcSM*|Dsho+WMCnZ8@J`Q7RfN+^tiSRiPJ8By0JO}B#A22?W3v@Q zY~;XO1Fhd3$nLH0RU^y8w5J*PdeNB5pV7AuZxfgM>d-NLt;=z%`cD%0sLor7TRV<= zxZvMOluLWJt(X^6puzXTiKsiaHvzBpB4?WOSKRuQPReF6n65L+`|buXv(vIj!w;Rf zP}_AqRGtuajMs?#R?%wn$qBbF+0lOCr}3gwYiKcPUF{HVr?0T(y`AG|hr=#QLua~{ z)VR$~zjZV8xt}+Jct4~_kLJ_qe94iccsg^hg4UTCw5B-G1@TKo4xzN>ML&E;Uh9F- z3%FlVUbtJ9<%`?})pxPw=a~&cE=#)G(ARc#KurujlFm=aUOJwp5P!ZeZy5Bd!OmY5 z2wMDH)iM+QQwX+Qye;nVz{e?Xr0GX6=GPrzz*;q%vGZxU#$6+Wv}wW&W4!&DU6v+gnm9 zzpx*M58djkzO`;yCcOx{@myUS`*rI_5b?Rtfh5_IOK6|UzdYrwpOKVo(rW3-mw$O4 z(3-~pfU$~E8N&lJn1APiUh7d>^R(g`1JZuQ^-%E6D;I!wmf%!wx!h5l4qVRD zJ;AOL-{>#I_+`=Gq{-i>hoY>C+1(-SW3FUnGy^rGi*_<1NPgH!$J%)eHzFwFK@BGa;!()$gtK%-P9Gb z&5)j|lM7ky!iqa=FX&tcH5;oln70&>5pHRyiYi|$NJnR8((R|!ug~~3O8LU1d$op~ z2ctJyyU^HJ>8Iy)@=sWtxYzIT9ve*!iilj2LaEDQKlLRr;|!19pntt64%I<9z4@0b z`4_DV3u;n2D=o#JLi!qEEmP(+lc%Wlk!y}hK1_n%J8$%BP%LU)>cxrq^MG>wuU$Eo z!xU$N=9PV1W2sb42%fn~m?2VV1K zz@u*~x+2!^(L&i1&0XxbbiU!-eXlEw)NPxdBJKdnVJb(1&RNIZr-C``tXJ zGv8}T`t@yrL$4P5-%U=w`AuKRzI6+TQt3O>>)JjfhuwO5U#?=}+gm@2ky5tytgGy) z3Ri^fr?00d#Xt6nbjQG6jn&@3S8H%a$k9o&lHtB5Pnu(mY=)9!Dztl!i%<}Ezyaro4iL$p7K1zw7HjCs@Xe821I>sNsk za58J(+TExjIJL*QZtFOV9;WFbMGjuLa_{wTzaKhs{Otev2VC`@D(US5r#)}<5lAM& zD3JaF!q5#Ov9=qaldfl9^>CpOudH(x5&6}oVrK-1eMJV!QxUrS#|%B;ySp=|%*OhZ zP{SdN7w%pOwQLcw=IUi@8+rh!JF8X@!QKdu60U6S&^$QATs%s2e}`EJ-62Gy}xz=jOXMrX^h!sQnV zhFOuaJ4~?N3-0!gVZxltwn9@QZLA%W zC6fn4^45qOsj7u&IrH-{DPY*e3gP2l<(_ykL~=zvXbnPk+`LNFSft2%X$?DiSJS5a zzHlpV32c7Oe4^}Gds}TgdZZQd51akx_P^OaetdpDnZ#C-tEh_b&aMlVdXdGFi)zbc=su}gg&Nu7I<&sd##Q0eL3`SRl8hJvx;_rRm zTiT{cn9(HFoxe##1SHY*Z@8 z(Uzzja49l?SFzPHn2D%B9HSqsnf509JPM?JH-8xxV%d2Q4P^L&i-zURu<<*Nf9qfn>m z8DP9N2;F4u&(%3|>7@P=S!V*;9?DfEZjY#{X#5_QY^5(yQPb9y@zj}%IPtnYzDSO| zw!)t5zA?H0uiiP(TsY;wPFiAD-5H?oj;N~`yd0@(yi!Koelw5>e!WCPD;V7LTtT<3 z(R_`KY!}_!A~jD5$CtO07@6f*TvpY=81ETf|?8=^^am3-sUJ*dr&4XhNo zzTY+ZOwmk<9ZD~dh1?LbVMBB6AgeX4{JYGsO41TiEF;@+dvzM;+qgxxHrjRs^3%)a z@nTz5Eh3H-9$F*9iZ>3vRC6bf&!@G9hlGP? 
zQcOmVWht8YOhpC(V=Auz&;viV$4_q>L0U@GCZC zs7M994Y^zD2(M9QPIj8}8VtU>k>a|(pYVRIUE+qYu2)smofpQ=l9L6lTM;p>mcl$3 zf801J_9b#WVYg49>+}!@%@#o!Cv8gyCPpQ|#J2*S#(7Emwk?X+Fla?ZH=fejYbzL$ z(MIT^OGtiGaQwU&)qh^+fBb<|16PPs$h-^Gz^S6zIu#qg2JAF=p8`{crH6u0r*9uM z3qc&pS}7cJ$JghjlA5@SmhVTLQvx$_Mk?L(9XiSjXRn^c;wWr8c-hpqoFz8r5bO90O0*Vg);rG2i6PST za?D;<8zbfGQfL{3K`3|mR!^UaNuRR8co$YGzj)XLv7uuTNj>C@U+f@^b8^fqm079^ z+V^tpz?+smzF^(xG~3BP+UIjMgQOFO#7|vHcUcq?8Dc_Em!-8MiD@ppzj$=%>sK;% zu6|~UBlMak_B)~CjfXZ@3L8onIV7zH*AWibP(~yHwrzOPWysBCO5cs0d*<#!q%|_X z1oHq_R2(bxtn_da1%Z z2l%-N6HE3g4<3v%qkCVKjyxh*gV#+*&#Q~8BAaIsQMF^&nWE5?9BkFIujRW%tq)>)NYi@uri6xUEV=l`LB{fUTOhdjX5GbC z5B$X0E2=8L%NR)hF2w$e&)g9J2r2q$m+wDA$v*b`vPhM7Z1c3}d6REh=OxQb){=g`bOJYQLPnOm`rze_jea zvl%Brg*5H4RQk*rU!)@Zn!D7NZlW;+yCE`S@%HY{`UND}Ji+5Cn%TWOMCv=m3BxPC z*&9c6&OGMUtDohqey?TZ#kNNt#IGQBSON{8ScVL zvJTToqOWz@6h2I@qqZ#~kjRUlT_>dNHQF&*@y`1!REw-@f>!EyZn?@WDAT}B%I44H z5Nk=ol>^`F3NN^gmFm~b$x)iBh{)=Nf{Bg7HsF_kc6K5Nl{<#1GuC<4y86&>pp!s& z`(5#+s{L;*UFFmYTg^=BcVB^^DP7A9Y%p4 zHYx}vX2Uu6Txbq@&*!Blz-uxts_Zm&ot#e-$pXMo$g2B}?}(=@7x1nF9xK7KFvm*L z9#zCr%9)u`0eV)P< z`#$V_wDVSqLil)ch+M-@oL{37Y9tKCJDO(MWz@L*to zCv~Irl4af=b7+cpkxQ(mQ-TB?{>9<~8@#IaVQ$@|;~{PE8dgF`w^;kLV^95u3v5-q zd6kmeUMltHniACaVz|$V?)Emz4BN^Q{CmCnuc?J<;52ayNXw&;Q-XF(UKJkl(Aa?d zGLAfeoJxl#3#;6_q4hy%3j&WO)w~&jjbSI$Y!NnLz`zbo+~8u`SC=}KcUc~5s(*%+KrE)alt;WpuTDI>LRyyIqfdSWJCDz;fG%{~`Dt5yXh3$E$B#-#2>>VkPNmzhM6sP5nc9pN1uaFxjT zvOSuz?bTBzpxMZHE-2X>-jGUaJkzx^@6PIGk#gfUtLG+bm>D+Jwk2ul&p?May92sq ziumw5A}$eAyfYnZZ;~y%$f@|eU0zkIdM+kApB7>83c5i|5oiwvfkJ_2)~XvA>fNv; zUeapTO`M0RNo>?Vi*OPsa{7x%&|q+x&@P-h88P6kB1yJka%qWPX^7$za1`%5e_%iX zcLq^dh_ciKP33&&DdUKU#{>wYq?i~)^6n$p>Kx5TidcrneqD%hUlm&Ye%X9u#bUQG zwP=AN)FPyvs`0(ZX#0k_mMX$-#Mo!*8s<2YPRxJ^5+|gkf0?t?;%Tzm{7&c#m&SrI zQm`mfOvdr61I@_$S_+|J+4D%5;%wD}sJaY%%{KV~5kaW*Y}ypv3wQmQr-w9| zBfF4!s;}X&9;#XBAK^277nN9?TR!?x+UcxJoQQQu#sJk@y z*>-(acq`rmcu}zg_|g{oGur>ai?z&l>i9hT<+2XBOc1%W5qqrRP!MAip6yY+p#gO} z3pfE)`(tv2cteBnof8+*+&v)$B{T5hYgMD%wnT&N)nepMqXB7GsG=`t)2VsOykXEf zkA+aRFSDS}&s*AEbyd5}!N(wu!cxth-2J+S&~b0R zJJ-*K%w}<24l8@H9Ga;+1fS3-bX}WXSyAP~dS^}rU~B$hPd$uA|F0r!x3g!vr#_$T znv-=G8RqUYq*vrFbfvW-*4R9k*qt?5*`s1@JdcTe*asu|+m;SWRMeHz>7Pf9IGC48 zo2nwHsp;KaG8dEi94FaRNyEAE7zsW1%o|8Nwqm*Svgb~LjWjtrIw_ChOC@FP+LrjX z1NZ=hNPur_+0X)7W};{!J>iwJP02voz+PY*dS?qu7dZAdKVWM*me)GA(QW@NO%pIH z!9KKB4A!3_mlvwNX0*$S6HD=SXrXN*sXI85-+{*_R1SE2mTdlqiKRoHU3XVz>r6)6 zW^&^cTD98&Q#R${PKWDYkE1n$Z)ret#4;G6q^d$pWNg#!nhJF}N6=n{z;43M_&Smu z`;Id)Hadx?q^it5QQJIMs0Gz5r6NJjDI>h`DEnS9eJAz^a3Uw5nxmx+CtDj{_%U71)if0C zTn)2~z3X6XX*Mjkf~b)?`!=sebjJ{G2W*UT&E7!8pu01!W<3Gb(}JikTbVGixXp$4 z+R|7dlSZ|I)8heI5b~CKHqOT&)*dIU_n4nq<;Cb*mr)kVl@8!xKHE3ys3)dB&{-6o zxyZU-@Gylmle+k>Yp33b)%7lpP#_z7ck?UR_{nWm35v^)&@#dX_YqZ2&bdSJ4Awa# z9D4IbLw38H_TxQ$a`mAv>Nr0?--=T?@Cw~y0nXAbr^R{uQ)I2azkwFWRmpv z+R{OuME;BlK$s4gp4!V%;40iazq05u+DOF;zc>mb8Q5a*FfPo7UQ+WRog@*nHiXa; zA~HWtJLX)J+0kBeMMYa4BX{hGmQhHM$f!FffM42)I7`I?z0KxR@GR0Cq!3a8fI!lQ zG!h>U+@E{T-GOVGo7;VdJuwfjZRmm=^oq~(!0ozJ+~$q;rtK{_D5QN;3vk=XXDu;@ zb&?z=?mn+k9{5(Xw;1NSGNol@_>=L#aN+e=rc1yGJ%!W!BE*l<18?SJJrxxDix&z8 zq0RKI8oz_R&GEKe@Y3ZatBpzrF5;Cta+KftG+K8s>N_&62@uARuG_@T1HH}JsAP)o zZy}5wh;@HI0f1ZP4rA-jr(q)&`|HQueE7&H%p^yy|iH z7h5sJ0f-AdPWz!>J+nIaEx9epUoa`p?I>HC8zEx%i)R+4{8sWVi}EkF0!7)<-WKe% z0}7-$IzBHZjuS;5)dRpEgHdtXsXR_0tF=9dntYk-gy1n!{kszv);fjA2SLN~3&2t2 zU2mBE0n!ogBeZOy&!p{qmTLlmuu;Acl9{_uPAyeCqgS#IH;xwM|v*!3H9>57~V)FYwkVf{eKnI;_ z2DWo_+?-^FbGn^*|9P=*pYv*a2fFssot8# zp#`(ogj%_f>TO*GPb+suc2>ymPC%@*jQqqTQz8Qx1T9pHENE*s8?X~~A&(mAOd7i# zHOSN$dDXsx7j@b%$9#+nAJTo`t=SWM6wD-4M^vzlrh8dPnrIT|kdri_4LvTd> 
zOtPHTpa!8WL$-Xeg|jpeTeErXR8dP4d=`nWS%l)mp}2B`g_0ef!k^d}^_c3-RZfSo z)WZ{7bVgZB;*0er#>gNqx>2uvSn)uni6^nAFT$xu95wt(L(s^kaFb-6-vl3~3DV1F zy&NE7O&i1@l%p^FqPE^f+DD(6B$?zW(NoaHQV}VE!g!-sKNO9b9?s)An#lTz#ys)a z-c2=&UWb{DFz?RdcqnwV44+VAwv$N{|JcMW_ff*zJHQs8^tRHz| z892v^jl$6pk9yjgeFH(>pxd_1V*_-Cfl@ODmlHj=b+#f8;;6ss-v*(}hfgqY5DScwQ2dn4h3WC!;PtveUkDsEz@8&~Shm$~ht`JO)2)m!p z79uXXx3w8Av?QL(dt3eqSxJC~6}Ir9Kabd3Vz^E0FYGACeI5ubcK+6@w@w$Tqs z=gLr!QO6CFf>$PLn%h&Sm{mmgGWxBMN=O38dj6>BzVY$U-@O2qxQ{yVHswbeNtN>W zINeU-w71;C(4UxZS?qjL{em>aXkSIP3dR5uei#)idH>Ft}P zby8CA6VQvkn>yh)S7!Gm&&e@)sBO_kVBgXV@j^#44N~a+Z55Hk&cG%!3@yO>@gm9+ z8}GGYJsL$O!!+Vz*5UWD&Ps#m1*G{x#mppqV1DJcdy47;IPCtWk-GfC!>-AjU8UiN zD@7jdvQytWY79;Z?o-W&;&*f0sNr*1#GyVuBlsnP*RqV~_Q1?QRhK}guyK8?1HPbQ zJbK3PJ#8y5U93&zi(Q9o?jon#uN2={xn)z&cNT&`>&X!ROYO>y#`p5AMo>U7=af9k zURYJfshb=G$o@cD(r;Q=DJqd$+|p5{gJ$N7Kx8s8TSwE}PE$7RTjux!G5`mgdMMh? zkC~pgW>puOx659lxY$*S{oU_Vy%p-n7>@sn(!U~--o zA$+`$Jt1qz#a+AyG+Ix+!bwkwmW5*0mwE&Si}z%q3!R;g;eZZzD)nkRCt38Bi`y$Y z@@Tx{Rx8fFlH*jqS`W4W=n`d@wk!J8=gBYR6$cZ=*!a9)>o*ng@qesMfL3666 ztp@l%RxDL{O%)2@RjXzG)V?Ubi!@J-2vH#b#eg_aYM5+Fl9V8By4Ti&V~;&_PaJ(4 zBEis<;Oq`nVvuD}985c{t(S{dJ3Z?K|C6q=wCtR>b(a}#WVAhNo+wq`C>bKg!{W6% zH=i~DKPhHkB%#QAF zZL0UB#1J~+P4WV~iSFCc3-OS-FY`%Do6p(VD>u|?&BX!_C1m3t+vC)Gp zR|l|MqWP~*>hHlv3vv+o)q9WhJsXmAJv@iP{LB=^)1p;yC8LrRX=0zsSLg92_vZFL z;rJDKqcP7Fo3clrX;EiTSl7%aTys@O zptMc7PT=-r7ohUO5k@&7b)QIyVFXjZZZm|k0UNiy4z8Dw(`8I1_F~L%kl8YOsKUv@ zV@4uZ^opEYrg&Qk%ZkX0xMIR(kO|6nz|9?n>?~HvIgeO72BG%4l%HX`IY(#X1eE

[binary image data omitted]

diff --git a/docs/pytorch/assets/img3.png b/docs/pytorch/assets/img3.png
new file mode 100644
index 0000000000000000000000000000000000000000..39c516bfb762c35048b779f6b1ba12e02496f195
Binary files /dev/null and b/docs/pytorch/assets/img3.png differ

diff --git a/docs/pytorch/assets/img5.png b/docs/pytorch/assets/img5.png
new file mode 100644
index 0000000000000000000000000000000000000000..4e7a44ca0db7e96ac6b6862a342d360c1716ffaa
Binary files /dev/null and b/docs/pytorch/assets/img5.png differ
z)E{GDKT%#%yQstMlFQ)N%W~yU1JW)ZpyoPz)b)tlu{8BU>zQR6?)g#ZM}!9zZuM*~ zTla@?br-%yRa|UW)0&D+qH84NF>FpZ)PEqN27j>2dpdh2q01 zW(Whw<(aMT&)l2hJaUy-EfRZr!8oRExMt~|Clb!tHlhk?)EVF-=fa`TX*RmPwigv0B^0Y7;3l9& z^1kirrY|=wF#7Fh|Gykq)N5}a(bbSt=P`nnUO`H&aB@DTOV6oo_UM_z4kN>L z{XFM8_x0zNGw7lk!M|a zWj$9bQHxu5OsD?c`PPfOS^5E(k`;=;8~vJiNnW_wWqAM*&T~!a)hUlFhp__8y=5!m z-oQ*^nXdc$u44eB*b9p9#eB~P9II{(?bv}_8tYDOv1fcx*QU9n;+`Kpl|u;g&fxFB zYgU?ZaF)MMem;Olm-_JoYP`iIed=NRGp|?U@(@h#w-yMAE!>XYOiDX$^Xm2DyW##i z>+!zkNn!om)%Q;J6j+^`XU}A^5}pMh^HV0)u76W-*q$=oJu%|}5=A6g4;3(G`U}Il znyeS=O#*pe+>{K5k}^Kj=E+FT7R#@di}Y-b3C;wmQomKOvGDWufK7o)S@d>y(k)(?wKr8%|aFfT}Y?C zgz~RU!GA#FE*9He{jEjnw&QL3g|hrLGzDA)1ygmyE0E^QCb=ND`=)8;bO8v7 z+dxQUp!3PWd-;$l?X4s+U7}9#L+1m+*jZ;C??t()u!JXpr_e>j7HZ}N7$3k3X(7t> zMeMe@H#xd-oP1_hp7cIwyxEk)B#HXI+hMgryg{AA%v}$pwWfOU_%aW=U{J6VxZ8N- zQz`GkThaQf3P027Lssml(+PJSqh&{)*rU3HR=ERwE}IYD1uk>6>B#T55CGExTAT<) z?G~gNkrkXYpb#pS+|Mj3aAZ2^wAD(@pydFxT;BKgFaV-LXDpl<^^eMHS;)qgQR6&D zXe8)wzZo{cc12QJ33Oe?7JyTK?v|6a*|nC>6Z~mxpn{1KCC%fhj}23oC)ZE0)Q}u8 z-akUV>{MLv7~*9xkb+U*5X*S+JWgd@&XtxsU>omjqnn;6bk;N0XbEq{@*)L48~d@Z zu#y0r$ePnN7ZV1H_|G1_t6THGdw0h_HqLfumTNWNCKId@JhEV^t`#9qLdc|A8q-SZ zr<&<)=O5Sn6>J@$#6lZ@Yn)cR|F) z&5~)cHtBCRj#uTYAH|}9zmak}H`I)9Ts8>+E^ONlFi=nq1n6IyTzIHP+=<}T5k)^G z&K1~lOH0M0<=1*tWjQCrO0SRP5$1iGU%q=q!>GTizMGhT0i@4$)m|6@D1{RftSY#o zZ?yq3MSjmU4%e=0U2e00Q_rH~U!bG$Z7u0V)^?{-q&!PL`z__R$Yov{&v;cSqRYaA z?TyT>c4UkKhQ$`>>*Gj^+FP#MAKt+8*bJWda5_ z-o9!i`*HKG+WFEt!%naHyp}0>lPSksk{Z19;8*7>Te~gLyY{=*8%Cqq*vIUtmzony zsx+gb{jf#yH9Jr5l9@ZtzciNgVk1Z{wyX4&nOtl;nslQqabe-Cm(@4;t@Nd-d#Ld4 zw-3+c6^=F`bcM$2821D4vox73zpicqpZd#jY{H`#z{?v1$!b=00(T&YAr7+rm^9&! z@4pheW}aUuGE)b?mP99FS84wwRwz;F*ANaUNBY_vBh?__+& z8290{@ZBaHGAHYcW103bD4zD8Vg&~X?{NsQpE#5_g#$3U?r;t%s=GZ%04?t!6X}z1$Fp23$y?`xH9X zX`^|mC7(76Jl$X@2EsWLXkc3Vlg@gC^u~OIk{@9c z_kjJA^zEC9rki29(os62_9`k+yV2^1b7dE(e`mH@OjQm3$<3Wii6Fpv73jD+<$fE| zu99YLh)JFO{LbN(Pfp6F?u+SG-v6+I+v$MP#4X^9HxiX>YYByD8{3lR1NneV^!Q$0 z)0!<@Pf9KLs}wtb1yBD*K7p&%p6h}X!4hJnWAv(A{Jpl|uD1?FarW29=<)F0?44_? 
zLz~I%+6IeGAZxumO4;V_ctk7B=tC2!yPv=h;GN@H?v#Gy?GpW-=c(st{7>DxBnB?q zA27{nU@NdQPNe;*!4WnfiWt{BPSq$%{V^)%#@UZL>$%;4XXZ3`F4IqEHI!B>RoSyq z&}Bc^JJjd^z3PIlo&~o~G^nx}#h;j~{pg0&!i6&!`SPyfD+8$a$nP?psZ_e)uL{4_ zZ&C}RnTEf+VdUag-y1*M^Tj{*;Q*yRSI7<3A8K~5aPN_w%Jy4L<8c(tTX-0`c}w^p zjA{2#88xI~LRzU(17K{~Lqpy_t&zg}8!k{ZtIq>BE0R3;Nzk?P<1~o^Xf;^O?0;ZX z|4l;|7rV9I&x3{v3J<)+0t@2&*d?#}=cx$cvZDBMFr|+G8J9AqbNo?ZanRUCKch>I z<$@XCpB$tA<0}g$-!heD92g(RO`~-xxxArPCkyrjpr5|$0tnZ7F8uieCRSDo>r(+5 zo9{o#*cRU^GXDU0{`=>8AOQ9pjZT^wn^0LPXa+Mn)OPO57gbwGq{tpCC^)_&hLbOpN|H+0?{$PimvO@Gh=>ZLj# z&Z1Sl6m;ddAZ-CfRaG*{#|z)k|5D(8F{r@+_)(=}6@w&?>s?oIEE+Of&{=>%r9GsS zwK~aA;8GlljWGO>sT$;Lt4H(R2xscBpwm+A$LD*qqJN{S3nCO`0CO9I1)9B9qp{l0 z!Bwe`=oL+}$2dN+qEYNy@6ZUM^FML!M3$;<4c8y~>~C6!`gO6)7PsZZAWFbfF7qLX z`8Q)kjR2(XojvD36G^xu0^KZp+O`qyx4t;c{INbyAHjsUZtGlnskS5&loWj#Z2#oH zruT;(pr!x3;fgbbhCQc`6{Y6%K0EWI-Z}k@>#5(^ zJ48NeWMJcn3Z?s-tROUC0iL&g5&HYu{O2#JIDi}R!(IMAw-U|W5AL(b6IKNTbUweQKFn{jM&R4%k|L?EQDFM@_O;knw zo7Mkk;9htDQ#S4G`@dqsU()~U!QP3$yovC>F!}xEntwS80eo$}Ri7;V^ZWn&Bthw0 zKn~52Khl|NJDJ+j;Bx zB>DfpEaodfeIsZz-TyC7`qylJV=#X#!@p+pmpT4jMgN-3-*ximU;Z_lf6eAspZsr- z`cJ(6+b{nHsb8(-KPvjyZ2qH)zy0#B+5G>`YzEOZY$OQk*&Ma{;YpN4>ex6%`(F%y?wdr#Jdq>ep$a_Ny**~gbN;kF`cFKA zA`WNa71OHy!ya~qGC9x_+#nhtxi8NisJJauTCwk9MoZ1TRc15n&DSHj-i{;@Z~+Zw z@X+n=6JBL6?Tkq;bXU}FZdlG8Ig2DeAWV48(wC}Q+M6V1^S!_5@JcK=Zl(n$JliHN(IP8e$puuZX|@%{R0pd z4ji<#3k!L0J5>s9Bw%Esl3EICa)_{6Bb2WGA3Czu^>1vwBD#gDm=v?KE53eAAQfN?+lB8&+DM@FL7t)DWfRx((t10bmpNy2I_ zNly|x)n8(45SruyCcQ1rtaLZ_ zOv|2#nc6slhQqN@iQC49NM*QWg?oH|3cUm`k)=P#*g zIJZ*69olcDKDw2vax2wzsB-RBYJgXaw^kLS>wXs@G{Y0V-jKK=yW6nTm-x)8rAGqj z`KGk$i^?a%MhiB$qN{{|*8k#;NKP4;Wl+S!Ta`52s^k`Q zQJlY_2_=kksV0xx=5go;PT|B?wAR?lGbc52}jMUH|-QChTbPWUF8(**Y^L?JrdjAE_`mE&-t~HBu-}gED?7h!E z`?@Z*%NyUdaJ}9gX@uj2IDG1&dhr3?(>nPy>uE*D*N~vqfmro{8y79;LbdEV8;Blg z5N~%9bfc{uP4>x}7!*f^cX+T3x@5O^bqU!TrsxV@uZEfhc6c>NWw{odNZR*OJ|~V4)0@kd z*(5N(D@?^Am&BiDIGdUz$UY=KXd?um zyXgTX@<;$*efA>pjYm%3d%{b&WN^J>yafAMEWiP9CCCEw%F(jJIp<1^9E12 z7a1oT>e;NXSHu6#=jv#k46#k-&Fu1zq=%~~TU=*sjv?Y1(`KjHX0uv?G93jTs}XJ+ zlJENSeh|GIY?@c)&(8X_yuc__W5d)>&gVRqRprHs-yX3-CYy`-93YXnxU07iCRp#o z&(nk~et&>Ugk2!h{bQ_a-*+k}F(9GS2(Fu=ZnL$SLShUB9A~0GQOsEvs= zcGx%~GI%60W{k=`$zgFbA!;Y!w7J?W!~X5Ma72l}5%{`6=shH;TIjtwRE`8a0Lw@D zwL7X>yEhEtqtH^gv~$q3q*C^AiVJf6u68vlteUn~UuV{Su1e-;ZGcsYd_0!fb#*AQ zFX=c(G4*oBpX{bSc5W+L<5lcF4s0=%S4eA(zgXqPpXu{qvo4i)(GygwAM3!_csp9` zBsfUsMITwLhZMxGY`^fw-PvQHdvCvm#oU2}pfBi|({!B@7%|WMyi19o*FmOgJ6ZQg z34oLfbZWPBz8&6$om|a+V;HsGpxTF2I3d!mf;skOg}^tvu(|~9|G>qeD_NcQmyg_L z)D9DDHHE-?Q7?NZ6Xv^*5=XGD6UYrpwH~%`PXyb_Uo1dEN%Nlx z?ON1U`w3jGO`_DAbdt?YHgkpmT%ttISE`(7_#5klny;0|xD$>-Y_RAMasK;ngg@2* zxX3QG$Lh5-zs?=!iTnd!vV5a6fByBEkQ@oCUOwy)TfJ@3&!d0@yfj+j7~NGp4r2_Q zdAEN8Hc&eT^P{Nc>&-NkUwPp1l|X1@3~8X(`NF7c1+>a+V=^L@F9K5)Hd5Yy4oT`n z=Y7uKJF>#%I6M^kpUVBeCbKt2lsepUR^4s^utr$|YFf_)a9HzvYvlJ8HD;{IQ-b*P z(LgiJ@X@O`)PUbV0mA!N(Uli3rwDc7SStrISolHU2)<~F(t54j5a*$v2Ba1ik*+AV zoHI0C*9UpgDU@vB7aJLKG9HQT*a6O1#xzYZc|gZAqqx@)z%&Bw0?!rJ2PU#F?z(k`OCqGIoQ}M(19)Im|G{O~Hx#o(3pE-Hj=`yvn?gp;K(WN~sjc7a7G;$T*$F9F z3m1GQ{gjU6S~zi|p46HdH(6x?mtH*pvv{`ceAlKOhh(F4i>uVfcUXfA)e#{1215KqX6>kh;pl5DU*z zQFfm)Mo%9VnUUtVch+|Nf31zj+hjP;vRJ75CMcw@L^{3vF^h{5vsP6eKrJC0>cROM zL;FE6K1oz`uFBSq{ZDyaBfoj|?=_LG21hVmKdim>UPVSaySSpLCUL{+1-}$ZVK3Ez zYv_a=rwS#=(Ck4#$F`(W7*Z0T1ofyVq`6db5gC_Te||1)wk&`QIB`Ai^T(9K9&?u;&l9|A1jtV zkh?}~u-3IFB7w3qV`6L-W9Y>AYk3Uy18Oi@)3P( zgH{=6z_DWgoA!)Wlq>c98 z>SK)ZRoiKdZ|JoeYL-;gA(vKeyJN^o84iDZsVUV>IKc7-+Fb%eR9T{&J@Ox?<#!Wh zb-lVR+~M`Ju24MjR;cA1`Ju$jq*;-m?>;PZ^^64VLhF*5BIl@m?E2(!E7=DVo{^_^ 
z3y0TnLAY_OW0$jVk5>2I5IdeREk!tgvd!W@Z7x-((sKJJXz1}>tZ?<*Z_*p~!>1#U1DQg#3zTIdIJNERNhj!$i`6tUbCXEG`&vcEdh z+nPJp8H6IQ7V>0-wvOEiYf(V2Df>FnlK)lgBeN=hn`_>Bur)IP3eVM5Wm<*?$($V^ zQ+anhRMITl;JIvi)N78&aqgixgCMdv-3OhglvBVEMIVXxlk$DVx(%i zpkHjJz5_WE&LIUrEvuQIN>lRN#}q+#8l`QnApuVF;NqlS2UrYo!!G)fsDpJc9&Ct)?#zZDN`_D&qzzufVT)cSr>3=$`0RM8fXgc?m3{Rt8-Z|UsI3Uzre3Ug0!a5BB z_nUlH0-Aqf%9Vpm1`sluGzBjP&$jm;T`N)|-70*Xy-&Yg{)biI@c_P1`1F1$Mz7~& zV`=bvqdnfkNbI8{(ji6%A+f~YYB-WtcLV}2rN^*e&#gQHH6D9gTs%XGJ0hG^b9121 zeDvk#wcckDVyj#5`{tPyl!}COQTNM?11*}Y(w?N)c|9x9U_DeINap$Y?82`j=;;3T zj45@(UBK=F0(Li)u$^9O4WmFRD`NWmJx+|rxChzj=SRCcSq3FyEw2B<8x%5h9<0d1 z>DPMWzV|Oe+4t&2*19M37deo|VS-xA`$EE9eVs-<Wojo+oeg9g_l)=R z5yeRHo<;q)dc)IQ{bw6SP*xSubHB&mYXE{CAD3c)Z-M7MF+?)McDB}vUYqNznu@(> zzxFs>8vUnwONII2Puoww52V?JiJz9hz0RYzXb@f(1cd|Gb{9>CiQHw*cpkfEu<3pj z%a||UwrzZ^*QISf(3IyeY%^oxe?_kS%O;l<#HLsKHsXBOwU5^l;=VZ{G^}7w={9;7 zXXts@oCSF~_dK|;IBignq{V#-e3CF$kmMA#m}#UwblSr8&nnZ_L}A1=0G(!}0L=Xb z0T6-KDNi&6zuA9uOYd{Jv^bg=;p4%@i5nk3CN@+|B;|Q_YgRt!-J^;4Cor7t-A|K@uh=vd)TaufiFM8wR0KxojZf%&U2AIN2iP+(S=2qu)(jR-b?<3*j#ewF-~Sf&O6;SfXVVCCvA_0-exgc}MyT;?_PJ^6 zIaN;Xta{t^!y)pC!k2eof$5(&Wd&2kSnXx>MJM8zwMJ`UZ~PBk$wW-PEPUcw-R^L! zj{O2FD|ntKW*}Jgz4LTu6Bbt;EAy&|9v#y^(1MyrbI;UbdGfY4w`x#zbu{0q74HvG zpc4F!=s<=y=5Wt*u`pO(i>Y^js;8jr7k3^iF9yEsV<3!``z$MIso!HX4zb$!kYY7= zuipI$sMyf&u)~%d!Gaj0P0E>Qb&-_B zr$4_>e`MlXg;ZZ)v^vQ~Y-{46swrHY!%Kn)6 zp5D7(PTm5YYV{N@Qc1fh@T;_7hRerdGs};@Jv|L#!#J4vrstz2Exl-J9O7dlzmoAI zbNrrTp{;^)K7cD3$yD%VEy_eu)gf!ZBf(ie#J*F0z$9j&JCfR6thI~a)?{Wz;$Ju{ z^L-l1TO&>*ebSw}U<6TLfsP`|3y>2^>0JeRdGTh8vUH&R3-9>8MA7$6eu2Lh-ZpAf zT0PH@d`om}_)_?BV7ASheJ8J$vq7;#HJBDp#2RDCyq8xnZH@iftZjKCv8(T8a<*PB z`0C~O<*|Q(&SMV|cC5(s+T5guN&k!ThWaxI>zg?B^8*0>=%e51h5x*4zs2ckC0DcA zSqk6UF*`iU>y!{*%DD4>QLXdC&c?Z-vVDkyY$FMaA%h-#mLYdc>bw+rj_t7)7<@k$ z01eECHA}jtkq7ymUA}!elrp4MaB4Rh8MsXy^ajvP)eai%Q zN({~EuTke>X{1R$A?ad{ouUg41Z&Xs^@VQ*zVV?DA_lt0H1Exbs+!W z;z~f@tOfsWPWou}rmMFkd6+nK2?3ClgPnDU|*P*-XqB{DuJU(XY z=HJl&WHmY$^kY|k2IS5++~Sg`*m)c&pydVi$i zY33E8dudQfy`M3lEr!HAW7!{(u)Sd+C$@uv=UqBOsnrfbA8`zhEpcQW?f zbAQFAt^$GY{7t5slg4XPKrD(|w7<~`6aE!0^{l{AIS`ye*UuIl;&Iu|@b%jA^C}`! 
zIc%zG8cs;6=Bq|YSWMQ&t0&z;p)>~uzswgD9Z=YIWy&ItOK$U2enH5s{erJEIoNP` zdGV|sd=$jSTZh?U^gizR@~T(B~Lkxn|!tasVt7#+lBg*uTQ0$!*0Pn6;z@=qiS!iO~)Dd zP)`c$%7I3zfZunGj5=xIu9Y-ZDL8SzPfl%CFxBM_v}$}B8PZ&VcG*gXh*$tRnlIf- z?Bn5bLHvNk-lTugwy=x=bhd8a_zb(9k7dRr@cdr?F5#I?PMWE%b8ratsqn9u#x1Nw zZl46@eN?l@9USBXc);XMb{&}0mOZ}Y>jr;6;- zWwpF^vpRar+xO|NTTXt!;$JHAwQL80f`>jD2f6z1!GU@sCVZC2xEg;)8*xvRqrX!Lr%H3TgD+|n=+>N zNEFLb_SJD;bYyi!IG!CM$?nLzIOB*-YR(9f;%(WW7%2zeA`R#VyyL z<5Rk+bSNZ~pCgx9rn5*3wl_SBzxB5yJx>%R&He@ubz5{g=1G}YH&4V72 z2`K9o__#GE-2-{vqhk$+JQPEKWq0mes+K{*-9?1^ol9jiO0S&-Tq>r5Vnine$SM5` za15)YdHSJxRcL2X8_Fa$iJz2U)3^{@8O&ouXvhFBf7Ws@*D$)P-*NDS=lJckTk}kF z6^$a_CzWFRcuVzL_Hn`lubr=|%Pkn`-cz4q`&tFKPGVjmY?gB%WdnJDpwx3_VgFXD z9!VVT49858n3kWMI9?xu)bfd9$&@GL zm;KZ^m~lsqpP7X|-Oy&fjVF?NM0*2=L4$O+d=%al#%#CStVaCD-Y7XU($n@4sG~Lt zi+=(5K74*%Waor{snw_DU)FD(gR1EsZ)g{7lNV7UMQ!opyz)O`dx5xH$RA0z1KhX6 z(=zI1CmX~QXA%a;axJ+Wp0?Ni`$gS#P!!hdu;Wu2pCm_VLYA*9PVZ}_CrvG0;@ZjS z>xrr1i_;qGUjK)X!Zrs}>sAze{ZXSZ*?7%nW20AaM2HZ901oYBbO~gCdQE%VLlV9~ zCB1A8a2mZL1xUh;Z91`j_ijyfIlASYbE#V$0~9_E@u8x!i%2{mMNLgJj4(Uv)#2L% zplfrzE-L2)cNCU)=nK!}L(q#W?x733U>P%gB76HM1J8aZC!c#B&vr~b-H&(!mCc*Z z3%;ZF6RP_~W%hRaF2za{tYn{b~r^ ztc%)C%`$1>RsB{jX#Cq6aNQ>z2i*PqK5~yqMr8o7viAEOcVOxUK8NXo>#NhV7&ENm zg|L0`XOvfJ{*8jUj1@B<#F&a)>)u{|4eX%IAxkv=$g`?huABOT;95zz$n^{C2B#*{ z1j`zZp}-`6&JdKoN`TN*18~EjX`IS24PD_rGq41)MIzSW>!iT!rk#3L(1h)nTG>z% z6%ZysGL)05{%8~DWvfBAZB}VCuO?$&GnIYDe`&mqWoXJ@TZ7$0s-9(~sF;9ydzvk0ZB{Z@U2%K*D&1luF{L?Lk?P)^Tb-q@AZ2}<^W;rag zpm&vs`WqV)A(C^s6)pUg68Z6+ODg~Dw`t`|*lgvm8F|job`;TVVgVa5-_$`$^oAPS zsPKGhgEm<3wWJ$|t{lL;!gO`2wxwd%0D9ydZ1QM=^n{Y}l`SZBui*0{;26+v%J;hs z__L;tx8!qqgiJJ|ULSx`?Yq13M=J%{q)kQh&6}|QJT7!aAEz7X+C}EUj6*Au zc#D`*)7~X~X*`2iwB-27=6JM>C@y#Y)CDX;iea;~qe}KRe_FA3B zPttj|%+jV+5wBWx__qtLRt`VlDMC+vghT8P-;9wuJ0gHJUbtx<#v%qX3Vv!^q33j< zx;=H^>^t+M+a*A~Wu*TZj>p3%ez91hm#Zsxfu~H|n3+fQ!xHwH+?czYBZW>ejfr z+(Wq8u*CHk{#*&?1t^LC;^R_CuoHFB#fPzi_cH9RL8!AB*5^eg{uxZmmNJ^YU*3!) 
zpd~q>nXsF|Y4znIvExU-wFl;mwM9U2D*%mNLDYazeibwWen$l7xq-3V)?Co}r z+;EDL;g-@@*I_=`MfAe12lpB&3O}_nM&klOWb8Ss-`k4C`-h)31p>B-w#ZxvhaOP=H8{Z!`XuMH7R_T9$MVN1_JB&rpXBoeJn(RNLnM}c>Qv-0Nt|--$2J&8 z4!6_;{d(JSIATDpFi0i!GO3i|a&BXFW}j=hNhnpB=2L{jg^Bd%)tCeS!sSb6&NG)0 zOMjBMze|pmyJF~Bte716CagU;47;>QFaTKDMW~xIk$!zS%1gpHGu#n@6GQRqi*8 zeKBKA|LrjwhkM2}3VXB52l&~0z!r}YonHZ-DS~BoUDfD>KvEE72gWpNa9$;Iknzhw) zZ#8;V^l8NB^WSC9hx9Uu$zMD)(d8%kJOh+H2u*QBF22RG7-}GPfXely?WpD8A<#UL z|Big>Zg;mn`p@pRt6*<=ytdYxtZkSsDpC5@As)dBBG^6us^PO_DByAwLqfXyYpPQq zg4D?iEuQi(nTI_=S=9BWfBMcg1-=bS&ZnLF(WTWHY8G}kF@5kPr(^tH#1vWWzsr1sg6h>WrlZZ2h5m;*OHM= z0bgAH{?3U5dFR_Q?zdDzD|T^f~bTfA{-hw=e0DH@4}8!6lH>{QovdzJ-E6k zhwqluTTmNm1Ilh19%d{fC1kuzit`z#Z05u1C&0^F>Vak)PQxSIE#!?#KmMY-3Lsj9 z;Kq6N!fl839eD0o!Sl5Xv3|;D#;M%N2AtGHivh=;=!pxiR?iEvKqpz^qt$_zy&H_6 z{ARpd2y8)4%f2JVWz>i`ckm3y;gcP0%g6(M4kc>nK-q^NTrUvyiK$V`5_yYWSyk@u zSvZ%HkN@5PKvb+#>l_~b3HsJQb8+wP0H3d3S+KxkvaOhSY#GSTiZo#c$lNyUYm?=B zYmYawZkvCE@@LZe_PD%KzuEK2t=W4=&>V;xXRMyt({`j^>PSuumAXInlQe-hNyL|T zYXV-pf1mOsji7`$PwcOMsrJX(KG*j?VswpO$|^3Czqu0@cz-`O`i1N3X_G}gz|C9t zJdb6TzuicaAA9Z8GD6~-wmG9vV~=8ce|-gX_EEdKtGLF(nm_B?vT=SxX_YE z0Dn7MNNoL6jBEIzkjw#zY?;cUw&CXUV0?q@+&4+F?Wy{+-$hl+az(;Zxs}#g^^;x+ zcD~|b{5CGh$r43eF zar}mb3BOt2yX%SJIMsm`aR-&+uGU%=%f1AOOM8DMXJxu29gE=#fVgRq-G?I$$vjtF zWSzX_E7*CyHwLlX7%<3x8awEN{io~jtBUy?zyP!aXvZjoWYClKLx%!?!603Of=K?* z=w9~0l8^1ewoe!9t1x4jNa5FSvipUAi+B^dYH=OaWhFM2JO;%?CK|t^{Oh{g53AbV zFrZ<<>dWQl^5~OqPcO+$`eV@O%zhlPR=+P^08~O9^Sz8PBS4qErN}iGi$u(Tehj1g zRSK~x@vwMCMO-oO?HI)rK?PCd5p#D5NkR)n_gS$XP7$xt!AsDesSlzmy+N`2T*IqK z2Qj%FWn8D%B0@p-7sUID6}fdDBkECyR9X;#Aw#Q=0B`2Y5I`oomZr@~yz^beJfj73a<=9FDNwrpiV#WMN3vR#00&}JqtjSZXr_|MZqi}N%{ppDx% ziiudgp4j28gD!v%43Po@=(D>_=uWmNj@NdS^xPW{-#p8k=O+cYM6yOD%XAA!@Mo^~ zv})xqM;cU8gQ^R(hr8aC`A9x2>Bl?1i>-kc;~~rnO}W0?ejMYE;L$Yz^G7PmWk%)I z6&kZ`t0`tH$F)w*DUo#ssqJhkPMjCri1w+(c*tw-iV!Cq7PUY#9Ov4hn%-z?w07e6 z4nP)N&zA2kq}oLKT34Q}_Xjm}jAu?xyfjtVTmNkDW>9w`3tuBVGca}*0YFbQy> zuiNxUcD7@&txo_IWm}kB(Y6D6k-}Y}mL`MF^^RFZmlb7KEr0olNjED4zTbxa5RExZ zMVtBSb08Dn`HXfWJ|{Izr1t)-0aIahl%03G<84o&uMWT(Q@#0#1&e@+sc@YB1K61v zR7Zv6x2Y6s8QBA>38Phz?js)CqO;Jtvzwgji1^q~+u0s1g?O872Zy1D&jM$`pPqw0 z&E~drY7ajn#h{=F$e!gO8o|PYceZ_w2;MErplZFHttaGX?75Cu5&IHieBWl|2_`ei z?#3;P6#|n;wLS0)I>|1m%#>6HLq?|D6Zkg>o)GHzVkPq>ke*gXHRkphG7%bWAEm@Q zy=%U4eBsI`lz1eJu=?G1ee}e@jF95Z)Z)%2Vtc`iWM#1~yfkJ<08|d1ztY+CP$Q;& z*mTh#J)|ahbv+s{B{3p$f}5MVhwu)v@L2QY72CJkmS9c;8$P*cf7MKpzK!?BE2iIMwj zLB!OAH+@^4HO;ADqJ1#cu5-`W*2GTab5uIsNQzr$Hynse&}}D(9kfShV^-+aoBdF$ zm8X|w*;dYF{&TNi23xtwW-zseX0CibngZ)+O$*533b?`!WAo&4yk)j)R@=d>B-!?; z%aS7tcb{KsV^wY=)VsuDd71$Sw`bSDDUzAr;mJ>wA|&Jdv3j|EQ5|iiXP1N%`Mph* z^P6jWj{R@MmKVPKc>JXK-DImSa!kp+N$Lueo;D8&i#TxMHryT0bSlf=R|r?kuYnE5 zl)CB~YBs_g7Kf8~oYuZ*Jdz&JugT{1olr~`Jx#7E)bL2DqLA@3xe(OVY*w4+Ks0O# z(uk{%GC9c2<>GNa zZpPDNAL7d7mh-{FqjY}R%$DTBz;kZloE{g6QA*>#ty91GAUxXCJ>}o&qy&ycQd;rvF4p9hg+?^F5zp!w1`PKat0P-Pwu--{_SN2;{9yWNUog z$S#a~$gMN29#h#5aLRh;4sL_6rI#_BOaOh^0B_^2o1Y!bi`3N^7j6>*QZm z44WC@{w`OS6*fay!dyWe`GnhRdz$pRFLi5Tyuw(Y=9^14o9ObG&FT1V)(ZBFk6mdh z&bz$c4BdyKdzKVUDj;HlqP-Pe9O|+9db`uf(J^_>cecTN+gHigN?K(s7pOnc zD-dTzR)G=GFgIx<>P?V-OvK zC3iEJc0|xo+SixD%v+l~PQ2BQN=mlDqh?KwkIihA6FK{RsB}4?F%z1RIsQ%Xn!z-G zCH<_mp6I&WZ*IiYey2*Vw0zb&Z+3F0YbI~7ZBDD?7NZEMTmv088DxfO&*kyv@{_|Y zDJLJm1fjop6w97hn+xP=6V5usQ$4P1u?x)HF!U0z;hk__BS2S~pEFlHdKlGnvc2Op zlaTDhBk)oE*IKK^^S8-$KXVz@wuX_2gD~~32sH@xwr=5m{O zHwP$}?WSb=Z7D-<(c!>^VPP~9Ha$YZ0$Q3sCbRuf11ptFPFnC}J6$MH^0k|mFD~N5 zWR0I83+?n&A0v<&aDF&fQ>i_4(F_%toq> zkbQkMf^!;n%dw@^UB|NMo37uJvgf0fYUjysoX?JW()Ubw(^{;>Se1W%Qpq_w&0xnT 
z$NB+cEXGN|BLZk$hhHy1BOGsT=snUyPWMuO(abtdLQsuLUa8v?`DgFPQJP9~q~`T} zcs-h_z8S`|icM{|V0A=@rZNPr=`x1`+23X)sl7qX^;-&lI}zqWfYnuk-xTJZs&UVd zl}+2AI#tQ7u}T!&*m~lp9k0#?of{lm*g zBUfSh!`l8l;Ys0zgTbV0f6>)SkjFQ|dXMf3=-8v@-Qd?*uRTFi&ot6XRn3Htj1I`l1iW~Bp$es1r+*E~r-u3bPr(S;i${YvKZTYiQx2rqkP`q);=Qy@=a zMhxLeF??fyd&Q$-Ta(B<(7Jl z8&LNm67kK@;I(f`uUe{)Q^hr2&DcZI!`{|Qflw&wRxaqR(j`I_asL zSAU3_4K`kUft+;b4)?wgC>TXg3F;p2_28U-zStzcrd_bIst28ry%&%~48$R~nq4wL z1Nyhz2a2+kgzY)`?1FPOs@wrhB239Uap=#%_a?hr3u-l>*@DU@Gn8`jlyAnpfLOFupQR>JyiG~ zb#B#6^tq6yoxLbU6X@M|iI_Mb%bH~>K#4lz1-e~H<#!%e1B|ANT`9ej-qt9}=%6vU zIFsW+yu)0(U}<9jt6|k|(RA+eGkhh2e^h5bC)_(X3D(3p=L)_o=RwCY2W%yA`jaQ{ zYd{oEl?{-Xz^olAp(!SdN~oL{U8}U!?5bZV{053=O08V~>O2!Upq0c1j}!j)A-HYF zrUXkX0}v#l7DuGKo@Yf-l<`4hCE`kpOMDI!b2FtA)1KJ3rXwLWUWe?}ddA`5e+84p zF))7-G8d`c?VE^Vz#=MvCGX>H2m*eMQOqz1886m3)~Xpp@p&A@W3VbP82MrsbW>c% z`ju5gkGtLWr}2#DC|%Z_;q`p~MlXLUJoe) z4LYU8D+32Fx3H5rAb2f331b2se>K)$DSkkfwO-qr+U;A}O2$IyRYie;dUH#DGojvvo>AJpTXXK!!hj4TEJ(`aoqMdXqZ|bOt%w5y$RxA#>ceqy^|*o0O2{) zXw{DopcMD6+}tsKz7TKbl&_XB{Acz5ze>zRpKF0RPDsD^gyz3$o4>*(D*LMD5J~!9 zgwN41Oxb{;*BS41{8wdkT6y3rGBqexG5_n(3HZR!m$>4G|5eZZa}@Aiug0;wCH|Y} zI_)|zv>6m#)$hN)*Z+R<|E}`?8utI#bpijom;Z+c{QtfCYR2W(FUe<}{1L`FTa?OU zT@g55s7*tPQSby*cS4ASEDP&#`zEm_9F?=`yvglx0HOVBGf|l2Jht}CFnfny_NFLQ z@%!hVG+ON5m^TIQ1F{w3XG+6JXRj~a_Ht<~r~Y&_0eqNK0_Zr>+CX%5-}#h6_d&c5 zW1G+3oVmC*n8&8RrnY`1AvWy~iDvWsE_=#NVn6EI0cqC#=jVP>D&60VLX{MgoY&gA zcotli!wh@zj_f<0)`9DQirCcGEp_{DgaUmOb49>C^z6q#I`!Hn{4OUjf23MgaXNvm zi~7`iQN6B$Y9~=nHgC$0oug^8nvR0?Xd0tR^sb8hPj2)&m?TkHHDB#gXR`q6bwA|k zwdgJAsIcE_&oD5q*(A*1&xB#W3Mk{#Uja4u=2ad>_Q$-DNwF z4~pj|>Wuz2ZdQYMjlI40=sezHm1>pi6VPhq8|YiKRU38qK-|*>se1u}48ViKHD7f| z+zU7&23gixPHZDY{P+&0X&S8YtsWP#H0kzkEKEz&#;N6)$NbittF%KB6tTO-A*yT2%+(UQ$b z0dmnex4rM47({u*=*-2(gp0x2^UWK|K8?yTp^_S`s-t`tJ!l(Raq=^7VngzW7+d7}$lt7v#k+Byp`^)j&sl!az zN`f&X!P6TnG6k0)C&!}?U?IwV#s>*Et^FJ$*?E6I1ZS0D$Rd-~&+{OH5#EL&RQvs9 zvE~;f**v4?w6)tJ>-9>g6C+i+7?2h2;}Mk=)q5y(1OHdVnAsa!+;E>cHtT)S<;3F= z{~s;@^U7!Fj?!JgTMcN?kckTS$mAhaB2^m?@%=0(+mVp~k&fC5zxP@u}i!helgzb=q&MgM2q z$LO{C5yZ^LKcmFipdG-0bB#ZivkUm~l^rm$g#pBz8W=fHTnQs4PREiAxd&S;=umHh zLPl>D;#l%y_X<6RIOkp=zj=z-%|dzXCK_Uyb#ja?R5$;hx~{Rr2qKBUDT<((<{$fs zDHlK6ak0TWgEgW0p{Ucz5z3<9vfcaI4amgZE+*F>Q&fOQl>|KMi-I55oIi};4m$eI z+TPW{R%oib7nPZsBqLXT;_cRNIMyh(eLCB^6@^8Jp^Ea5B|AdQZ^iQ^g05Ov zw+Yz8Nrb6`F06t{J$Jf2LbN8{2-0Tgf4o=p^&tg!S)&0Jc-EIsuNs1_XPNlv4An=o zTMS#QPIhXVwM22(zl9Cf9SdGr)=)Spl~k&Y#yV_`0HloYv}8fQ{giaY=)~M|Kewd+SAuxyv3sT z_#ZLS`vPX91T4?^M`rWNfz*kz`@Z1H?50~2^&A!0?0y~l#ypSh37^pAMSxb5;eEHk zTyXcn_nOu7MYjbwh}p4mJPzC;vvD=w$ag43;r=JmbT0`Nli%E{B4&C8z9>P`R18lp z>gQzVrP`B{tE$=c;17z<*UbAAe#QLdxXTLe5+Tfj7@hJS9pM-6eA>x{__wqMgXg-f zrgGCY2|n2bNy1cohn}(Yfmh~#YEn%00B0~fU}ox3{Kxh3vrl}WnlmOJNnWYeca;d~ zPClG~c}#5EQwf!<>*lf44Bnh)D_t#;o;CP_uFXeo>0`_))C|x{IL2emgaCglPdj`1 ztH2@XJek|ko{@@g=xoj**Z8}E?+SnY@d7rne~6L&vA^C-#|9w}WH^pZ z`5!XXD)7mn9GQXV9*@IZ!guKOIhQ3U9eRH+UMB4E&I9VPMS`iOkipgufN!)=4m2WCymVP+{bpeEGswq%C&D%agft~bG-q~ z%}E_QrP-Nk!}s`#FAdB*ieJW--kVG~Di(4L7azmKtEHy#GgcAWK9G9M`f@zh7wK@j z9F_vio}sPn^$+(o9<$vRS?c4`Qz?TsMC0DuTZ_uhs$G|%Wt%1zWqYCk(%_4=XcRDS z>B8k~z`PaR&D*I`{rT93Y+V))p{1cLTB*8+lZKU*^FD1na!%t~&74fc*8v}h@1`hM zf(|WPRHS~^<$lBo_9E<@j`ui(hS|b=N+376Q)2{n;k)M_iQ@KEHcdSq5FQ85+xXs| zUy>5Sa?|}e4$};a%36=kmZa)Y}HD4So08{HMn|F(P_=~|pz}RQS-$rDd|}LAoiSF8-|aAZ zBr+m*4($d-9LZ(%Co@dq4J?dQ((G3d@NrThgXRh;01SalWskShwb2o@Pvt zVIIj7`SMn2bDK)2E>56w6zZ1mNS@?pzTc4UwqKFXn!XL6%5kvcTXYgZ-XIYUH-W*^ zTBRpxHAdA^dZS)}ix9rdk$$D@hd25e`_09A)hF}HPx_Mf9P?aLe@o4ljMQbPb6h2N zt5;BS&9`OgN)*Fq3V8d!jVIZ8^TZ9>mW+@6F(R3+Lcj+FN)vmG2zv2xU!`(hJexHg 
z{RSa&ZC+9zAvqrk@|pecoZu8e<=q!57fnC!ef(^v>dF?7lQo)ck)=wWKxP<<>`tM{ zUhIuas6XgK>u9SYw#@urx?92j&~ar?f}o)V5c`qUC~&j`(Ikj6j4vWU{nyV zj#~ebt&J%P$H)2Za*bFsB_lKBJcbhtJC%;zQSOCL9dv$72Dy{HCj$bGNq;h5bviU@ zrvxRo`ZPn+PD5!To1GDkgy!_4E;rmm@~afP#(4~j1J_wWOQrE)q#L}W4qmfci-UPj z+zDvD+9`s9CIL^X7$8~Fg$KIXXbc6nWVfM?wURY;EgpgjPS-j%00jg8b#Kz_RQ=q; ziryJ1&xqr}x227Taj8BNmmMsQ!{?fxjYJcf#$ArOiFfEfi1*)Rk2;H0zJ%B;9_ppd zQ}z3jxpJ(W*=zB3rV*mfxm)NtAJQ16RplRoUQfAMFW$FD%zJ_*3baD!^F2Z>@!{76 z!*Yalby>K!ZI`s+=2=I^*&+)L(jf|?37PPrZ{?e>JRAM~dAoe9)meEBXqI4E`# zNeDOCz>0w*0}gLcIZE0UT{ORXo2W&PbZ-wvN)Z8~v_FH%Rd&C3rJV$+7VajEU*xqB z){yjTpUrO4xkTZAdQg8-AV1$X4lylc<@IcdQ`y^GQ90eoy%}n!OLm+aU${Mx5<1Ij z66SgT9@cp7E|EG7ZvG)Vts|ve#mYC^{$xxuiPNLbz;*52D28O+FhA7TfA)+BTk?N%d(D&t;7)`J-pM_ZSCtx zGS1f@8TURVJM})39`h2syZ58=T#<&o+Y^^)MpFB=Q+r(ux8JB4Ss&qfI1RIeNW{x~ zC|oYa8%?_0CsOO`B!|rjW-3G&c zrHoBfGWY9h%!qyi|Frkz|4_E?-yso6Nfd?ZL)nuo z*`>{%u^VFzF&Nv}hoZPumh4MHSsME`h8eq1$v%v2j3r^lmTfSY=W^el@9TNpJ>Ngz z`5`}e;bqR5>%5NhIF9qZoL6GsO>s=faZS3;NPfgO_r~QOEqv6POa=9MJEM#b>k z_CIxkXyLKTB(i%kXZm}?<-|f4QUHHslNBakaLUvQ7 zDM`AW4I@t^3J(_Ja}EYe&n3MCiiu&w|58jmn%8@%e3ihXLwB|Uv1%6i6#_xNOgFfB+=iw4Vfh0yP66C*atL(3IXmh_A1kv`JaL>56CR|}i zxhqW+ekQZ_gcFLbcoSRr87@emjn?~YtxObXE%8MlqO20lE5GetVE2Aw}Y2wh~ zA0Z5fJZ&KHZ+sXkmwrHZYDIo;vX67#dDH3w{yKaUd}G;fTo*mLc+|;Z?Ax8qfldy( zzF%0f$AW%Wza5dGiysIcWqu@~U z>jCc$bZ*j}`2^!yVxhP4BQy=?K;kKrSWg3C$oD*Sjkr7TME~(^A8W6CZI}Mg&)Me? ztAF7ry-M(Ms@Ls0{6qiC1gA~a?H|?37b4#Iyjw=&SEh#&tVBHmNak(v7Cm0knpbDo zfi2(%HL0>rV@9!Fm4hdPTMwhzeJ8wWZ}R%MwfC-S;p2cylJpe+TB|PJXaGE}x&6kF zXXV`UBa*iIsGyUVW4TV7Z7bVl6BZn!4Dn!8pkYy%&!RiG$72CaajC4(Uk-`~KVD^P z$lHtt(GHegsGnzl3~Si1CWftKd?oHBUsH5%iazkpctw}J8uT;_f^b{^d{O=8<4zN8 z{mTy*Q$LMn>ge+ZHfG_}x`qa-3IRQqA+%9Sj^L7D8D}7Ft_No~QHQc1&fUQ? zs+L{!PJp%a4=|n_f|rpaAk|31%@tYfRu6R_dceGH_Ne*RNSZ~0^6vMIQtSkXTtR%F z4||xDcpY`ah_&@0U(lFiZ>Bxi9TUZ-^797magj+5kkYOdWSl_x)Gdyk9WLz_2{`~0 zbV6Yb2mD~t;ke7|?qF>*)MEoVc}AP?U-(wKE`g@NDIaN}b|Er?PCixL#BS zFM0Z&*-Oc9) z(8an>KImBEjMOye@qcwi2s)lg$+{H2NfDRbI=8;CszZnmgIoL0T%q0BYSk_q>9bAQ zYk%p^zob~&6wbSvEhz`C6qWsw4X!}$p4md??)Z0&rc5Jmhcz#`jn?QS&Re@M z$8^cpXG}8YmK&Hf_uLv81AY@IVr+HiG(9ut5#)KZ$;IvRX|ol-W?SD8^@d=wD!hFX zYie!ySnM4Rq#-Vw$Z2He`SSe02{C;>r7w&!iaSy)l9=EZ;uh8N6&Vq1ykqvI+5v+} zCY$11w||<-Bel07?qZ73ki1eff)H*NxxGvfE53%vmO#xpNC3)2KBeHksay$Lav8c* zI5j`lX3Vw}mBRNlELUO-=*O_EZ*EtZT89qmDPli}VkRX`n%7%S#*QalD`P#q7+NG6 z(B}YFudR}-z3nWf$`)KgSDsWEo$}LkXCsoN0bxkG&5Wkcoz4|-KI4BHZK2I(md|?A z&qrJi#^;S<7sxwd&?avBpw7ml527kx(;;(N?k5j%4$9x(<;Mo%4A#Fx%WO->q`Pbmq;DmZ2I*1 zY(0El@yiuty$~?l@`q`|#IR?A_zS%){}clcWr+s@#f8u23};t=wfWUA@<_e&-y6NC zT`{{oKK9ZnYlWz~`^I~j2y002B#ebzTN5=_d1auV_f51Dz~);4gq!Q9X-SD8LvmMH z`8&d$ZHuNd*0Kw>`bt}t10nE=f%~GVy1~5!U%lg)R&lWeS*?wSeJC~YiJDP|tb1`H zW-`IHMih*$WwsGJJWyaU=yP^1Q;sEla&Oblt4xILeeL3@3xVZojIz65Bc!(0t`!ux8;Dy0~AidN1EZQ~E3PdrPcJ z=97ciJ6|QW4|3EB`LdI6VfV$&6T|iwu29#WhbM&9gfLPx=F8UiBZH0&GX%wjMSjHv zvQC=5?qgcznWvrdQ=JSf64#yGyfHUEHwiOh3c=lLzBTbmbABqyz4a|F>fUYdTE{b{ z17<-kX_>;#pO`W;(yQ+!q}(`hvTmE+!cH@)WCOq zynuQT+XS<1lT_(#%DeLTHm8;gOX!tclX%RE$V-M~o)ZtOwep2fcM(4(!3S%6k`3z^ zTB{NHg=&>F!hJz|Aab>C2n`xadFJxAjI&3-79SD1(aXPo2W)G15~5?a8Qx;e$^o|XdOHtsc~ z^gLeLTnc#dpy?e2!k{I8`vbCADr z-nD>8k|-$cB()Yud_^v{diSo`TNmJpugTW$`~v=4Skv-tE0 z+!!BL&eGwi9cUJ`HTp`^?e@oYxIIi4yVp0Ux5?#VTj><55|ZiBd<)ViH)2qWA~B{| zE@CU7w}jwB_}%zY2gmXfp^!d?>Vb0Yvg+$uh!%9Z5g^9nUHNvw)o^Ha$`(W&P%%u{ zrjop4ciTy%G6|ZN%MkSiYs6O_O?Cnx>x$3t{Kb&5)I*mU4p%;}XoF1Fb(zjhEY)_L zLfZ1ITu>aN1&sdH-w1X(%1QLxv_O(AzZx?;#9(22+Wzw|_)$1{HQ$nl;TqgiF4Gn} zVl0Wn3$+-=*=Z625%J~%LSs0p1LsxwlJyG}>UhqN;Dhg(N4KGUi=@^jTk5&RGN0^=@EADZ%b^_1sYoo^8;s`X-=V`k^)IBk|s<&YdvSwLh< 
z_HL^#cr?jR%-YN&0@DKxbG2FWMLregs03~zpLQ31o!G03qD!jOm3`|TYH2@!EI&ne z{MD5+ma(tTG8c*11dK8|Q6*_IS4dXw?`CzJ``$*5wjcs`(SDObv|QUex7-ZDT?|d2 z%S{)yj0)?$E&+Y64nQ^7h764(7cN6ND-)syN$Zn2OO?!~xRhvbN_`>_nE`?u#kc^sW z2x_kdPtQg$B$M*X1ia+uc*{@q7G9dvufb!S`@6N}ZPFBCocx2}8KKirOb1tW!=wS2 zxbgl{H{Mt$-DfcgKLUt%Ps8H4oLs}WQ~!a`H=3{4$F1|YmG{7TPtM)m}r z>$;FwAxjzG@fI98q0#f)i{R7N-K`Jh8DEk^D)f zD(n`^kFCLKKqv@O*)jH~5?e{3&WcL+pAQL(7Q176AU~BbPxcV2g(~fa$l3m*tvpS^LC-VeskiQiDMyjy<9|Q?Co$O0l zsXrwBZ5r3(HyY9c5le(B7^Zu_QoUz;iXmtZukPXXIj>E}o_M%6D>hzW+Ag1b(JyZO z1U{AX%poA%{BV+UBhb>g`PPmrF2#Ag?yFB&jsl2P+I3;EX7Kd*o~?A)12!c2-rTC) zo`a|%GA2Qsy*?5k+W$v)?*mLyv;TLLk`e2d&i8>G5{-7b?D9%J`0jl35Shkd^LfY; zx|g&36gsf)73NrvM?&?};KJRF`3#H41O{W_$7U z;a7=;h!oN%2?|S0&z^G(GRV@U`IeHq8L0;71gjV?57ATdu8UfHXH;JVn31Ipv4#$Q zAutq+3qBsoy?zsk0H_mck78{`)v~KDeqEmqEg*dC=2E5T!P0t5C1w+AMwcfpNBj&w zt4u`44+Y5Bti@@Syfh@>=BnUDCfnbjupJW%k#EKLM`iS)R~s-at~63gE3! zm30=Qc?94X9w?iGI^VFDolC;EZ+4mF1Mmo#&z~KTH;Mw?arNZk)9FZ>2edJJu}P?_ z|A!((&P-LR##;VSoP-T>YDhYEa-QM5w9}ytfO@B8Ncn&UX ze*Op-?8af8uJ)Bym{Jxd3kH6io?ia81(%}N;<;*rTYHTt(nJOG$nO|sC>&r#G#^-n zyv^`%u_v~Y;ts=g>{s*hZ60Wgo4yuVR(*7|>AD`%qz#{iyx9=d+A=J&Ql>FRuHO!A z39E+Wgzvk^hueH&jqc|;&Jj*786LdJG`muNUFuwIvI9@+-Bn@dQ`*rE$lMB%`R@m# zY<^w^tFKDQt4Hu0aWM97DN|HtZopZ~!663wPUWDT(VoHLWM2ahU0lW}<^_{R#S1Jv z(uuiuSWt;sF9OZxXOExXUAp5v@Kcg-w^VA;$P8db+t2)SYv)cthO2lVvQb7urt?f) z7s|h}Y1E9_Nn=R^tosVF06_)u9r|R)S=F(9gfgUw{XZx}GEm)FC&K+=HpIJbPbOPv zBa19*86$mx`+Sk_%OMF0Tbt$1=W_c#lmf=H;VG{N8O7RmF?$$Zw=#lnMGw?xlkP+K zY5oN;jgeX${mkL!ZXDk!zf}{d6DD!|2g3~cVKpq1!@kX+-II+M*;M}n4KNnIRQ_Fd zQO}=iJe*LxVja8{O@!a{)PoBV4E(%M9sO^rA}=8 zr?!#AX%hvW0$p)BvW3X)>?7Nb>-Qnk)Cix)qEg+HrBXKDnnA0LMVSFE?K>r9bzEiz zn6!~L=g#FCDYn7v#1-)2T{JttsC(Jb4PMwij|%K}%5_AVOo0;GpAMwzXtrfMy}(gY zWZu>*kG5N{p7I9KRUrZ|gPiY*AbODPxgZQz@))s=GBE?b4s~@tz$z}kuy+MG+z_Ri z_Sd1h-;OpMnu?mc*Mhr;k3;vq_sRe*-=Yw8Fr+nM<;8LW@>id_wzDtl-V3Dqg&488VDt>XBr0Oe8<8{~Wvb*1*}5fu?zQGBvr zf@oQ@UgcoO&+tHt_dMlBrkmK#=wDUO zL-rS)d~u%F#{p}UgIq1_SW+ZT?Sb2IF&VDpFjK9E{Go-2wf@dS_}WJ&Gy*VDi8wk$ z-)#n|3n=>tvtgG+g4WaK!;ANb7~8s?`YP9<)64t*RxLU+)6eouPIVNVf)!u%?2NuR z9bUz9sSqA*F?xgT&Z7J{tc-9#_BqE7e@Yy73yRM83mKKMd%%tFE_1qi6PeZ~6J86# zBOSev{_`u4fD%5P%hS%%bM#v554R-=-{aLz{0;afvgH$MWIexPYLImLYRxP2rg@q7pR~xBTK)iA zLl@3PQ{TD20W+SD<@I=CcdEl?M&bMwW&xKkoRCPFE8w_&r;S~WgEzw#$v{?}Q)lf2 zERN)+KEt9en@8LY{CdD%;sxdH9>%zfOLn^P)i5Z?HguE?TH@?Ie>} z=wZP7%d%7Q)D`NFiVPX^*bYAv7NY45=Hh1+@iqN5V-Mw>2PC5xkv_}OvmEmdRr$Kh zC*=+cNDtH}HDVQN2Ao26YCE$6lN&a&DElF`9sVam{#K#`%8U7+T3}Z(X1&uj31~+u zSSOw&@nAHpmDu_Eh(xMOgO4>QGyn&ZZQ?2!+j06Dyk2Z&tlG0*Iv_L}W9aO_1&ot< z1}IakLl;g<mF9{k_`h!0 z?Fn^dltVUiWw_Zf#K_~&$;_`Zc*^TH!*Xf}!?gKutH{OaUR1dui>lAl2e9s?5+hzZ z?IaE9WD(@+Q2;cl(xg!*7|l&9GizuCTgo1^dup;QblTC%3cfz~1vFdkF~(1ZP@3Fz zTKJ1+U6v1Yki4bD_LS>No*KYLQ1sL5?XI#Wcc8*O&2SqS3Z#tS$FSy8Ng#$X;9#Q8 zA)O_K6=g~+T5d7~kGmXVNKqpcQ5*BK^FXzFTj&^N-%aMh?&K2;_?=|n^u>sv@^@|- zuJzA_Z%lX&8KVmMS8q;?Rbbc?f|f7|{Oo#EqjJkx-w?n}V$yq15V>r4g5N$JVJe=3 zyKFvdK8!MYq$YeRyQh8bJrkcSV`ZCjw_c{}_b**KS=Y2~S&<@Zwkh}7meRNnHGg$Yy3 zH{@Xc_;e{_To_iJrFXj|aw(GVLgk$5{+F@33U#>T*N*V3dc{`0K!VX>FJg%wB@+}HS17ZY$ z#)*q04Q>XNTwL`d93MlOx6g|aY6TbRlPz{%*vD)?FZQ`VP@B5iNC~myc<#~cMalH5 z_U@4%h2>|D0h?USrMo3-D|K5Bh=JPbgW)k379_J#XUh|#9aV?LzAVN|EUETp%rkw8 z0g`KgWs&!NfkyFZlf`BX^dB-ccyV=3_A9;ll_+N84f+3u<4)R z)oLcl<11hhAF3<0v}n}<`)hcfAl%7beXu>*cA(f5l28^Z%5nh$5Iwn}3D3bm(kzet zbTI7HX3MlCt1ElG$W>FBOHb7=6|ufZ<0xA6UcsDA*_G7bVAYW1{CuiZ2G~7XKxUbl zvLHOIADxKTZkXVc5MI{4v#{4NuBQkHtXPLwXq<95jXsb;S6DCG0X1ZvK+&O_ z^3fF6kGi3Kh>Km%MzP;9)Np$C#w54=L7kM!=K~6 zC|*9Xl!`BH<3q#b!u_$D)u-L*Xs^*C<_^c6c~FKjG+#GsPj}3?P(g1j+U&1m^bX=j 
z0aOTs;=|M+O-b8}@bcBumwF;B5TCXpL736m&1cz6jHqYS`4vL^i7~wE`vwZJ7iTQ? z$mJkgN=k10i3ahdu6jODwmQ)tKx*sLK`_d5ag@c<;ObK%hv0}wQ=9!r!~R&)@}{@M zCSA2n@*<#AbM>nm7S|$K9O8C@_Q^WZx=d9pq8AF%iVX+La|*C&AJALuj}UvtKEF;6 z_shi=Wct719ru2HPtLfZVEAGip$)^gHbgIlV!glSk?8C=;DMg?fmgajZu=75JFTOOMqfaG=D$|RI_OK{$f16{t@>M&sa)l-gj1pPaDc@P=Uv*pwp~tw$n2aMV8SY zd5*IUS$J1w#LYGSirsUMMe;W7{O~8P%gQWVwptyU=i4aRZmrrP9h_hzjpvjm^{E7{ zkUC$m))>SE4>&1=mgy^A)G4>Bqs4&KG)n{(eO+N)w=Fvw{pP>r(hzTPkQx#4U38fX z4Ze6_>0Te}TQbYOJSb}42Wt5SCsC!?F@`n8O~Bo)kZRRKR1)K!JL zd%C8J5_rZnWf{#ENTra;Dyn|ye9@9rGvlmLjAN{~y=%T+eiE}8CYeI5O-^CjT3NAp zx2;oMIjrZidh^JhzID4|u~7zPb`2 z&@UtT$VF52S6i%GE%mM#st3rFTK{CqA7085+fv~NCDk*pYPKyhC#t~O;3=W_!FiNL zLyFT^zlnj0P2APy0 zQ3lg< z2jA+SgLS65cYJ@hk?tHiS6(l{l=!Wssb)Qu9E}zlQRs=@O5f`pmMN7+P1aI!9#(Q) zIX$1^II~h6_z+y8!9)|Bq&4mjVP)9w!q z_2y)83rzMLi)`p206&a;f8zjjW`3(7*{W|W3Txn54S?v-x8$nRu9T4XoyM5)4*{&) z)t3FeBPwkW;%1(Ogm8UHsjdqfcu+V7pPDY~Kx0nCs+Hnry(FD<=TuFbeu|q^7>_wW zMdGsOK-7?QUiQ7t)hb6R{Ol9LNf@B`G5;T(wC5S17QfLLowewYKb|;afgb&q*&B`S zs8eV|72`)rwRr3|Iph?#6M7rtmd_k2_mqi5*`e=Zo;fU?xp5N^W3Lscso?GVL|A{! z;(g}0hHKh*G%3=91;Gy9A%%g(J;5=CcT&UvR>JGZq#A>I@Sa?KB>jdoII})f6BgIb zv`$?GX4l}N@Al&4+>&j|SP?Bm`ueq=vUjO=C=R+VGh?}zKv;+zUvJ6_G z^U1D5pql_x%1uYFH3u?m?{)&9qB~8mz_2B@Bl|(N`^iL7pd}i2D58tp*Mh%m(&`We zw7PBpI*)=dTmn3nc9MO*o!5%38unzD+dUmaar>v*E0|)L)imAf$#UgKP6NlT!?ye; z5ASumA;>pV87rM zb&Q@7@KuVX#$6TwYy&S(R!1$Hp{%m^BlQ;`$Ua@|5Ykrq)@Pb zsD4IG01HITl1lw+z^H($ZPp_1wO!hx$Lr{#RgZ2+i zm~pcNU-AfGdU(f9n+r>}#$~n|#8(53oN!_;+zEwAR5Xq`?qa*LtfZrJ;YIvzj*1#s*_`Hy7ORuaOT4vv-5!4;>e#|hZood z4Ce}09yi%ugEnjd^7ZLkjY=fGQnf*t%lXcGO7IUchO7pgyuorUJ-13xrSwD|JGI4F z!KSe{_!W2)cLCq~rvJKXr}Upkuok>G42D`pn2#ZRy-CUaUEeLQvNJa!OgcZ{7@Nu$DiN&Y-ssH2eB_ME%p2ecZeY_>TbYj4dJyxf~?%mmL7M1w|e%Fpo;p3wA!F-Q5cK|fw z4UAyu1z6@kJ?p5pEY;u!*O)6p`<^iR4O*&xxFZNQ;CRd$N969Qx?fc=8Yavy?dXsw z5?Ox~*35~~M=z>#fPzDIla^r7JZJ00JiCP-?UHBOfy8Y|U5vc=#@yU4^mL?JBEYyl zK+1k$J#wcUMc%)@NpS;57i^q+HuGQS)xZDYj3BUb4GP)T!~XsEe}+)|4g_e&{_*Uc z-v<5X*L#}43?{mS|MQXm{kA~-djmsDsnXq0`TZmP{b~ci=XNn~OyD11+VA6kwgiU$ zu7B)R?Em_?{#^2J6ZYpW|10Q$==&4c|6X=~*79#lhWStW`hCs($=JWe$e$whTUGy4 zmw)R={&ZjeE;GmI|Fj~%r`12L$We^ky?}jl>==EI<~?JZzbpd e Date: Fri, 6 Sep 2024 19:27:13 +0200 Subject: [PATCH 19/88] readme link fix - broken link fix --- docs/UseOverviewGuide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/UseOverviewGuide.md b/docs/UseOverviewGuide.md index 6d619c7f65..778b5d2c0e 100644 --- a/docs/UseOverviewGuide.md +++ b/docs/UseOverviewGuide.md @@ -4,7 +4,7 @@ Below we will first outline what you need to get started, the different ways you can use DeepLabCut, and then the full workflow. Note, we highly recommend you also read and follow our [Nature Protocols paper](https://www.nature.com/articles/s41596-019-0176-0), which is (still) fully relevant to standard DeepLabCut. ```{Hint} -💡📚 If you are new to Python and DeepLabCut, you might consider checking our [beginner guide](https://deeplabcut.github.io/DeepLabCut/docs/beginners-guide.html) once you are ready to jump into using the DeepLabCut App! +💡📚 If you are new to Python and DeepLabCut, you might consider checking our [beginner guide](https://deeplabcut.github.io/DeepLabCut/docs/beginner-guides/beginners-guide.html) once you are ready to jump into using the DeepLabCut App! ``` From c7f20bd9795f64850fac5081064c57174388a1b6 Mon Sep 17 00:00:00 2001 From: Mackenzie Mathis Date: Mon, 9 Sep 2024 18:38:27 +0200 Subject: [PATCH 20/88] adding docs for using data labeled elsewhere (formatting) (#2736) * Create OtherData.md - new doc, old wiki page! 
* Update _toc.yml - add to toc --- _toc.yml | 1 + docs/recipes/OtherData.md | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) create mode 100644 docs/recipes/OtherData.md diff --git a/_toc.yml b/_toc.yml index 486846eb1c..ba781450c5 100644 --- a/_toc.yml +++ b/_toc.yml @@ -51,6 +51,7 @@ parts: chapters: - file: docs/tutorial - file: docs/convert_maDLC + - file: docs/recipes/OtherData - file: docs/recipes/io - file: docs/recipes/nn - file: docs/recipes/post diff --git a/docs/recipes/OtherData.md b/docs/recipes/OtherData.md new file mode 100644 index 0000000000..1d9e648d70 --- /dev/null +++ b/docs/recipes/OtherData.md @@ -0,0 +1,33 @@ +# How to use data labeled outside of DeepLabCut +- and/or if you merge projects across scorers (see below): + + + +## Using data labeled elsewhere: + +Some users may have annotation data in different formats, yet want to use the DLC pipeline. In this case, you need to convert the data to our format. Simply, you can format your data in an excel sheet (.csv file) or pandas array (.h5 file). + +Here is a guide to do this via the ".csv" route: (the pandas array route is identical, just format the pandas array in the same way). + +**Step 1**: create a project as describe in the user guide: https://github.com/AlexEMG/DeepLabCut/blob/master/docs/UseOverviewGuide.md#create-a-new-project + +**Step 2**: edit the ``config.yaml`` file to include the body part names, please take care that spelling, spacing, and capitalization are IDENTICAL to the "labeled data body part names". + +**Step 3**: Please inspect the excel formatted sheet (.csv) from our [demo project](https://github.com/AlexEMG/DeepLabCut/tree/master/examples/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1) +- i.e. this file: https://github.com/AlexEMG/DeepLabCut/blob/master/examples/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/CollectedData_Mackenzie.csv + +**Step 4**: Edit the .csv file such that it contains the X, Y pixel coordinates, the body part names, the scorer name as well as the relative path to the image: e.g. /labeled-data/somefolder/img017.jpg +Then make sure the scorer name, and body parts are the same in the config.yaml file. + +Also add for each folder a video to the `video_set` in the config.yaml file. This can also be a dummy variable, but should be e.g. +C://somefolder.avi if the folder is called somefolder. See demo config.yaml file for proper formatting. + +**Step 5**: When you are done, run ``deeplabcut.convertcsv2h5('path_to_config.yaml', scorer= 'experimenter')`` + + - The scorer name must be identical to the input name for experimenter that you used when you created the project. This will automatically update "Mackenzie" to your name in the example demo notebook. + +## If you merge projects: + +**Step 1**: rename the CSV files to be the target name. + +**Step 2**: run and pass the target name ``deeplabcut.convertcsv2h5('path_to_config.yaml', scorer= 'experimenter')``. This will overwrite the H5 file so the data is all merged under the target name. 
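+
+## Example: formatting the .csv programmatically
+
+Here is a minimal sketch of building such a .csv with pandas before running the conversion. It is only one way to do it, and the scorer name, body parts, image paths, and config path below are placeholders that you must replace with your own values:
+
+```python
+import numpy as np
+import pandas as pd
+import deeplabcut
+
+scorer = "experimenter"  # placeholder: must match the experimenter in your config.yaml
+bodyparts = ["snout", "tailbase"]  # placeholder: must match the bodyparts in your config.yaml
+images = [
+    "labeled-data/somefolder/img017.jpg",  # placeholder relative paths to your frames
+    "labeled-data/somefolder/img018.jpg",
+]
+
+# DeepLabCut expects a three-row column header: scorer / bodyparts / coords (x, y)
+columns = pd.MultiIndex.from_product(
+    [[scorer], bodyparts, ["x", "y"]], names=["scorer", "bodyparts", "coords"]
+)
+
+# Replace this random array with your actual (n_images, n_bodyparts * 2) label coordinates
+labels = np.random.rand(len(images), len(columns)) * 100
+df = pd.DataFrame(labels, index=images, columns=columns)
+
+# Save next to the frames, then convert to DeepLabCut's .h5 format
+df.to_csv(f"labeled-data/somefolder/CollectedData_{scorer}.csv")
+deeplabcut.convertcsv2h5("path_to_config.yaml", scorer=scorer)
+```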
From 6f09586837735cdb9bf391412330b154e5c2d89a Mon Sep 17 00:00:00 2001
From: Ben Dichter
Date: Fri, 18 Oct 2024 17:39:05 -0400
Subject: [PATCH 21/88] Update standardDeepLabCut_UserGuide.md (#2761)

---
 docs/standardDeepLabCut_UserGuide.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/standardDeepLabCut_UserGuide.md b/docs/standardDeepLabCut_UserGuide.md
index 884b38a4f8..39188b3d48 100644
--- a/docs/standardDeepLabCut_UserGuide.md
+++ b/docs/standardDeepLabCut_UserGuide.md
@@ -401,7 +401,7 @@
 dynamic: triple containing (state, detectiontreshold, margin)
 If the state is true, then dynamic cropping will be performed. That means that if an object is detected (i.e., any body part > detectiontreshold), then object boundaries are computed according to the smallest/largest x position and smallest/largest y position of all body parts. This window is expanded by the margin and from then on only the posture within this crop is analyzed (until the object is lost; i.e., < detectiontreshold).

Date: Sun, 27 Oct 2024 22:58:31 +0100
Subject: [PATCH 22/88] Create citation.md (#2767)

* Create citation.md

* Update _toc.yml

* Update README.md
---
 README.md        |  83 ++--------------------------
 _toc.yml         |   3 ++
 docs/citation.md | 138 +++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 145 insertions(+), 79 deletions(-)
 create mode 100644 docs/citation.md

diff --git a/README.md b/README.md
index 4613fcb54d..28ef4e09a8 100644
--- a/README.md
+++ b/README.md
@@ -127,84 +127,9 @@ This is an actively developed package and we welcome community development and involvement.
 
 | The DeepLabCut [AI Residency Program](https://www.deeplabcutairesidency.org/) | To come and work with us next summer👏 | Annually | DLC Team |
 
-## References:
-
-If you use this code or data we kindly ask that you please [cite Mathis et al, 2018](https://www.nature.com/articles/s41593-018-0209-y) and, if you use the Python package (DeepLabCut2.x) please also cite [Nath, Mathis et al, 2019](https://doi.org/10.1038/s41596-019-0176-0). If you utilize the MobileNetV2s or EfficientNets please cite [Mathis, Biasi et al. 2021](https://openaccess.thecvf.com/content/WACV2021/papers/Mathis_Pretraining_Boosts_Out-of-Domain_Robustness_for_Pose_Estimation_WACV_2021_paper.pdf). If you use versions 2.2beta+ or 2.2rc1+, please cite [Lauer et al. 2022](https://www.nature.com/articles/s41592-022-01443-0).
-
-DOIs (#ProTip, for helping you find citations for software, check out [CiteAs.org](http://citeas.org/)!):
-
-- Mathis et al 2018: [10.1038/s41593-018-0209-y](https://doi.org/10.1038/s41593-018-0209-y)
-- Nath, Mathis et al 2019: [10.1038/s41596-019-0176-0](https://doi.org/10.1038/s41596-019-0176-0)
-- Lauer et al 2022: [10.1038/s41592-022-01443-0](https://doi.org/10.1038/s41592-022-01443-0)
-
-
-Please check out the following references for more details:
-
-    @article{Mathisetal2018,
-        title = {DeepLabCut: markerless pose estimation of user-defined body parts with deep learning},
-        author = {Alexander Mathis and Pranav Mamidanna and Kevin M. Cury and Taiga Abe and Venkatesh N. Murthy and Mackenzie W. Mathis and Matthias Bethge},
-        journal = {Nature Neuroscience},
-        year = {2018},
-        url = {https://www.nature.com/articles/s41593-018-0209-y}}
-
-    @article{NathMathisetal2019,
-        title = {Using DeepLabCut for 3D markerless pose estimation across species and behaviors},
-        author = {Nath*, Tanmay and Mathis*, Alexander and Chen, An Chi and Patel, Amir and Bethge, Matthias and Mathis, Mackenzie W},
-        journal = {Nature Protocols},
-        year = {2019},
-        url = {https://doi.org/10.1038/s41596-019-0176-0}}
-
-    @InProceedings{Mathis_2021_WACV,
-        author = {Mathis, Alexander and Biasi, Thomas and Schneider, Steffen and Yuksekgonul, Mert and Rogers, Byron and Bethge, Matthias and Mathis, Mackenzie W.},
-        title = {Pretraining Boosts Out-of-Domain Robustness for Pose Estimation},
-        booktitle = {Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)},
-        month = {January},
-        year = {2021},
-        pages = {1859-1868}}
-
-    @article{Lauer2022MultianimalPE,
-        title={Multi-animal pose estimation, identification and tracking with DeepLabCut},
-        author={Jessy Lauer and Mu Zhou and Shaokai Ye and William Menegas and Steffen Schneider and Tanmay Nath and Mohammed Mostafizur Rahman and Valentina Di Santo and Daniel Soberanes and Guoping Feng and Venkatesh N. Murthy and George Lauder and Catherine Dulac and M. Mathis and Alexander Mathis},
-        journal={Nature Methods},
-        year={2022},
-        volume={19},
-        pages={496 - 504}}
-
-    @article{insafutdinov2016eccv,
-        title = {DeeperCut: A Deeper, Stronger, and Faster Multi-Person Pose Estimation Model},
-        author = {Eldar Insafutdinov and Leonid Pishchulin and Bjoern Andres and Mykhaylo Andriluka and Bernt Schiele},
-        booktitle = {ECCV'16},
-        url = {http://arxiv.org/abs/1605.03170}}
-
-Review & Educational articles:
-
-    @article{Mathis2020DeepLT,
-        title={Deep learning tools for the measurement of animal behavior in neuroscience},
-        author={Mackenzie W. Mathis and Alexander Mathis},
-        journal={Current Opinion in Neurobiology},
-        year={2020},
-        volume={60},
-        pages={1-11}}
-
-    @article{Mathis2020Primer,
-        title={A Primer on Motion Capture with Deep Learning: Principles, Pitfalls, and Perspectives},
-        author={Alexander Mathis and Steffen Schneider and Jessy Lauer and Mackenzie W. Mathis},
-        journal={Neuron},
-        year={2020},
-        volume={108},
-        pages={44-65}}
-
-Other open-access pre-prints related to our work on DeepLabCut:
-
-    @article{MathisWarren2018speed,
-        author = {Mathis, Alexander and Warren, Richard A.},
-        title = {On the inference speed and video-compression robustness of DeepLabCut},
-        year = {2018},
-        doi = {10.1101/457242},
-        publisher = {Cold Spring Harbor Laboratory},
-        URL = {https://www.biorxiv.org/content/early/2018/10/30/457242},
-        eprint = {https://www.biorxiv.org/content/early/2018/10/30/457242.full.pdf},
-        journal = {bioRxiv}}
+## References \& Citations:
+
+Please see our [dedicated page](https://deeplabcut.github.io/DeepLabCut/docs/citation.html) on how to **cite DeepLabCut** 🙏 and our suggestions for your Methods section!
 
 ## License:
@@ -278,4 +203,4 @@ importing a project into the new data format for DLC 2.0
 ## Funding
- We are grateful for the follow support over the years! This software project was supported in part by the Essential Open Source Software for Science (EOSS) program at Chan Zuckerberg Initiative (cycles 1, 3, 3-DEI, 6). We also thank the Rowland Institute at Harvard for funding from 2017-2020, and EPFL from 2020-present.
+ We are grateful for the following support over the years! This software project was supported in part by the Essential Open Source Software for Science (EOSS) program at Chan Zuckerberg Initiative (cycles 1, 3, 3-DEI, 4), and jointly with the Kavli Foundation for EOSS Cycle 6! We also thank the Rowland Institute at Harvard for funding from 2017-2020, and EPFL from 2020-present.
diff --git a/_toc.yml b/_toc.yml
index ba781450c5..471655454f 100644
--- a/_toc.yml
+++ b/_toc.yml
@@ -71,3 +71,6 @@ parts:
 - file: docs/MISSION_AND_VALUES
 - file: docs/roadmap
 - file: docs/Governance
+- caption: Citations for DeepLabCut
+  chapters:
+  - file: docs/citation
diff --git a/docs/citation.md b/docs/citation.md
new file mode 100644
index 0000000000..c427b1e223
--- /dev/null
+++ b/docs/citation.md
@@ -0,0 +1,138 @@
+# How to Cite DeepLabCut
+
+Thank you for using DeepLabCut! Here are our recommendations for citing and documenting your use of DeepLabCut in your Methods section:
+
+
+If you use this code or data we kindly ask that you please [cite Mathis et al, 2018](https://www.nature.com/articles/s41593-018-0209-y)
+and, if you use the Python package (DeepLabCut2.x+) please also cite [Nath, Mathis et al, 2019](https://doi.org/10.1038/s41596-019-0176-0).
+If you utilize the MobileNetV2s or EfficientNets please cite [Mathis, Biasi et al. 2021](https://openaccess.thecvf.com/content/WACV2021/papers/Mathis_Pretraining_Boosts_Out-of-Domain_Robustness_for_Pose_Estimation_WACV_2021_paper.pdf).
+If you use multi-animal versions 2.2beta+ or 2.2rc1+, please cite [Lauer et al. 2022](https://www.nature.com/articles/s41592-022-01443-0).
+If you use our SuperAnimal models, please cite [Ye et al. 2024](https://www.nature.com/articles/s41467-024-48792-2).
+
+DOIs (#ProTip, for helping you find citations for software, check out [CiteAs.org](http://citeas.org/)!):
+
+- Mathis et al 2018: [10.1038/s41593-018-0209-y](https://doi.org/10.1038/s41593-018-0209-y)
+- Nath, Mathis et al 2019: [10.1038/s41596-019-0176-0](https://doi.org/10.1038/s41596-019-0176-0)
+- Lauer et al 2022: [10.1038/s41592-022-01443-0](https://doi.org/10.1038/s41592-022-01443-0)
+- Ye et al 2024: [10.1038/s41467-024-48792-2](https://www.nature.com/articles/s41467-024-48792-2)
+
+## Formatted citations:
+
+    @article{Mathisetal2018,
+        title = {DeepLabCut: markerless pose estimation of user-defined body parts with deep learning},
+        author = {Alexander Mathis and Pranav Mamidanna and Kevin M. Cury and Taiga Abe and Venkatesh N. Murthy and Mackenzie W. Mathis and Matthias Bethge},
+        journal = {Nature Neuroscience},
+        year = {2018},
+        url = {https://www.nature.com/articles/s41593-018-0209-y}}
+
+    @article{NathMathisetal2019,
+        title = {Using DeepLabCut for 3D markerless pose estimation across species and behaviors},
+        author = {Nath*, Tanmay and Mathis*, Alexander and Chen, An Chi and Patel, Amir and Bethge, Matthias and Mathis, Mackenzie W},
+        journal = {Nature Protocols},
+        year = {2019},
+        url = {https://doi.org/10.1038/s41596-019-0176-0}}
+
+    @InProceedings{Mathis_2021_WACV,
+        author = {Mathis, Alexander and Biasi, Thomas and Schneider, Steffen and Yuksekgonul, Mert and Rogers, Byron and Bethge, Matthias and Mathis, Mackenzie W.},
+        title = {Pretraining Boosts Out-of-Domain Robustness for Pose Estimation},
+        booktitle = {Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)},
+        month = {January},
+        year = {2021},
+        pages = {1859-1868}}
+
+    @article{Lauer2022MultianimalPE,
+        title={Multi-animal pose estimation, identification and tracking with DeepLabCut},
+        author={Jessy Lauer and Mu Zhou and Shaokai Ye and William Menegas and Steffen Schneider and Tanmay Nath and Mohammed Mostafizur Rahman and Valentina Di Santo and Daniel Soberanes and Guoping Feng and Venkatesh N. Murthy and George Lauder and Catherine Dulac and M. Mathis and Alexander Mathis},
+        journal={Nature Methods},
+        year={2022},
+        volume={19},
+        pages={496 - 504}}
+
+    @article{Ye2024SuperAnimal,
+        title={SuperAnimal pretrained pose estimation models for behavioral analysis},
+        author={Shaokai Ye and Anastasiia Filippova and Jessy Lauer and Steffen Schneider and Maxime Vidal and Tian Qiu and Alexander Mathis and Mackenzie W. Mathis},
+        journal={Nature Communications},
+        year={2024},
+        volume={15}}
+
+
+### Review & Educational articles:
+
+    @article{Mathis2020DeepLT,
+        title={Deep learning tools for the measurement of animal behavior in neuroscience},
+        author={Mackenzie W. Mathis and Alexander Mathis},
+        journal={Current Opinion in Neurobiology},
+        year={2020},
+        volume={60},
+        pages={1-11}}
+
+    @article{Mathis2020Primer,
+        title={A Primer on Motion Capture with Deep Learning: Principles, Pitfalls, and Perspectives},
+        author={Alexander Mathis and Steffen Schneider and Jessy Lauer and Mackenzie W. Mathis},
+        journal={Neuron},
+        year={2020},
+        volume={108},
+        pages={44-65}}
+
+### Other open-access pre-prints related to our work on DeepLabCut:
+
+    @article{MathisWarren2018speed,
+        author = {Mathis, Alexander and Warren, Richard A.},
+        title = {On the inference speed and video-compression robustness of DeepLabCut},
+        year = {2018},
+        doi = {10.1101/457242},
+        publisher = {Cold Spring Harbor Laboratory},
+        URL = {https://www.biorxiv.org/content/early/2018/10/30/457242},
+        eprint = {https://www.biorxiv.org/content/early/2018/10/30/457242.full.pdf},
+        journal = {bioRxiv}}
+
+
+
+## Methods Suggestion:
+
+For body part tracking we used DeepLabCut (version 2.X.X)* [Mathis et al, 2018, Nath et al, 2019, Lauer et al. 2022]. Specifically, we labeled X number of frames taken from X videos/animals (then X% was used for training (default is 95%)). We used a X-based neural network (i.e. X = ResNet-50, ResNet-101, MobileNetV2-0.35, MobileNetV2-0.5, MobileNetV2-0.75, MobileNetV2-1***) with default parameters* for X number of training iterations. We validated with X number of shuffles, and found the test error was: X pixels, train: X pixels (image size was X by X). We then used a p-cutoff of X (i.e. 0.9) to condition the X,Y coordinates for future analysis (a minimal filtering sketch is given at the end of this page). This network was then used to analyze videos from similar experimental settings.
+
+> Mathis, A. et al. Deeplabcut: markerless pose estimation
+> of user-defined body parts with deep learning. Nature
+> Neuroscience 21, 1281–1289 (2018).
+
+> Nath, T. et al. Using deeplabcut for 3d markerless pose
+> estimation across species and behaviors. Nature Protocols
+> 14, 2152–2176 (2019).
+
+*If any defaults were changed in `pose_config.yaml`, mention them here.*
+
+i.e. common things one might change:
+* the loader (options are `default`, `imgaug`, `tensorpack`, `deterministic`).
+* the `pos_dist_thresh` (default is 17 and determines training resolution).
+* optimizer: do you use the default `SGD` or `ADAM`?
+
+*** here, you could add additional citations.
+If you use ResNets, consider citing Insafutdinov et al 2016 & He et al 2016. If you use the MobileNetV2s consider citing Mathis et al 2019, and Sandler et al, 2018.
+
+
+> Mathis, A. et al. Pretraining boosts out-of-domain robustness for pose estimation
+> arXiv 1909.11229 (2019)
+
+> Insafutdinov, E., Pishchulin, L., Andres, B., Andriluka,
+> M. & Schiele, B. DeeperCut: A deeper, stronger, and
+> faster multi-person pose estimation model. In European
+> Conference on Computer Vision, 34–50 (Springer, 2016).
+
+> Sandler, M., Howard, A., Zhu, M., Zhmoginov, A. &
+> Chen, L.-C. Mobilenetv2: Inverted residuals and linear
+> bottlenecks. In Proceedings of the IEEE Conference
+> on Computer Vision and Pattern Recognition, 4510–4520
+> (2018).
+
+> He, K., Zhang, X., Ren, S. & Sun, J. Deep residual
+> learning for image recognition. In Proceedings of the
+> IEEE conference on computer vision and pattern recognition,
+> 770–778 (2016). URL https://arxiv.org/abs/1512.03385.
+
+## Graphics
+
+We also have the network graphic freely available on SciDraw.io if you'd like to use it! https://scidraw.io/drawing/290
+
+You are welcome to use our logo in your works as well.
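+
+## Example: applying a p-cutoff
+
+Here is a minimal sketch of the p-cutoff conditioning mentioned above, assuming a standard single-animal .h5 output from ``deeplabcut.analyze_videos``. The output file name and cutoff value below are placeholders:
+
+```python
+import numpy as np
+import pandas as pd
+
+pcutoff = 0.9  # report the value you actually used in your Methods section
+df = pd.read_hdf("videoname_DLC_model.h5")  # placeholder: your analyze_videos output file
+
+# DeepLabCut output columns are a MultiIndex: (scorer, bodyparts, coords)
+scorer = df.columns.get_level_values("scorer")[0]
+for bodypart in df[scorer].columns.get_level_values("bodyparts").unique():
+    # Mask out X,Y coordinates whose likelihood falls below the cutoff
+    low_confidence = df[(scorer, bodypart, "likelihood")] < pcutoff
+    df.loc[low_confidence, [(scorer, bodypart, "x"), (scorer, bodypart, "y")]] = np.nan
+```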
From b7a64677880c6fa44fd3e42d1e87b738c2b28969 Mon Sep 17 00:00:00 2001 From: n-poulsen <45132115+n-poulsen@users.noreply.github.com> Date: Mon, 28 Oct 2024 13:09:04 +0100 Subject: [PATCH 23/88] fix tmpdir file creation (#2768) --- tests/test_inferenceutils.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/test_inferenceutils.py b/tests/test_inferenceutils.py index d533049547..5b4c126f58 100644 --- a/tests/test_inferenceutils.py +++ b/tests/test_inferenceutils.py @@ -176,9 +176,9 @@ def test_assembler(tmpdir_factory, real_assemblies): 1 for a in real_assemblies.values() for _ in a ) - output_name = tmpdir_factory.mktemp("data").join("fake.h5") - ass.to_h5(output_name) - ass.to_pickle(str(output_name).replace("h5", "pickle")) + output_dir = tmpdir_factory.mktemp("data") + ass.to_h5(output_dir.join("fake.h5")) + ass.to_pickle(output_dir.join("fake.pickle")) def test_assembler_with_single_bodypart(real_assemblies): @@ -288,9 +288,9 @@ def test_assembler_with_identity(tmpdir_factory, real_assemblies): eq.append(np.all(ids == ids[0])) assert all(eq) - output_name = tmpdir_factory.mktemp("data").join("fake.h5") - ass.to_h5(output_name) - ass.to_pickle(str(output_name).replace("h5", "pickle")) + output_dir = tmpdir_factory.mktemp("data") + ass.to_h5(output_dir.join("fake.h5")) + ass.to_pickle(output_dir.join("fake.pickle")) def test_assembler_calibration(real_assemblies): From 41aa1b6fab125e51e810d9976837cea51994c3ba Mon Sep 17 00:00:00 2001 From: n-poulsen <45132115+n-poulsen@users.noreply.github.com> Date: Mon, 11 Nov 2024 15:34:03 +0100 Subject: [PATCH 24/88] bug fix: None has no lower() attribute (#2780) --- deeplabcut/gui/tabs/train_network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deeplabcut/gui/tabs/train_network.py b/deeplabcut/gui/tabs/train_network.py index 2fa57567da..54fe602054 100644 --- a/deeplabcut/gui/tabs/train_network.py +++ b/deeplabcut/gui/tabs/train_network.py @@ -75,7 +75,7 @@ def _update_snapshot_selection_widgets_visibility(self): self.resume_from_snapshot_label.show() self.snapshot_selection_widget.show() # Display detector snapshot selection widget only if in Top-Down mode - if self._shuffle_display.pose_cfg.get("method").lower() == "td": + if self._shuffle_display.pose_cfg.get("method", "").lower() == "td": self.detector_snapshot_selection_widget.show() else: self.detector_snapshot_selection_widget.hide() From ee0e5611071ad1c557683e72584a0adcbcdb22e7 Mon Sep 17 00:00:00 2001 From: maximpavliv <37336830+maximpavliv@users.noreply.github.com> Date: Thu, 14 Nov 2024 11:41:33 +0100 Subject: [PATCH 25/88] Use architecture-specific snapshots prefixes in video adaptation (#2781) --- deeplabcut/modelzoo/video_inference.py | 30 ++++++++++++------- .../pose_estimation_pytorch/apis/utils.py | 4 ++- .../runners/snapshots.py | 12 ++++---- .../pose_estimation_pytorch/runners/train.py | 6 +++- 4 files changed, 33 insertions(+), 19 deletions(-) diff --git a/deeplabcut/modelzoo/video_inference.py b/deeplabcut/modelzoo/video_inference.py index 971bf6065c..da5efe5c84 100644 --- a/deeplabcut/modelzoo/video_inference.py +++ b/deeplabcut/modelzoo/video_inference.py @@ -154,7 +154,7 @@ def video_inference_superanimal( NotImplementedError: If the model is not found in the modelzoo. Warning: If the superanimal_name will be deprecated in the future. - + (Model Explanation) SuperAnimal-Quadruped: `superanimal_quadruped` models aim to work across a large range of quadruped animals, from horses, dogs, sheep, rodents, to elephants. 
The camera perspective is @@ -276,9 +276,7 @@ def video_inference_superanimal( _video_inference_superanimal, ) - weight_folder = ( - get_snapshot_folder_path() / f"{superanimal_name}_{model_name}" - ) + weight_folder = get_snapshot_folder_path() / f"{superanimal_name}_{model_name}" if not weight_folder.exists(): download_huggingface_model( superanimal_name, target_dir=str(weight_folder), rename_mapping=None @@ -299,6 +297,11 @@ def video_inference_superanimal( pseudo_threshold, ) elif framework == "pytorch": + if detector_name is None: + raise ValueError( + "You have to specify a detector_name when using the Pytorch framework." + ) + from deeplabcut.pose_estimation_pytorch.modelzoo.inference import ( _video_inference_superanimal, ) @@ -373,10 +376,9 @@ def video_inference_superanimal( video_to_frames(video_path, pseudo_dataset_folder, cropping=cropping) anno_folder = pseudo_dataset_folder / "annotations" - if ( - (anno_folder / "train.json").exists() - and (anno_folder / "test.json").exists() - ): + if (anno_folder / "train.json").exists() and ( + anno_folder / "test.json" + ).exists(): print( f"{anno_folder} exists, skipping the annotation construction. " f"Delete the folder if you want to re-construct pseudo annotations" @@ -405,6 +407,12 @@ def video_inference_superanimal( bbox_threshold=bbox_threshold, ) + model_snapshot_prefix = f"snapshot-{model_name}" + detector_snapshot_prefix = f"snapshot-{detector_name}" + + config["runner"]["snapshot_prefix"] = model_snapshot_prefix + config["detector"]["runner"]["snapshot_prefix"] = detector_snapshot_prefix + # the model config's parameters need to be updated for adaptation training model_config_path = model_folder / "pytorch_config.yaml" with open(model_config_path, "w") as f: @@ -412,9 +420,11 @@ def video_inference_superanimal( yaml.dump(config, f) adapted_detector_checkpoint = ( - model_folder / f"snapshot-detector-{detector_epochs:03}.pt" + model_folder / f"{detector_snapshot_prefix}-{detector_epochs:03}.pt" + ) + adapted_pose_checkpoint = ( + model_folder / f"{model_snapshot_prefix}-{pose_epochs:03}.pt" ) - adapted_pose_checkpoint = model_folder / f"snapshot-{pose_epochs:03}.pt" if ( adapted_detector_checkpoint.exists() diff --git a/deeplabcut/pose_estimation_pytorch/apis/utils.py b/deeplabcut/pose_estimation_pytorch/apis/utils.py index cc3eda46f2..a5a971e4eb 100644 --- a/deeplabcut/pose_estimation_pytorch/apis/utils.py +++ b/deeplabcut/pose_estimation_pytorch/apis/utils.py @@ -156,7 +156,9 @@ def get_model_snapshots( ValueError: If the index given is not valid ValueError: If index=="best" but there is no saved best model """ - snapshot_manager = TorchSnapshotManager(model_folder=model_folder, task=task) + snapshot_manager = TorchSnapshotManager( + model_folder=model_folder, snapshot_prefix=task.snapshot_prefix + ) if isinstance(index, str) and index.lower() == "best": best_snapshot = snapshot_manager.best() if best_snapshot is None: diff --git a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py index 3f7386bb57..341b3c4be4 100644 --- a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py +++ b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py @@ -19,8 +19,6 @@ import numpy as np import torch -from deeplabcut.pose_estimation_pytorch.task import Task - @dataclass(frozen=True) class Snapshot: @@ -43,7 +41,7 @@ class TorchSnapshotManager: """Class handling model checkpoint I/O Attributes: - task: The task that the model is performing. 
+ snapshot_prefix: The prefix to use when saving snapshots. model_folder: The path to the directory where model snapshots should be stored. key_metric: If defined, the metric is used to save the best model. Otherwise no best model is used. @@ -60,7 +58,7 @@ class TorchSnapshotManager: model: nn.Module loader = DLCLoader(...) snapshot_manager = TorchSnapshotManager( - Task.BOTTOM_UP, + "snapshot", loader.model_folder, key_metric="test.mAP", ) @@ -76,7 +74,7 @@ class TorchSnapshotManager: }) """ - task: Task + snapshot_prefix: str model_folder: Path key_metric: str | None = None key_metric_asc: bool = True @@ -191,7 +189,7 @@ def _sort_key(snapshot: Snapshot) -> int: def _sort_key_best_as_last(snapshot: Snapshot) -> tuple[int, int]: return 1 if snapshot.best else 0, snapshot.epochs - pattern = r"^(" + self.task.snapshot_prefix + r"(-best)?-\d+\.pt)$" + pattern = r"^(" + self.snapshot_prefix + r"(-best)?-\d+\.pt)$" snapshots = [ Snapshot.from_path(f) for f in self.model_folder.iterdir() @@ -216,4 +214,4 @@ def snapshot_path(self, epoch: int, best: bool = False) -> Path: uid = f"{epoch:03}" if best: uid = f"best-{uid}" - return self.model_folder / f"{self.task.snapshot_prefix}-{uid}.pt" + return self.model_folder / f"{self.snapshot_prefix}-{uid}.pt" diff --git a/deeplabcut/pose_estimation_pytorch/runners/train.py b/deeplabcut/pose_estimation_pytorch/runners/train.py index ce64865c1b..23fb74b518 100644 --- a/deeplabcut/pose_estimation_pytorch/runners/train.py +++ b/deeplabcut/pose_estimation_pytorch/runners/train.py @@ -597,11 +597,15 @@ def build_training_runner( optim_cls = getattr(torch.optim, optim_cfg["type"]) optimizer = optim_cls(params=model.parameters(), **optim_cfg["params"]) scheduler = build_scheduler(runner_config.get("scheduler"), optimizer) + # if no custom snapshot prefix is defined, use the default one + snapshot_prefix = runner_config.get("snapshot_prefix") + if snapshot_prefix is None or len(snapshot_prefix) == 0: + snapshot_prefix = task.snapshot_prefix kwargs = dict( model=model, optimizer=optimizer, snapshot_manager=TorchSnapshotManager( - task=task, + snapshot_prefix=snapshot_prefix, model_folder=model_folder, key_metric=runner_config.get("key_metric"), key_metric_asc=runner_config.get("key_metric_asc"), From c41a1382cfc1e5790f18bfe4845bb9294d5e3572 Mon Sep 17 00:00:00 2001 From: n-poulsen <45132115+n-poulsen@users.noreply.github.com> Date: Thu, 14 Nov 2024 11:43:11 +0100 Subject: [PATCH 26/88] Video Analysis with Identity - Lowering memory footprint (#2785) --- .../config/make_pose_config.py | 9 ++-- .../data/postprocessor.py | 47 ++++++++++++------- .../post_processing/identity.py | 5 +- .../data/test_postprocessor.py | 1 + 4 files changed, 41 insertions(+), 21 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/config/make_pose_config.py b/deeplabcut/pose_estimation_pytorch/config/make_pose_config.py index 42f2009037..65cdff2d1d 100644 --- a/deeplabcut/pose_estimation_pytorch/config/make_pose_config.py +++ b/deeplabcut/pose_estimation_pytorch/config/make_pose_config.py @@ -198,12 +198,15 @@ def make_pytorch_test_config( The test configuration file. 
""" bodyparts = model_config["metadata"]["bodyparts"] + unique_bodyparts = model_config["metadata"]["unique_bodyparts"] + all_joint_names = bodyparts + unique_bodyparts + test_config = dict( dataset=model_config["metadata"]["project_path"], dataset_type="multi-animal-imgaug", # required for downstream tracking - num_joints=len(bodyparts), - all_joints=[[i] for i in range(len(bodyparts))], - all_joints_names=bodyparts, + num_joints=len(all_joint_names), + all_joints=[[i] for i in range(len(all_joint_names))], + all_joints_names=all_joint_names, net_type=model_config["net_type"], global_scale=1, scoremap_dir="test", diff --git a/deeplabcut/pose_estimation_pytorch/data/postprocessor.py b/deeplabcut/pose_estimation_pytorch/data/postprocessor.py index 609ff00ef9..0fe76343d6 100644 --- a/deeplabcut/pose_estimation_pytorch/data/postprocessor.py +++ b/deeplabcut/pose_estimation_pytorch/data/postprocessor.py @@ -67,7 +67,6 @@ def build_bottom_up_postprocessor( keys_to_rescale.append("unique_bodyparts") if with_identity: - # TODO: do we really want to return the heatmaps? keys_to_concatenate["identity_heatmap"] = ("identity", "heatmap") empty_shapes["identity_heatmap"] = (1, 1, max_individuals) @@ -85,6 +84,7 @@ def build_bottom_up_postprocessor( identity_key="identity_scores", identity_map_key="identity_heatmap", pose_key="bodyparts", + keep_id_maps=False, ) ) @@ -96,8 +96,6 @@ def build_bottom_up_postprocessor( PadOutputs( max_individuals={ "bodyparts": max_individuals, - "unique_bodyparts": 0, # no need to pad - "identity_heatmap": 0, # no need to pad "identity_scores": max_individuals, }, pad_value=-1, @@ -146,7 +144,6 @@ def build_top_down_postprocessor( "bodyparts": max_individuals, "bboxes": max_individuals, "bbox_scores": max_individuals, - "unique_bodyparts": 0, # no need to pad }, pad_value=-1, ), @@ -257,7 +254,10 @@ def __call__( ) -> tuple[dict[str, np.ndarray], Context]: for name in predictions: output = predictions[name] - if len(output) < self.max_individuals[name]: + if ( + name in self.max_individuals + and len(output) < self.max_individuals[name] + ): pad_size = self.max_individuals[name] - len(output) tail_shape = output.shape[1:] padding = self.pad_value * np.ones((pad_size, *tail_shape)) @@ -404,10 +404,15 @@ def __call__( class PredictKeypointIdentities(Postprocessor): """Assigns predicted identities to keypoints + The identity maps have shape (h, w, num_ids). + Attributes: - identity_key: - identity_map_key: shape (h, w, num_ids) - pose_key: + identity_key: Key with which to add predicted identities in the predictions dict + identity_map_key: Key for the identity maps in the predictions dict + pose_key: Key for the bodyparts in the predictions dict + keep_id_maps: Whether to keep identity heatmaps in the output dictionary. + Setting this value to True can be useful for debugging, but can lead to + memory issues when running video analysis on long videos. 
""" def __init__( @@ -415,28 +420,36 @@ def __init__( identity_key: str, identity_map_key: str, pose_key: str, + keep_id_maps: bool = False, ) -> None: self.identity_key = identity_key self.identity_map_key = identity_map_key self.pose_key = pose_key + self.keep_id_maps = keep_id_maps def __call__( self, predictions: dict[str, np.ndarray], context: Context ) -> tuple[dict[str, np.ndarray], Context]: - individuals = predictions[self.pose_key] + pose = predictions[self.pose_key] + num_preds, num_keypoints, _ = pose.shape + identity_heatmap = predictions[self.identity_map_key] # (h, w, num_ids) h, w, num_ids = identity_heatmap.shape - num_individuals, num_keypoints, _ = individuals.shape - assembly_id_scores = [] - for individual_keypoints in individuals: + id_score_matrix = np.zeros((num_preds, num_keypoints, num_ids)) + for pred_idx, individual_keypoints in enumerate(pose): heatmap_indices = np.rint(individual_keypoints).astype(int) xs = np.clip(heatmap_indices[:, 0], 0, w - 1) ys = np.clip(heatmap_indices[:, 1], 0, h - 1) - id_scores = [] - for x, y in zip(xs, ys): - id_scores.append(identity_heatmap[y, x, :]) - assembly_id_scores.append(np.stack(id_scores)) - predictions[self.identity_key] = np.stack(assembly_id_scores) + # get the score from each identity heatmap at each predicted keypoint + for kpt_idx, (x, y) in enumerate(zip(xs, ys)): + id_score_matrix[pred_idx, kpt_idx] = identity_heatmap[y, x, :] + + predictions[self.identity_key] = id_score_matrix + if not self.keep_id_maps: + # delete the heatmaps as this saves memory + id_heatmaps = predictions.pop(self.identity_map_key) + del id_heatmaps + return predictions, context diff --git a/deeplabcut/pose_estimation_pytorch/post_processing/identity.py b/deeplabcut/pose_estimation_pytorch/post_processing/identity.py index c5df326165..224a837ef6 100644 --- a/deeplabcut/pose_estimation_pytorch/post_processing/identity.py +++ b/deeplabcut/pose_estimation_pytorch/post_processing/identity.py @@ -35,7 +35,10 @@ def assign_identity( predictions_with_identity = [] for pred, scores in zip(predictions, identity_scores): - cost_matrix = np.product(scores, axis=1) + # average of ID scores, weighted by keypoint confidence + pose_conf = pred[:, :, 2:3] + cost_matrix = np.mean(pose_conf * scores, axis=1) + row_ind, col_ind = linear_sum_assignment(cost_matrix, maximize=True) new_order = np.zeros_like(row_ind) for old_pos, new_pos in zip(row_ind, col_ind): diff --git a/tests/pose_estimation_pytorch/data/test_postprocessor.py b/tests/pose_estimation_pytorch/data/test_postprocessor.py index b178e8d175..f8f210036b 100644 --- a/tests/pose_estimation_pytorch/data/test_postprocessor.py +++ b/tests/pose_estimation_pytorch/data/test_postprocessor.py @@ -203,6 +203,7 @@ def test_assign_id_scores(data): identity_key="keypoint_identity", identity_map_key="identity_map", pose_key="bodyparts", + keep_id_maps=True, ) bodyparts = np.array(data["bodyparts"]) id_heatmap = np.array(data["id_heatmap"]) From f2663fb8810cae70affe9bab0aab1795c2d99065 Mon Sep 17 00:00:00 2001 From: maximpavliv <37336830+maximpavliv@users.noreply.github.com> Date: Thu, 14 Nov 2024 15:09:10 +0100 Subject: [PATCH 27/88] Timeout retrieving latest package version from the Web (#2782) --- deeplabcut/gui/utils.py | 16 +++++++++++++++- deeplabcut/gui/window.py | 17 ++++++++++++++--- 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/deeplabcut/gui/utils.py b/deeplabcut/gui/utils.py index cd5369fb1a..4f0407c698 100644 --- a/deeplabcut/gui/utils.py +++ b/deeplabcut/gui/utils.py @@ -8,7 +8,10 
@@
 #
 # Licensed under GNU Lesser General Public License v3.0
 #
+from typing import Tuple
+
 from PySide6 import QtCore
+import re
 
 
 class Worker(QtCore.QObject):
@@ -38,6 +41,17 @@ def stop_thread():
     return worker, thread
 
 
+def parse_version(version: str) -> Tuple[int, int, int]:
+    """
+    Parses a version string into a tuple of (major, minor, patch).
+    """
+    match = re.search(r"(\d+)\.(\d+)\.(\d+)", version)
+    if match:
+        return tuple(int(part) for part in match.groups())
+    else:
+        raise ValueError(f"Invalid version format: {version}")
+
+
 def is_latest_deeplabcut_version():
     import json
     import urllib.request
@@ -46,4 +60,4 @@ def is_latest_deeplabcut_version():
     url = "https://pypi.org/pypi/deeplabcut/json"
     contents = urllib.request.urlopen(url).read()
     latest_version = json.loads(contents)["info"]["version"]
-    return VERSION == latest_version, latest_version
+    return parse_version(VERSION) >= parse_version(latest_version), latest_version
diff --git a/deeplabcut/gui/window.py b/deeplabcut/gui/window.py
index 0d1116b002..de40d326fa 100644
--- a/deeplabcut/gui/window.py
+++ b/deeplabcut/gui/window.py
@@ -16,6 +16,7 @@
 from pathlib import Path
 from typing import List
 from urllib.error import URLError
+from concurrent.futures import ThreadPoolExecutor, TimeoutError
 
 import qdarkstyle
 import deeplabcut
@@ -40,11 +41,21 @@
 from PySide6.QtCore import Qt, QTimer
 
 
+def call_with_timeout(func, timeout, *args, **kwargs):
+    with ThreadPoolExecutor(max_workers=1) as executor:
+        future = executor.submit(func, *args, **kwargs)
+        return future.result(timeout=timeout)
+
+
 def _check_for_updates(silent=True):
     try:
-        is_latest, latest_version = utils.is_latest_deeplabcut_version()
-        is_latest_plugin, latest_plugin_version = misc.is_latest_version()
-    except URLError:  # Handle internet connectivity issues
+        is_latest, latest_version = call_with_timeout(
+            utils.is_latest_deeplabcut_version, 1
+        )
+        is_latest_plugin, latest_plugin_version = call_with_timeout(
+            misc.is_latest_version, 1
+        )
+    except (URLError, TimeoutError):  # Handle internet connectivity issues
         is_latest = is_latest_plugin = True
 
     if is_latest and is_latest_plugin:
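[Note on PATCH 27] A minimal sketch of how the two helpers introduced above fit together, assuming a standard DeepLabCut install where `deeplabcut.gui.utils` and `deeplabcut.gui.window` import cleanly; both functions are the ones added by this patch:

```python
from deeplabcut.gui.utils import is_latest_deeplabcut_version, parse_version
from deeplabcut.gui.window import call_with_timeout

# parse_version keeps only the first "major.minor.patch" triple it finds,
# so suffixes such as release-candidate tags are tolerated.
assert parse_version("2.3.10") == (2, 3, 10)
assert parse_version("3.0.0rc4") == (3, 0, 0)

# Bound the PyPI lookup to one second, as _check_for_updates now does.
# A concurrent.futures.TimeoutError propagates if the request takes longer.
is_latest, latest_version = call_with_timeout(is_latest_deeplabcut_version, 1)
```

The single-worker `ThreadPoolExecutor` runs the request off the calling thread, and `future.result(timeout=...)` raises `TimeoutError` once the deadline passes, which `_check_for_updates` treats the same way as a connectivity failure.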
From 681d968a2ba6bd91f2e2ed7e86a6cc57e3ed703c Mon Sep 17 00:00:00 2001
From: ti
Date: Tue, 19 Nov 2024 09:51:34 +0000
Subject: [PATCH 28/88] Fix: correct the early return error when save_epochs=1
 and delete the redundant snapshots at the end

---
 .../runners/snapshots.py | 34 ++++++++++---------
 1 file changed, 18 insertions(+), 16 deletions(-)

diff --git a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py
index 341b3c4be4..bc5a30a44c 100644
--- a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py
+++ b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py
@@ -113,41 +113,43 @@ def update(self, epoch: int, state_dict: dict, last: bool = False) -> None:
         ):
             current_best = self.best()
             self._best_metric = metrics[self._key]
+
+            # Save the new best model
             save_path = self.snapshot_path(epoch, best=True)
             parsed_state_dict = {
-                k: v
+                k: v
                 for k, v in state_dict.items()
                 if self.save_optimizer_state or k != "optimizer"
             }
             torch.save(parsed_state_dict, save_path)
 
+            # Handle previous best model
             if current_best is not None:
-                # rename if the current best should have been saved, otherwise delete
                 if current_best.epochs % self.save_epochs == 0:
                     new_name = self.snapshot_path(epoch=current_best.epochs)
                     current_best.path.rename(new_name)
                 else:
                     current_best.path.unlink(missing_ok=False)
-            return
-
-        if not (last or epoch % self.save_epochs == 0):
-            return
-
+        else:
+            # Save regular snapshot if needed
+            should_save = last or epoch % self.save_epochs == 0
+            if should_save:
+                save_path = self.snapshot_path(epoch=epoch)
+                parsed_state_dict = {
+                    k: v
+                    for k, v in state_dict.items()
+                    if self.save_optimizer_state or k != "optimizer"
+                }
+                torch.save(parsed_state_dict, save_path)
+
+        # Clean up old snapshots if needed
         existing_snapshots = [s for s in self.snapshots() if not s.best]
         if len(existing_snapshots) >= self.max_snapshots:
-            num_to_delete = 1 + len(existing_snapshots) - self.max_snapshots
+            num_to_delete = len(existing_snapshots) - self.max_snapshots
             to_delete = existing_snapshots[:num_to_delete]
             for snapshot in to_delete:
                 snapshot.path.unlink(missing_ok=False)
 
-        save_path = self.snapshot_path(epoch=epoch)
-        parsed_state_dict = {
-            k: v
-            for k, v in state_dict.items()
-            if self.save_optimizer_state or k != "optimizer"
-        }
-        torch.save(parsed_state_dict, save_path)
-
     def best(self) -> Snapshot | None:
         """Returns: the path to the best snapshot, if it exists"""
         snapshots = self.snapshots()

From 329402fa5293876caf84ef2c1db9bd2e5aebe87a Mon Sep 17 00:00:00 2001
From: ti
Date: Thu, 21 Nov 2024 16:46:52 +0000
Subject: [PATCH 29/88] add plot_gt_and_predictions function

---
 .../pose_estimation_pytorch/apis/evaluate.py | 229 ++++++++++++++++++
 1 file changed, 229 insertions(+)

diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py
index f2b3bfea6b..4eb7381772 100644
--- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py
+++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py
@@ -18,6 +18,7 @@
 import numpy as np
 import pandas as pd
 from tqdm import tqdm
+import matplotlib.pyplot as plt
 
 import deeplabcut.core.metrics as metrics
 from deeplabcut.core.weight_init import WeightInitialization
@@ -37,6 +38,14 @@
 from deeplabcut.pose_estimation_pytorch.task import Task
 from deeplabcut.utils import auxiliaryfunctions
 from deeplabcut.utils.visualization import plot_evaluation_results
+from deeplabcut.utils import auxfun_videos
+from deeplabcut.utils.visualization import (
+    create_minimal_figure,
+    get_cmap,
+    make_multianimal_labeled_image,
+    save_labeled_frame,
+    erase_artists,
+)
 
 
 def predict(
@@ -167,6 +176,224 @@ def evaluate(
     return results, predictions
 
 
+import random
+# def plot_predictions(
+#     loader: Loader,
+#     predictions: dict[str, dict[str, np.ndarray]],
+#     plotting: str = "bodypart",
+#     sample: int | None = None,
+#     sample_random: bool = False,
+# ) -> None:
+
+def plot_predictions(
+    loader: Loader,
+    predictions: dict[str, dict[str, np.ndarray]],
+    plotting: str = "bodypart",
+    sample: int | None = None,
+    sample_random: bool = False,
+) -> None:
+    """
+    Process COCO format data and visualize using plot_evaluation_results
+
+    Args:
+        loader: COCOLoader instance containing dataset info
+        predictions: Model predictions dictionary
+        plotting: How to color the points ("bodypart" or "individual")
+        sample: Number of images to visualize (None for all)
+        sample_random: Whether to sample images randomly
+    """
+
+    # Get paths and create output folder
+    project_root = loader.project_root
+    output_folder = Path(project_root) / "labeled_frames"
+    output_folder.mkdir(exist_ok=True)
+
+    # 2. Get ground truth data
+    ground_truth = loader.load_data(mode="test")
+
+    # 3. 
Create image list for sampling + image_ids = [img['id'] for img in ground_truth['images']] + if sample is not None: + if sample_random: + image_ids = random.sample(image_ids, min(sample, len(image_ids))) + else: + image_ids = image_ids[:sample] + + # 4. Create DataFrame structure + data = [] + + # Process ground truth + for img_id in image_ids: + img_info = next(img for img in ground_truth['images'] if img['id'] == img_id) + img_name = img_info['file_name'] + + # Get ground truth annotations + gt_anns = [ann for ann in ground_truth['annotations'] if ann['image_id'] == img_id] + + # Get predictions for this image + pred_anns = [pred for pred in predictions if pred['image_id'] == img_id] + + # Process each keypoint + for gt_ann, pred_ann in zip(gt_anns, pred_anns): + gt_kpts = np.array(gt_ann['keypoints']).reshape(-1, 3) + pred_kpts = np.array(pred_ann['keypoints']).reshape(-1, 3) + + # Get keypoint names + keypoint_names = ground_truth['categories'][0]['keypoints'] + + # Add ground truth points + for idx, (x, y, v) in enumerate(gt_kpts): + if v > 0: # visible keypoint + data.append({ + 'image': img_name, + 'scorer': 'ground_truth', + 'individual': f"instance_{gt_ann['id']}", + 'bodypart': keypoint_names[idx], + 'x': x, + 'y': y, + 'likelihood': 1.0 + }) + + # Add predictions + for idx, (x, y, score) in enumerate(pred_kpts): + if score > 0: # detected keypoint + data.append({ + 'image': img_name, + 'scorer': 'dlc_model', + 'individual': f"instance_{pred_ann['id']}", + 'bodypart': keypoint_names[idx], + 'x': x, + 'y': y, + 'likelihood': score + }) + + # 5. Create MultiIndex DataFrame + df = pd.DataFrame(data) + df_combined = df.set_index(['image', 'scorer', 'individual', 'bodypart']) + df_combined = df_combined.unstack(['scorer', 'individual', 'bodypart']) + + # 6. Call plot_evaluation_results + plot_evaluation_results( + df_combined=df_combined, + project_root=project_root, + scorer='ground_truth', + model_name='dlc_model', + output_folder=str(output_folder), + in_train_set=False, # Since we're using test data + mode=plotting, + plot_unique_bodyparts=False, # whether we should plot unique bodyparts + colormap='rainbow', # default values + dot_size=12, # default values + alpha_value=0.7, # default values + p_cutoff=0.6 # default values + ) + +def plot_gt_and_predictions( + image_path: str | Path, + output_dir: str | Path, + gt_bodyparts: np.ndarray, + pred_bodyparts: np.ndarray, # (num_predicted_animals, num_keypoints, 3) + gt_unique_bodyparts: np.ndarray | None = None, + pred_unique_bodyparts: np.ndarray | None = None, + mode: str = "bodypart", + colormap: str = "rainbow", + dot_size: int = 12, + alpha_value: float = 0.7, + p_cutoff: float = 0.6, +): + """Plot ground truth and predictions on an image. 
+ + Args: + image_path: Path to the image + gt_bodyparts: Ground truth keypoints array (num_animals, num_keypoints, 3) + pred_bodyparts: Predicted keypoints array (num_animals, num_keypoints, 3) + output_dir: Directory where labeled images will be saved + gt_unique_bodyparts: Ground truth unique bodyparts if any + pred_unique_bodyparts: Predicted unique bodyparts if any + mode: How to color the points ("bodypart" or "individual") + colormap: Matplotlib colormap name + dot_size: Size of the plotted points + alpha_value: Transparency of the points + p_cutoff: Confidence threshold for showing predictions + """ + # Ensure output directory exists + output_dir = Path(output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + # Read the image + frame = auxfun_videos.imread(str(image_path), mode="skimage") + num_pred, num_keypoints = pred_bodyparts.shape[:2] + + # Create figure and set dimensions + fig, ax = create_minimal_figure() + h, w, _ = np.shape(frame) + fig.set_size_inches(w / 100, h / 100) + ax.set_xlim(0, w) + ax.set_ylim(0, h) + ax.invert_yaxis() + ax.imshow(frame, "gray") + + # Set up colors based on mode + if mode == "bodypart": + num_colors = num_keypoints + if pred_unique_bodyparts is not None: + num_colors += pred_unique_bodyparts.shape[1] + colors = get_cmap(num_colors, name=colormap) + + predictions = pred_bodyparts.swapaxes(0, 1) + ground_truth = gt_bodyparts.swapaxes(0, 1) + elif mode == "individual": + colors = get_cmap(num_pred + 1, name=colormap) + predictions = pred_bodyparts + ground_truth = gt_bodyparts + else: + raise ValueError(f"Invalid mode: {mode}") + + # Plot regular bodyparts + ax = make_multianimal_labeled_image( + frame, + ground_truth, + predictions[:, :, :2], + predictions[:, :, 2:], + colors, + dot_size, + alpha_value, + p_cutoff, + ax=ax, + ) + + # Plot unique bodyparts if present + if pred_unique_bodyparts is not None and gt_unique_bodyparts is not None: + if mode == "bodypart": + unique_predictions = pred_unique_bodyparts.swapaxes(0, 1) + unique_ground_truth = gt_unique_bodyparts.swapaxes(0, 1) + else: + unique_predictions = pred_unique_bodyparts + unique_ground_truth = gt_unique_bodyparts + + ax = make_multianimal_labeled_image( + frame, + unique_ground_truth, + unique_predictions[:, :, :2], + unique_predictions[:, :, 2:], + colors[num_keypoints:], + dot_size, + alpha_value, + p_cutoff, + ax=ax, + ) + + # Save the labeled image + save_labeled_frame( + fig, + str(image_path), + str(output_dir), + belongs_to_train=False, + ) + erase_artists(ax) + plt.close() + + def evaluate_snapshot( cfg: dict, loader: DLCLoader, @@ -289,6 +516,7 @@ def evaluate_snapshot( df_ground_truth, left_index=True, right_index=True ) unique_bodyparts = loader.get_dataset_parameters().unique_bpts + plot_evaluation_results( df_combined=df_combined, project_root=cfg["project_path"], @@ -502,6 +730,7 @@ def save_evaluation_results( df_scores.to_csv(combined_scores_path) + if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--config", type=str) From 6bdf05c1761550c2236d3f90429f0fb0399d4918 Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 21 Nov 2024 16:58:40 +0000 Subject: [PATCH 30/88] delete the initial attempt at function --- .../pose_estimation_pytorch/apis/evaluate.py | 113 ------------------ 1 file changed, 113 deletions(-) mode change 100644 => 100755 deeplabcut/pose_estimation_pytorch/apis/evaluate.py diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py old mode 100644 new mode 100755 
index 4eb7381772..a19ea9e0d7 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -174,119 +174,6 @@ def evaluate( predictions[image]["bodyparts"] = pose return results, predictions - - -import random -# def plot_predictions( -# loader: Loader, -# predictions: dict[str, dict[str, np.ndarray]], -# plotting: str = "bodypart", -# sample: int | None = None, -# sample_random: bool = False, -# ) -> None: - -def plot_predictions( - loader: Loader, - predictions: dict[str, dict[str, np.ndarray]], - plotting: str = "bodypart", - sample: int | None = None, - sample_random: bool = False, -) -> None: - """ - Process COCO format data and visualize using plot_evaluation_results - - Args: - loader: COCOLoader instance containing dataset info - predictions: Model predictions dictionary - plotting: How to color the points ("bodypart" or "individual") - sample: Number of images to visualize (None for all) - sample_random: Whether to sample images randomly - """ - - # Get paths and create output folder - project_root = loader.project_root - output_folder = Path(project_root) / "labeled_frames" - output_folder.mkdir(exist_ok=True) - - # 2. Get ground truth data - ground_truth = loader.load_data(mode="test") - - # 3. Create image list for sampling - image_ids = [img['id'] for img in ground_truth['images']] - if sample is not None: - if sample_random: - image_ids = random.sample(image_ids, min(sample, len(image_ids))) - else: - image_ids = image_ids[:sample] - - # 4. Create DataFrame structure - data = [] - - # Process ground truth - for img_id in image_ids: - img_info = next(img for img in ground_truth['images'] if img['id'] == img_id) - img_name = img_info['file_name'] - - # Get ground truth annotations - gt_anns = [ann for ann in ground_truth['annotations'] if ann['image_id'] == img_id] - - # Get predictions for this image - pred_anns = [pred for pred in predictions if pred['image_id'] == img_id] - - # Process each keypoint - for gt_ann, pred_ann in zip(gt_anns, pred_anns): - gt_kpts = np.array(gt_ann['keypoints']).reshape(-1, 3) - pred_kpts = np.array(pred_ann['keypoints']).reshape(-1, 3) - - # Get keypoint names - keypoint_names = ground_truth['categories'][0]['keypoints'] - - # Add ground truth points - for idx, (x, y, v) in enumerate(gt_kpts): - if v > 0: # visible keypoint - data.append({ - 'image': img_name, - 'scorer': 'ground_truth', - 'individual': f"instance_{gt_ann['id']}", - 'bodypart': keypoint_names[idx], - 'x': x, - 'y': y, - 'likelihood': 1.0 - }) - - # Add predictions - for idx, (x, y, score) in enumerate(pred_kpts): - if score > 0: # detected keypoint - data.append({ - 'image': img_name, - 'scorer': 'dlc_model', - 'individual': f"instance_{pred_ann['id']}", - 'bodypart': keypoint_names[idx], - 'x': x, - 'y': y, - 'likelihood': score - }) - - # 5. Create MultiIndex DataFrame - df = pd.DataFrame(data) - df_combined = df.set_index(['image', 'scorer', 'individual', 'bodypart']) - df_combined = df_combined.unstack(['scorer', 'individual', 'bodypart']) - - # 6. 
Call plot_evaluation_results - plot_evaluation_results( - df_combined=df_combined, - project_root=project_root, - scorer='ground_truth', - model_name='dlc_model', - output_folder=str(output_folder), - in_train_set=False, # Since we're using test data - mode=plotting, - plot_unique_bodyparts=False, # whether we should plot unique bodyparts - colormap='rainbow', # default values - dot_size=12, # default values - alpha_value=0.7, # default values - p_cutoff=0.6 # default values - ) def plot_gt_and_predictions( image_path: str | Path, From a3e99c472a82191a0c25240b46b5f25273ca930b Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 21 Nov 2024 17:31:15 +0000 Subject: [PATCH 31/88] add def visualize_coco_predictions(*) --- .../pose_estimation_pytorch/apis/evaluate.py | 75 ++++++++++++++++++- 1 file changed, 74 insertions(+), 1 deletion(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index a19ea9e0d7..0173d41cb8 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -19,7 +19,8 @@ import pandas as pd from tqdm import tqdm import matplotlib.pyplot as plt - +import json +import os import deeplabcut.core.metrics as metrics from deeplabcut.core.weight_init import WeightInitialization from deeplabcut.pose_estimation_pytorch import utils @@ -174,7 +175,79 @@ def evaluate( predictions[image]["bodyparts"] = pose return results, predictions + +def visualize_coco_predictions( + predictions: dict, + num_samples: int = 1, + test_file_json: str | Path = "test.json", + output_dir: str | Path | None = None, + draw_skeleton: bool = True, +) -> None: + """ + Visualize predictions using DeepLabCut's plot_gt_and_predictions function + Args: + predictions: Dictionary with image paths as keys and prediction data as values. + Each prediction contains: + - bodyparts: numpy array of shape (1, 37, 3) + - bboxes: numpy array of shape (1, 4) + - bbox_scores: numpy array of shape (1,) + num_samples: Number of samples to visualize + test_file_json: Path to test set JSON file + output_dir: Directory to save visualization outputs. 
If None, will create + a directory next to test_file_json + draw_skeleton: Whether to draw skeleton connections between keypoints + """ + # Load ground truth data + with open(test_file_json, "r") as f: + ground_truth = json.load(f) + + if output_dir is None: + output_dir = os.path.join(os.path.dirname(test_file_json), "predictions_visualizations") + os.makedirs(output_dir, exist_ok=True) + + image_paths = list(predictions.keys()) + if num_samples: + image_paths = image_paths[:num_samples] + + # Process each image + for image_path in image_paths: + pred_data = predictions[image_path] + img_info = next((img for img in ground_truth['images'] + if img['file_name'] == os.path.basename(image_path)), None) + if img_info is None: + print(f"Warning: Could not find image info for {image_path}") + continue + + gt_anns = [ann for ann in ground_truth['annotations'] + if ann['image_id'] == img_info['id']] + + if not gt_anns: + print(f"Warning: No ground truth annotations found for {image_path}") + continue + + gt_keypoints = np.array(gt_anns[0]['keypoints']).reshape(1, -1, 3) + vis_mask = gt_keypoints[:, :, 2] != -1 + + visible_gt = gt_keypoints[vis_mask] + visible_gt = visible_gt[None, :, :2] + + pred_keypoints = pred_data['bodyparts'] # Keep batch dimension + visible_pred = pred_keypoints + visible_pred = pred_keypoints[vis_mask].copy() + visible_pred = np.expand_dims(visible_pred, axis=0) + + try: + plot_gt_and_predictions( + image_path=image_path, + output_dir=output_dir, + gt_bodyparts=visible_gt, + pred_bodyparts=visible_pred + ) + print(f"Successfully plotted predictions for {image_path}") + except Exception as e: + print(f"Error plotting predictions for {image_path}: {str(e)}") + def plot_gt_and_predictions( image_path: str | Path, output_dir: str | Path, From 1b9809095ccfc615a7228e7519b606a5260f81c9 Mon Sep 17 00:00:00 2001 From: ti Date: Mon, 25 Nov 2024 12:12:04 +0100 Subject: [PATCH 32/88] Revert "Fix: correct the early return error when save_epochs=1 and delelte the redunant snapshots at the end" This reverts commit 31d8652ff552756954907af27b8d866be08fe07a. 
--- .../runners/snapshots.py | 34 +++++++++---------- 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py index bc5a30a44c..341b3c4be4 100644 --- a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py +++ b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py @@ -113,43 +113,41 @@ def update(self, epoch: int, state_dict: dict, last: bool = False) -> None: ): current_best = self.best() self._best_metric = metrics[self._key] - - # Save the new best model save_path = self.snapshot_path(epoch, best=True) parsed_state_dict = { - k: v + k: v for k, v in state_dict.items() if self.save_optimizer_state or k != "optimizer" } torch.save(parsed_state_dict, save_path) - # Handle previous best model if current_best is not None: + # rename if the current best should have been saved, otherwise delete if current_best.epochs % self.save_epochs == 0: new_name = self.snapshot_path(epoch=current_best.epochs) current_best.path.rename(new_name) else: current_best.path.unlink(missing_ok=False) - else: - # Save regular snapshot if needed - should_save = last or epoch % self.save_epochs == 0 - if should_save: - save_path = self.snapshot_path(epoch=epoch) - parsed_state_dict = { - k: v - for k, v in state_dict.items() - if self.save_optimizer_state or k != "optimizer" - } - torch.save(parsed_state_dict, save_path) - - # Clean up old snapshots if needed + return + + if not (last or epoch % self.save_epochs == 0): + return + existing_snapshots = [s for s in self.snapshots() if not s.best] if len(existing_snapshots) >= self.max_snapshots: - num_to_delete = len(existing_snapshots) - self.max_snapshots + num_to_delete = 1 + len(existing_snapshots) - self.max_snapshots to_delete = existing_snapshots[:num_to_delete] for snapshot in to_delete: snapshot.path.unlink(missing_ok=False) + save_path = self.snapshot_path(epoch=epoch) + parsed_state_dict = { + k: v + for k, v in state_dict.items() + if self.save_optimizer_state or k != "optimizer" + } + torch.save(parsed_state_dict, save_path) + def best(self) -> Snapshot | None: """Returns: the path to the best snapshot, if it exists""" snapshots = self.snapshots() From b0c1fd9613c45c4b659e74ddaa35723aec286926 Mon Sep 17 00:00:00 2001 From: ti Date: Mon, 25 Nov 2024 12:20:22 +0100 Subject: [PATCH 33/88] isort and black -> evaluate.py --- .../pose_estimation_pytorch/apis/evaluate.py | 78 +++++++++++-------- 1 file changed, 45 insertions(+), 33 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index 0173d41cb8..32dc2c0b8a 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -11,16 +11,17 @@ from __future__ import annotations import argparse +import json +import os from pathlib import Path from typing import Iterable import albumentations as A +import matplotlib.pyplot as plt import numpy as np import pandas as pd from tqdm import tqdm -import matplotlib.pyplot as plt -import json -import os + import deeplabcut.core.metrics as metrics from deeplabcut.core.weight_init import WeightInitialization from deeplabcut.pose_estimation_pytorch import utils @@ -37,15 +38,14 @@ from deeplabcut.pose_estimation_pytorch.runners import InferenceRunner from deeplabcut.pose_estimation_pytorch.runners.snapshots import Snapshot from deeplabcut.pose_estimation_pytorch.task import Task -from deeplabcut.utils import 
auxiliaryfunctions -from deeplabcut.utils.visualization import plot_evaluation_results -from deeplabcut.utils import auxfun_videos +from deeplabcut.utils import auxfun_videos, auxiliaryfunctions from deeplabcut.utils.visualization import ( create_minimal_figure, + erase_artists, get_cmap, make_multianimal_labeled_image, + plot_evaluation_results, save_labeled_frame, - erase_artists, ) @@ -176,6 +176,7 @@ def evaluate( return results, predictions + def visualize_coco_predictions( predictions: dict, num_samples: int = 1, @@ -185,12 +186,12 @@ def visualize_coco_predictions( ) -> None: """ Visualize predictions using DeepLabCut's plot_gt_and_predictions function - + Args: predictions: Dictionary with image paths as keys and prediction data as values. Each prediction contains: - bodyparts: numpy array of shape (1, 37, 3) - - bboxes: numpy array of shape (1, 4) + - bboxes: numpy array of shape (1, 4) - bbox_scores: numpy array of shape (1,) num_samples: Number of samples to visualize test_file_json: Path to test set JSON file @@ -203,7 +204,9 @@ def visualize_coco_predictions( ground_truth = json.load(f) if output_dir is None: - output_dir = os.path.join(os.path.dirname(test_file_json), "predictions_visualizations") + output_dir = os.path.join( + os.path.dirname(test_file_json), "predictions_visualizations" + ) os.makedirs(output_dir, exist_ok=True) image_paths = list(predictions.keys()) @@ -212,42 +215,52 @@ def visualize_coco_predictions( # Process each image for image_path in image_paths: - pred_data = predictions[image_path] - img_info = next((img for img in ground_truth['images'] - if img['file_name'] == os.path.basename(image_path)), None) + pred_data = predictions[image_path] + img_info = next( + ( + img + for img in ground_truth["images"] + if img["file_name"] == os.path.basename(image_path) + ), + None, + ) if img_info is None: print(f"Warning: Could not find image info for {image_path}") continue - - gt_anns = [ann for ann in ground_truth['annotations'] - if ann['image_id'] == img_info['id']] - + + gt_anns = [ + ann + for ann in ground_truth["annotations"] + if ann["image_id"] == img_info["id"] + ] + if not gt_anns: print(f"Warning: No ground truth annotations found for {image_path}") continue - gt_keypoints = np.array(gt_anns[0]['keypoints']).reshape(1, -1, 3) + gt_keypoints = np.array(gt_anns[0]["keypoints"]).reshape(1, -1, 3) vis_mask = gt_keypoints[:, :, 2] != -1 - + visible_gt = gt_keypoints[vis_mask] visible_gt = visible_gt[None, :, :2] - - pred_keypoints = pred_data['bodyparts'] # Keep batch dimension - visible_pred = pred_keypoints + + pred_keypoints = pred_data["bodyparts"] # Keep batch dimension + visible_pred = pred_keypoints visible_pred = pred_keypoints[vis_mask].copy() visible_pred = np.expand_dims(visible_pred, axis=0) - + try: plot_gt_and_predictions( image_path=image_path, output_dir=output_dir, gt_bodyparts=visible_gt, - pred_bodyparts=visible_pred + pred_bodyparts=visible_pred, ) print(f"Successfully plotted predictions for {image_path}") except Exception as e: print(f"Error plotting predictions for {image_path}: {str(e)}") - + + def plot_gt_and_predictions( image_path: str | Path, output_dir: str | Path, @@ -262,7 +275,7 @@ def plot_gt_and_predictions( p_cutoff: float = 0.6, ): """Plot ground truth and predictions on an image. 
- + Args: image_path: Path to the image gt_bodyparts: Ground truth keypoints array (num_animals, num_keypoints, 3) @@ -299,7 +312,7 @@ def plot_gt_and_predictions( if pred_unique_bodyparts is not None: num_colors += pred_unique_bodyparts.shape[1] colors = get_cmap(num_colors, name=colormap) - + predictions = pred_bodyparts.swapaxes(0, 1) ground_truth = gt_bodyparts.swapaxes(0, 1) elif mode == "individual": @@ -330,7 +343,7 @@ def plot_gt_and_predictions( else: unique_predictions = pred_unique_bodyparts unique_ground_truth = gt_unique_bodyparts - + ax = make_multianimal_labeled_image( frame, unique_ground_truth, @@ -342,7 +355,7 @@ def plot_gt_and_predictions( p_cutoff, ax=ax, ) - + # Save the labeled image save_labeled_frame( fig, @@ -352,8 +365,8 @@ def plot_gt_and_predictions( ) erase_artists(ax) plt.close() - - + + def evaluate_snapshot( cfg: dict, loader: DLCLoader, @@ -409,7 +422,7 @@ def evaluate_snapshot( parameters = PoseDatasetParameters( bodyparts=project_bodyparts, unique_bpts=parameters.unique_bpts, - individuals=parameters.individuals + individuals=parameters.individuals, ) predictions = {} @@ -690,7 +703,6 @@ def save_evaluation_results( df_scores.to_csv(combined_scores_path) - if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--config", type=str) From 94a310de5bfe811f41b75b0e8634e437a7d28027 Mon Sep 17 00:00:00 2001 From: n-poulsen <45132115+n-poulsen@users.noreply.github.com> Date: Thu, 21 Nov 2024 17:09:40 +0100 Subject: [PATCH 34/88] Load scheduler state when resuming training (#2788) --- .../pose_estimation_pytorch/runners/base.py | 19 +- .../runners/schedulers.py | 39 +++- .../pose_estimation_pytorch/runners/train.py | 133 ++++++++--- docs/pytorch/pytorch_config.md | 15 +- .../other/test_schedulers.py | 89 -------- .../runners/test_runners_train.py | 211 ++++++++++++++++++ .../runners/test_schedulers.py | 194 ++++++++++++++++ 7 files changed, 563 insertions(+), 137 deletions(-) delete mode 100644 tests/pose_estimation_pytorch/other/test_schedulers.py create mode 100644 tests/pose_estimation_pytorch/runners/test_runners_train.py create mode 100644 tests/pose_estimation_pytorch/runners/test_schedulers.py diff --git a/deeplabcut/pose_estimation_pytorch/runners/base.py b/deeplabcut/pose_estimation_pytorch/runners/base.py index 987a9f55f0..f25def2856 100644 --- a/deeplabcut/pose_estimation_pytorch/runners/base.py +++ b/deeplabcut/pose_estimation_pytorch/runners/base.py @@ -63,21 +63,20 @@ def load_snapshot( snapshot_path: str | Path, device: str, model: ModelType, - optimizer: torch.optim.Optimizer | None = None, - ) -> int: - """ + ) -> dict: + """Loads the state dict for a model from a file + + This method loads a file containing a DeepLabCut PyTorch model snapshot onto + a given device, and sets the model weights using the state_dict. + Args: snapshot_path: the path containing the model weights to load device: the device on which the model should be loaded model: the model for which the weights are loaded - optimizer: if defined, the optimizer weights to load Returns: - the number of epochs the model was trained for + The content of the snapshot file. 
""" snapshot = torch.load(snapshot_path, map_location=device) - model.load_state_dict(snapshot['model']) - if optimizer is not None and 'optimizer' in snapshot: - optimizer.load_state_dict(snapshot["optimizer"]) - - return snapshot.get("metadata", {}).get("epoch", 0) + model.load_state_dict(snapshot["model"]) + return snapshot diff --git a/deeplabcut/pose_estimation_pytorch/runners/schedulers.py b/deeplabcut/pose_estimation_pytorch/runners/schedulers.py index a8e5615876..3a257d813a 100644 --- a/deeplabcut/pose_estimation_pytorch/runners/schedulers.py +++ b/deeplabcut/pose_estimation_pytorch/runners/schedulers.py @@ -16,10 +16,10 @@ class LRListScheduler(_LRScheduler): """ - Definition of the class object Scheduler. - You can achieve increased performance and faster training by using a learning rate that changes - during training. A scheduler makes the learning rate adaptative. Given a list of learning rates - and milestones modifies the learning rate accordingly during training + You can achieve increased performance and faster training by using a learning rate + that changes during training. A scheduler makes the learning rate adaptive. Given a + list of learning rates and milestones modifies the learning rate accordingly during + training. """ def __init__(self, optimizer, milestones, lr_list, last_epoch=-1) -> None: @@ -78,3 +78,34 @@ def build_scheduler( scheduler = getattr(torch.optim.lr_scheduler, scheduler_cfg["type"]) return scheduler(optimizer=optimizer, **scheduler_cfg["params"]) + + +def load_scheduler_state( + scheduler: torch.optim.lr_scheduler.LRScheduler, + state_dict: dict, +) -> None: + """ + Args: + scheduler: The scheduler for which to load the state dict. + state_dict: The state dict to load + + Raises: + ValueError: if the state dict fails to load. + """ + try: + scheduler.load_state_dict(state_dict) + except Exception as err: + raise ValueError(f"Failed to load state dict: {err}") + + param_groups = scheduler.optimizer.param_groups + resume_lrs = scheduler.get_last_lr() + + if len(param_groups) != len(resume_lrs): + raise ValueError( + f"Number of optimizer parameter groups ({len(param_groups)}) did not match " + f"number of learning rates to resume from ({len(scheduler.get_last_lr())})." + ) + + # Update the learning rate for the optimizer based on the scheduler + for group, resume_lr in zip(param_groups, resume_lrs): + group['lr'] = resume_lr diff --git a/deeplabcut/pose_estimation_pytorch/runners/train.py b/deeplabcut/pose_estimation_pytorch/runners/train.py index 23fb74b518..1362770db7 100644 --- a/deeplabcut/pose_estimation_pytorch/runners/train.py +++ b/deeplabcut/pose_estimation_pytorch/runners/train.py @@ -23,6 +23,7 @@ from torch.nn.parallel import DataParallel import deeplabcut.core.metrics as metrics +import deeplabcut.pose_estimation_pytorch.runners.schedulers as schedulers from deeplabcut.pose_estimation_pytorch.models.detectors import BaseDetector from deeplabcut.pose_estimation_pytorch.models.model import PoseModel from deeplabcut.pose_estimation_pytorch.runners.base import ModelType, Runner @@ -31,47 +32,61 @@ CSVLogger, ImageLoggerMixin, ) -from deeplabcut.pose_estimation_pytorch.runners.schedulers import build_scheduler from deeplabcut.pose_estimation_pytorch.runners.snapshots import TorchSnapshotManager from deeplabcut.pose_estimation_pytorch.task import Task class TrainingRunner(Runner, Generic[ModelType], metaclass=ABCMeta): - """Runner base class + """Base TrainingRunner class. 
-    A runner takes a model and runs actions on it, such as training or inference
+    A TrainingRunner is used to fit models to datasets. Subclasses must implement the
+    ``step(self, batch, mode)`` method, which performs a single training or validation
+    step on a batch of data. The step is different depending on the model type (e.g.
+    a pose model step vs. an object detector step).
+
+    Args:
+        model: The model to fit.
+        optimizer: The optimizer to use to fit the model.
+        snapshot_manager: Manages how snapshots are saved to disk during training.
+        device: The device on which to run training (e.g. 'cpu', 'cuda', 'cuda:0').
+        gpus: Used to specify the GPU indices for multi-GPU training (e.g. [0, 1, 2, 3]
+            to train on 4 GPUs). When a GPUs list is given, the device must be 'cuda'.
+        eval_interval: The interval at which the model will be evaluated while training
+            (e.g. `eval_interval=5` means the model will be evaluated every 5 epochs).
+        snapshot_path: If continuing to train a model, the path to the snapshot to
+            resume training from.
+        scheduler: The learning rate scheduler (or its configuration), if one should be
+            used.
+        load_scheduler_state_dict: When resuming training (snapshot_path is not None),
+            attempts to load the scheduler state dict from the snapshot. If you've
+            modified your scheduler, set this to False or the old scheduler parameters
+            might be used.
+        logger: Logger to monitor training (e.g. a WandBLogger).
+        log_filename: Name of the file in which to store training stats.
     """
 
     def __init__(
         self,
         model: ModelType,
-        optimizer: torch.optim.Optimizer,
+        optimizer: dict | torch.optim.Optimizer,
         snapshot_manager: TorchSnapshotManager,
         device: str = "cpu",
         gpus: list[int] | None = None,
         eval_interval: int = 1,
         snapshot_path: str | Path | None = None,
-        scheduler: torch.optim.lr_scheduler.LRScheduler | None = None,
+        scheduler: dict | torch.optim.lr_scheduler.LRScheduler | None = None,
+        load_scheduler_state_dict: bool = True,
         logger: BaseLogger | None = None,
         log_filename: str = "learning_stats.csv",
     ):
-        """
-        Args:
-            model: the model to run actions on
-            optimizer: the optimizer to use when fitting the model
-            snapshot_manager: the module to use to manage snapshots
-            device: the device to use (e.g. {'cpu', 'cuda:0', 'mps'})
-            gpus: the list of GPU indices to use for multi-GPU training
-            eval_interval: how often evaluation is run on the test set (in epochs)
-            snapshot_path: if defined, the path of a snapshot from which to load
-                pretrained weights
-            scheduler: scheduler for adjusting the lr of the optimizer
-            logger: logger to monitor training (e.g WandB logger)
-            log_filename: name of the file in which to store training stats
-        """
         super().__init__(
             model=model, device=device, gpus=gpus, snapshot_path=snapshot_path
         )
+        if isinstance(optimizer, dict):
+            optimizer = build_optimizer(model, optimizer)
+        if isinstance(scheduler, dict):
+            scheduler = schedulers.build_scheduler(scheduler, optimizer)
+
         self.eval_interval = eval_interval
         self.optimizer = optimizer
         self.scheduler = scheduler
@@ -88,28 +103,34 @@ def __init__(
         # some models cannot compute a validation loss (e.g. 
detectors) self._print_valid_loss = True - if self.snapshot_path is not None and self.snapshot_path != "": - self.starting_epoch = self.load_snapshot( - self.snapshot_path, - self.device, - self.model, - self.optimizer, - ) + if self.snapshot_path: + snapshot = self.load_snapshot(self.snapshot_path, self.device, self.model) + self.starting_epoch = snapshot.get("metadata", {}).get("epoch", 0) + + if "optimizer" in snapshot: + self.optimizer.load_state_dict(snapshot["optimizer"]) + + self._load_scheduler_state_dict(load_scheduler_state_dict, snapshot) self._metadata = dict(epoch=self.starting_epoch, metrics=dict(), losses=dict()) self._epoch_ground_truth = {} self._epoch_predictions = {} def state_dict(self) -> dict: + """Returns: the state dict for the runner""" model = self.model if self._data_parallel: model = self.model.module - return { - "metadata": self._metadata, - "model": model.state_dict(), - "optimizer": self.optimizer.state_dict(), - } + state_dict_ = dict( + metadata=self._metadata, + model=model.state_dict(), + optimizer=self.optimizer.state_dict(), + ) + if self.scheduler is not None: + state_dict_["scheduler"] = self.scheduler.state_dict() + + return state_dict_ @abstractmethod def step( @@ -256,7 +277,9 @@ def _epoch( if len(epoch_loss) > 0: epoch_loss = np.mean(epoch_loss).item() - self.history[f"{mode}_loss"].append(epoch_loss) + else: + epoch_loss = 0 + self.history[f"{mode}_loss"].append(epoch_loss) metrics_to_log = {} if perf_metrics: @@ -279,6 +302,29 @@ def _epoch( return epoch_loss + def _load_scheduler_state_dict(self, load_state_dict: bool, snapshot: dict) -> None: + if self.scheduler is None: + return + + loaded_state_dict = False + if load_state_dict and "scheduler" in snapshot: + try: + schedulers.load_scheduler_state(self.scheduler, snapshot["scheduler"]) + loaded_state_dict = True + except ValueError as err: + logging.warning( + "Failed to load the scheduler state_dict. The scheduler will " + "restart at epoch 0. This is expected if the scheduler " + "configuration was edited since the original snapshot was " + f"trained. Error: {err}" + ) + + if not loaded_state_dict and self.starting_epoch > 0: + logging.info( + f"Setting the scheduler starting epoch to {self.starting_epoch}" + ) + self.scheduler.last_epoch = self.starting_epoch + class PoseTrainingRunner(TrainingRunner[PoseModel]): """Runner to train pose estimation models""" @@ -596,11 +642,13 @@ def build_training_runner( optim_cfg = runner_config["optimizer"] optim_cls = getattr(torch.optim, optim_cfg["type"]) optimizer = optim_cls(params=model.parameters(), **optim_cfg["params"]) - scheduler = build_scheduler(runner_config.get("scheduler"), optimizer) + scheduler = schedulers.build_scheduler(runner_config.get("scheduler"), optimizer) + # if no custom snapshot prefix is defined, use the default one snapshot_prefix = runner_config.get("snapshot_prefix") if snapshot_prefix is None or len(snapshot_prefix) == 0: snapshot_prefix = task.snapshot_prefix + kwargs = dict( model=model, optimizer=optimizer, @@ -618,9 +666,28 @@ def build_training_runner( eval_interval=runner_config.get("eval_interval"), snapshot_path=snapshot_path, scheduler=scheduler, + load_scheduler_state_dict=runner_config.get("load_scheduler_state_dict", True), logger=logger, ) if task == Task.DETECT: return DetectorTrainingRunner(**kwargs) return PoseTrainingRunner(**kwargs) + + +def build_optimizer( + model: nn.Module, + optimizer_config: dict, +) -> torch.optim.Optimizer: + """Builds an optimizer from a configuration. 
+
+    Args:
+        model: The model to optimize.
+        optimizer_config: The configuration for the optimizer.
+
+    Returns:
+        The optimizer for the model built according to the given configuration.
+    """
+    optim_cls = getattr(torch.optim, optimizer_config["type"])
+    optimizer = optim_cls(params=model.parameters(), **optimizer_config["params"])
+    return optimizer
diff --git a/docs/pytorch/pytorch_config.md b/docs/pytorch/pytorch_config.md
index b1b23954d3..a1ce5768c9 100644
--- a/docs/pytorch/pytorch_config.md
+++ b/docs/pytorch/pytorch_config.md
@@ -288,6 +288,7 @@ runner:
     ...
   scheduler:  # optional: a learning rate scheduler
     ...
+  load_scheduler_state_dict: true/false  # whether to load scheduler state when resuming training from a snapshot
   snapshots:  # parameters for the TorchSnapshotManager
     max_snapshots: 5  # the maximum number of snapshots to save (the "best" model does not count as one of them)
     save_epochs: 25  # the interval between each snapshot save
@@ -327,7 +328,7 @@ https://pytorch.org/docs/stable/optim.html). Examples:
     lr: 1e-4
 ```
 
-**Scheduler**: YYou can use [any scheduler](
+**Scheduler**: You can use [any scheduler](
 https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate) defined in
 `torch.optim.lr_scheduler`, where the arguments given are arguments of the scheduler.
 The default scheduler is an LRListScheduler, which changes the learning rates at each
@@ -410,6 +411,12 @@ continue to train from the 10th epoch on.
   resume_training_from: /Users/john/dlc-project-2021-06-22/dlc-models-pytorch/iteration-0/dlcJun22-trainset95shuffle0/train/snapshot-010.pt
 ```
 
+When continuing to train a model, you may want to modify the learning rate scheduling
+that was being used (by editing the configuration under the `scheduler` key). When doing
+so, you *must set `load_scheduler_state_dict: false`* in your `runner` config!
+Otherwise, the parameters for the scheduler you started training with will be loaded
+from the state dictionary, and your edits might not be kept!
+
 ## Training Top-Down Models
 
 Top-down models are split into two main elements: a detector (localizing individuals in
@@ -479,3 +486,9 @@ detector:
   # weights from which to resume training
   resume_training_from: /Users/john/dlc-project-2021-06-22/dlc-models-pytorch/iteration-0/dlcJun22-trainset95shuffle0/train/snapshot-detector-020.pt
 ```
+
+When continuing to train a detector, you may want to modify the learning rate scheduling
+that was being used (by editing the configuration under the `scheduler` key). When doing
+so, you *must set `load_scheduler_state_dict: false`* in your `detector`: `runner`
+config! Otherwise, the parameters for the scheduler you started training with will be
+loaded from the state dictionary, and your edits might not be kept!
diff --git a/tests/pose_estimation_pytorch/other/test_schedulers.py b/tests/pose_estimation_pytorch/other/test_schedulers.py
deleted file mode 100644
index a043020f70..0000000000
--- a/tests/pose_estimation_pytorch/other/test_schedulers.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#
-# DeepLabCut Toolbox (deeplabcut.org)
-# © A. & M.W. Mathis Labs
-# https://github.com/DeepLabCut/DeepLabCut
-#
-# Please see AUTHORS for contributors.
-# https://github.com/DeepLabCut/DeepLabCut/blob/main/AUTHORS -# -# Licensed under GNU Lesser General Public License v3.0 -# - -import random - -import pytest -import torch -from torch.optim import SGD - -import deeplabcut.pose_estimation_pytorch.runners.schedulers as deeplabcut_torch_schedulers - - -def generate_random_lr_list(num_floats: int): - """Summary: - Generate list of lists including random numbers. - - Args: - num_floats: number of floats we want to include in our list - - Returns: - ran_list: random list of sorted numbers, being first number bigger than the last - - Examples: - input: num_float = 2 - output: [[0.96420871896179], [0.3917365732012833]] - """ - ran_list = [] - for i in range(num_floats): - random_floats = [random.random()] - ran_list.append(random_floats) - return sorted(ran_list, reverse=True) - - -milestones = random.sample(range(0, 999), 2) -milestones.sort() -data = [([10, 430], [[0.05], [0.005]]), (milestones, generate_random_lr_list(2))] -# testing for default values in pytorch_config and also for random values with pytest parametrize - - -@pytest.mark.parametrize("milestones, lr_list", data) -def test_scheduler(milestones, lr_list): - """Summary: - Testing schedulers.py. - Given a list of milestones and a list of learning rates, this function tests - if the length of each list is the same. Furthermore, it will assess if - the current learning rate (output from the function we are testing) is a float - and corresponds to the expected learning rate given the milestones. - - Args: - milestones: list of epochs indices (number of epochs) - lr_list: learning rates list - - Returns: - None - - Examples: - input: - milestones = [10,25,50] - lr_list = [[0.00001],[0.000005],[0.000001]] - """ - - assert len(milestones) == len(lr_list) - - optimizer = torch.optim.SGD([torch.randn(2, 2)], lr=0.01) - lrlistscheduler = deeplabcut_torch_schedulers.LRListScheduler( - optimizer, milestones=milestones, lr_list=lr_list - ) - - index_rng = range(milestones[0], milestones[1]) - for i in range((milestones[-1]) + 1): - if i < milestones[0]: - expected_lr = [0.01] - elif i in index_rng: - expected_lr = lr_list[0] - else: - expected_lr = lr_list[1] - - current_lr = lrlistscheduler.get_lr()[0] - assert lrlistscheduler.get_lr() == expected_lr - assert isinstance(current_lr, float) - lrlistscheduler.step() diff --git a/tests/pose_estimation_pytorch/runners/test_runners_train.py b/tests/pose_estimation_pytorch/runners/test_runners_train.py new file mode 100644 index 0000000000..5e7e71989e --- /dev/null +++ b/tests/pose_estimation_pytorch/runners/test_runners_train.py @@ -0,0 +1,211 @@ +# +# DeepLabCut Toolbox (deeplabcut.org) +# © A. & M.W. Mathis Labs +# https://github.com/DeepLabCut/DeepLabCut +# +# Please see AUTHORS for contributors. 
+# https://github.com/DeepLabCut/DeepLabCut/blob/main/AUTHORS +# +# Licensed under GNU Lesser General Public License v3.0 +# +from dataclasses import dataclass +from unittest.mock import Mock, patch + +import numpy as np +import pytest +import torch + +import deeplabcut.pose_estimation_pytorch.runners.schedulers as schedulers +import deeplabcut.pose_estimation_pytorch.runners.train as train_runners + + +@dataclass +class SchedulerTestConfig: + cfg: dict + init_lr: float + expected_lrs: list[float] + + +TEST_SCHEDULERS = [ + SchedulerTestConfig( + cfg=dict( + type="LRListScheduler", + params=dict(milestones=[2, 5], lr_list=[[0.5], [0.1]]) + ), + init_lr=1.0, + expected_lrs=[1.0, 1.0, 0.5, 0.5, 0.5, 0.1, 0.1, 0.1], + ), + SchedulerTestConfig( + cfg=dict(type="LRListScheduler", params=dict(milestones=[1], lr_list=[[0.1]])), + init_lr=0.1, + expected_lrs=[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1], + ), + SchedulerTestConfig( + cfg=dict(type="LRListScheduler", params=dict(milestones=[1], lr_list=[[0.5]])), + init_lr=0.1, + expected_lrs=[0.1, 0.5, 0.5, 0.5], + ), + SchedulerTestConfig( + cfg=dict(type="StepLR", params=dict(step_size=3, gamma=0.1)), + init_lr=1.0, + expected_lrs=[1.0, 1.0, 1.0, 0.1, 0.1, 0.1, 0.01, 0.01, 0.01, 0.001], + ), +] + + +@patch("deeplabcut.pose_estimation_pytorch.runners.train.CSVLogger", Mock()) +@pytest.mark.parametrize("runner_cls", [ + train_runners.PoseTrainingRunner, train_runners.DetectorTrainingRunner, +]) +@pytest.mark.parametrize("test_cfg", TEST_SCHEDULERS) +def test_training_with_scheduler(runner_cls, test_cfg: SchedulerTestConfig) -> None: + runner = _fit_runner_and_check_lrs( + runner_cls, + test_cfg.init_lr, + test_cfg.cfg, + test_cfg.expected_lrs, + ) + assert runner.current_epoch == len(test_cfg.expected_lrs) + + +@patch("deeplabcut.pose_estimation_pytorch.runners.train.CSVLogger", Mock()) +@pytest.mark.parametrize("runner_cls", [ + train_runners.PoseTrainingRunner, train_runners.DetectorTrainingRunner, +]) +@pytest.mark.parametrize("test_cfg", TEST_SCHEDULERS) +def test_resuming_training_scheduler_every_epoch( + runner_cls, + test_cfg: SchedulerTestConfig, +): + snapshot_to_load = None + for epoch, expected_lr in enumerate(test_cfg.expected_lrs): + runner = _fit_runner_and_check_lrs( + runner_cls, + test_cfg.init_lr, + test_cfg.cfg, + [expected_lr], # trains for 1 epoch + snapshot_to_load=snapshot_to_load, + ) + snapshot_to_load = dict( + metadata=dict(epoch=epoch + 1), scheduler=runner.scheduler.state_dict() + ) + + +@patch("deeplabcut.pose_estimation_pytorch.runners.train.CSVLogger", Mock()) +@pytest.mark.parametrize("runner_cls", [ + train_runners.PoseTrainingRunner, train_runners.DetectorTrainingRunner, +]) +@pytest.mark.parametrize( + "test_cfg, resume_epoch", + [ + ( + SchedulerTestConfig( + cfg=dict( + type="LRListScheduler", + params=dict(milestones=[2, 5], lr_list=[[0.5], [0.1]]) + ), + init_lr=1.0, + expected_lrs=[1.0, 1.0, 0.5, 1.0, 1.0, 0.1, 0.1, 0.1], + ), + 3, # cut after the 3rd epoch - restart at LR=1 until epoch 5 + ), + ( + SchedulerTestConfig( + cfg=dict(type="StepLR", params=dict(step_size=4, gamma=0.1)), + init_lr=1.0, + expected_lrs=(4 * [1.0]) + (4 * [0.1]) + (4 * [0.01]) + (4 * [0.001]), + ), + 3, # cut after the 3rd epoch - restart at LR=1 and update at 4 correctly + ), + ( + SchedulerTestConfig( + cfg=dict(type="StepLR", params=dict(step_size=4, gamma=0.1)), + init_lr=1.0, + expected_lrs=(4 * [1.0]) + [0.1, 1, 1, 1] + (4 * [0.1]), + ), + 5, # cut after the 5th epoch - restart at LR=1 and update again at 8 + ) + ] +) +def 
test_resuming_training_with_no_scheduler_state( + runner_cls, + test_cfg: SchedulerTestConfig, + resume_epoch: int +): + """ + Without a scheduler config, there is no way to set the initial LR. All we can do is + set the last_epoch value, and adjust correctly at milestones going forward. + """ + runner = _fit_runner_and_check_lrs( + runner_cls, + test_cfg.init_lr, + test_cfg.cfg, + test_cfg.expected_lrs[:resume_epoch], + ) + assert runner.current_epoch == resume_epoch + + runner = _fit_runner_and_check_lrs( + runner_cls, + test_cfg.init_lr, + test_cfg.cfg, + expected_lrs=test_cfg.expected_lrs[resume_epoch:], + snapshot_to_load=dict(metadata=dict(epoch=resume_epoch)), + ) + assert runner.current_epoch == len(test_cfg.expected_lrs) + + +def _fit_runner_and_check_lrs( + runner_cls, + init_lr: float, + scheduler_cfg: dict, + expected_lrs: list[float], + snapshot_to_load: dict | None = None, +) -> train_runners.TrainingRunner: + runner_kwargs = dict(device="cpu", eval_interval=1_000_000) + optimizer = torch.optim.SGD([torch.randn(2, 2)], lr=init_lr) + scheduler = schedulers.build_scheduler(scheduler_cfg, optimizer) + num_epochs = len(expected_lrs) + + with patch( + "deeplabcut.pose_estimation_pytorch.runners.Runner.load_snapshot" + ) as mock_load_snapshot: + snapshot_path = None + mock_load_snapshot.return_value = dict() + if snapshot_to_load is not None: + snapshot_path = "fake_snapshot.pt" + mock_load_snapshot.return_value = snapshot_to_load + + print() + print(f"Scheduler: {scheduler}") + print(f"Starting training for {num_epochs} epochs") + runner = runner_cls( + model=Mock(), + optimizer=optimizer, + snapshot_manager=Mock(), + scheduler=scheduler, + snapshot_path=snapshot_path, + **runner_kwargs + ) + + # Mock the step call; check that the learning rate is correct for the epoch + def step(*args, **kwargs): + # the current_epoch value is indexed at 1 + total_epoch = (runner.current_epoch - 1) + epoch = total_epoch - runner.starting_epoch + _assert_learning_rates_match(total_epoch, optimizer, expected_lrs[epoch]) + optimizer.step() + return dict(total_loss=0) + + train_loader, val_loader = [Mock()], [Mock()] + runner.step = step + runner.fit(train_loader, val_loader, epochs=num_epochs, display_iters=1000) + + return runner + + +def _assert_learning_rates_match(e, optimizer, expected): + current_lrs = [g["lr"] for g in optimizer.param_groups] + print(f"Epoch {e}: LR={current_lrs}, expected={expected}") + for lr in current_lrs: + assert isinstance(lr, float) + np.testing.assert_almost_equal(lr, expected) diff --git a/tests/pose_estimation_pytorch/runners/test_schedulers.py b/tests/pose_estimation_pytorch/runners/test_schedulers.py new file mode 100644 index 0000000000..d8eb06b81c --- /dev/null +++ b/tests/pose_estimation_pytorch/runners/test_schedulers.py @@ -0,0 +1,194 @@ +# +# DeepLabCut Toolbox (deeplabcut.org) +# © A. & M.W. Mathis Labs +# https://github.com/DeepLabCut/DeepLabCut +# +# Please see AUTHORS for contributors. +# https://github.com/DeepLabCut/DeepLabCut/blob/main/AUTHORS +# +# Licensed under GNU Lesser General Public License v3.0 +# + +import random +from dataclasses import dataclass + +import numpy as np +import pytest +import torch + +import deeplabcut.pose_estimation_pytorch.runners.schedulers as schedulers + + +def generate_random_lr_list(num_floats: int): + """Summary: + Generate list of lists including random numbers. 
+ + Args: + num_floats: number of floats we want to include in our list + + Returns: + ran_list: random list of sorted numbers, being first number bigger than the last + + Examples: + input: num_float = 2 + output: [[0.96420871896179], [0.3917365732012833]] + """ + ran_list = [] + for i in range(num_floats): + random_floats = [random.random()] + ran_list.append(random_floats) + return sorted(ran_list, reverse=True) + + +@pytest.mark.parametrize( + "milestones, lr_list", + [ + ([10, 430], [[0.05], [0.005]]), + (list(sorted(random.sample(range(0, 999), 2))), generate_random_lr_list(2)) + ] +) +def test_scheduler(milestones, lr_list): + """Summary: + Testing schedulers.py. + Given a list of milestones and a list of learning rates, this function tests + if the length of each list is the same. Furthermore, it will assess if + the current learning rate (output from the function we are testing) is a float + and corresponds to the expected learning rate given the milestones. + + Args: + milestones: list of epochs indices (number of epochs) + lr_list: learning rates list + + Returns: + None + + Examples: + input: + milestones = [10,25,50] + lr_list = [[0.00001],[0.000005],[0.000001]] + """ + + assert len(milestones) == len(lr_list) + + optimizer = torch.optim.SGD([torch.randn(2, 2)], lr=0.01) + s = schedulers.LRListScheduler(optimizer, milestones=milestones, lr_list=lr_list) + + index_rng = range(milestones[0], milestones[1]) + for i in range((milestones[-1]) + 1): + if i < milestones[0]: + expected_lr = [0.01] + elif i in index_rng: + expected_lr = lr_list[0] + else: + expected_lr = lr_list[1] + + current_lr = s.get_lr()[0] + assert s.get_lr() == expected_lr + assert isinstance(current_lr, float) + optimizer.step() + s.step() + + +@dataclass +class SchedulerTestConfig: + cfg: dict + init_lr: float + expected_lrs: list[float] + + +TEST_SCHEDULERS = [ + SchedulerTestConfig( + cfg=dict( + type="LRListScheduler", + params=dict(milestones=[2, 5], lr_list=[[0.5], [0.1]]) + ), + init_lr=1.0, + expected_lrs=[1.0, 1.0, 0.5, 0.5, 0.5, 0.1, 0.1, 0.1], + ), + SchedulerTestConfig( + cfg=dict(type="LRListScheduler", params=dict(milestones=[1], lr_list=[[0.1]])), + init_lr=0.1, + expected_lrs=[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1], + ), + SchedulerTestConfig( + cfg=dict(type="LRListScheduler", params=dict(milestones=[1], lr_list=[[0.5]])), + init_lr=0.1, + expected_lrs=[0.1, 0.5, 0.5, 0.5], + ), + SchedulerTestConfig( + cfg=dict(type="StepLR", params=dict(step_size=3, gamma=0.1)), + init_lr=1.0, + expected_lrs=[1.0, 1.0, 1.0, 0.1, 0.1, 0.1, 0.01, 0.01, 0.01, 0.001], + ), +] + + +@pytest.mark.parametrize("test_cfg", TEST_SCHEDULERS) +def test_build_scheduler(test_cfg: SchedulerTestConfig) -> None: + optimizer = torch.optim.SGD([torch.randn(2, 2)], lr=test_cfg.init_lr) + s = schedulers.build_scheduler(test_cfg.cfg, optimizer) + print() + print(f"Scheduler: {s}") + num_epochs = len(test_cfg.expected_lrs) + for e in range(num_epochs): + _assert_learning_rates_match(e, optimizer, test_cfg.expected_lrs[e]) + optimizer.step() + s.step() + + +@pytest.mark.parametrize("test_cfg", TEST_SCHEDULERS) +def test_resume_scheduler_after_each_epoch(test_cfg: SchedulerTestConfig) -> None: + optimizer = torch.optim.SGD([torch.randn(2, 2)], lr=test_cfg.init_lr) + s = schedulers.build_scheduler(test_cfg.cfg, optimizer) + print() + print(f"Scheduler: {s}") + num_epochs = len(test_cfg.expected_lrs) + for e in range(num_epochs): + _assert_learning_rates_match(e, optimizer, test_cfg.expected_lrs[e]) + optimizer.step() + s.step() + + 
optimizer = torch.optim.SGD([torch.randn(2, 2)], lr=test_cfg.init_lr) + new_scheduler = schedulers.build_scheduler(test_cfg.cfg, optimizer) + schedulers.load_scheduler_state(new_scheduler, s.state_dict()) + s = new_scheduler + + +@pytest.mark.parametrize( + "test_cfg, middle_epoch", + [ + (TEST_SCHEDULERS[0], 3), + (TEST_SCHEDULERS[1], 5), + (TEST_SCHEDULERS[2], 2), + (TEST_SCHEDULERS[3], 2), + (TEST_SCHEDULERS[3], 3), + (TEST_SCHEDULERS[3], 4), + ], +) +def test_two_stage_training(test_cfg: SchedulerTestConfig, middle_epoch: int) -> None: + num_epochs = len(test_cfg.expected_lrs) + optimizer = torch.optim.SGD([torch.randn(2, 2)], lr=test_cfg.init_lr) + s = schedulers.build_scheduler(test_cfg.cfg, optimizer) + + print() + print(f"Scheduler: {s}") + for e in range(middle_epoch): + _assert_learning_rates_match(e, optimizer, test_cfg.expected_lrs[e]) + optimizer.step() + s.step() + + optimizer = torch.optim.SGD([torch.randn(2, 2)], lr=test_cfg.init_lr) + new_scheduler = schedulers.build_scheduler(test_cfg.cfg, optimizer) + schedulers.load_scheduler_state(new_scheduler, s.state_dict()) + s = new_scheduler + for e in range(middle_epoch, num_epochs): + _assert_learning_rates_match(e, optimizer, test_cfg.expected_lrs[e]) + s.step() + + +def _assert_learning_rates_match(e, optimizer, expected): + current_lrs = [g["lr"] for g in optimizer.param_groups] + print(f"Epoch {e}: LR={current_lrs}, expected={expected}") + for lr in current_lrs: + assert isinstance(lr, float) + np.testing.assert_almost_equal(lr, expected) From 54432127af3ab312b6b9c7504235a182a3e56424 Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 21 Nov 2024 19:22:26 +0000 Subject: [PATCH 35/88] Fix: correct the early return error when save_epochs=1 and delete the redundant snapshots at the end --- .../runners/snapshots.py | 34 ++++++++++--------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py index 341b3c4be4..bc5a30a44c 100644 --- a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py +++ b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py @@ -113,41 +113,43 @@ def update(self, epoch: int, state_dict: dict, last: bool = False) -> None: ): current_best = self.best() self._best_metric = metrics[self._key] + + # Save the new best model save_path = self.snapshot_path(epoch, best=True) parsed_state_dict = { - k: v + k: v for k, v in state_dict.items() if self.save_optimizer_state or k != "optimizer" } torch.save(parsed_state_dict, save_path) + # Handle previous best model if current_best is not None: - # rename if the current best should have been saved, otherwise delete if current_best.epochs % self.save_epochs == 0: new_name = self.snapshot_path(epoch=current_best.epochs) current_best.path.rename(new_name) else: current_best.path.unlink(missing_ok=False) - return - - if not (last or epoch % self.save_epochs == 0): - return - + else: + # Save regular snapshot if needed + should_save = last or epoch % self.save_epochs == 0 + if should_save: + save_path = self.snapshot_path(epoch=epoch) + parsed_state_dict = { + k: v + for k, v in state_dict.items() + if self.save_optimizer_state or k != "optimizer" + } + torch.save(parsed_state_dict, save_path) + + # Clean up old snapshots if needed existing_snapshots = [s for s in self.snapshots() if not s.best] if len(existing_snapshots) >= self.max_snapshots: - num_to_delete = 1 + len(existing_snapshots) - self.max_snapshots + num_to_delete = len(existing_snapshots) - 
self.max_snapshots to_delete = existing_snapshots[:num_to_delete] for snapshot in to_delete: snapshot.path.unlink(missing_ok=False) - save_path = self.snapshot_path(epoch=epoch) - parsed_state_dict = { - k: v - for k, v in state_dict.items() - if self.save_optimizer_state or k != "optimizer" - } - torch.save(parsed_state_dict, save_path) - def best(self) -> Snapshot | None: """Returns: the path to the best snapshot, if it exists""" snapshots = self.snapshots() From 380796358288aa04c4f0e2cf4d722facba618996 Mon Sep 17 00:00:00 2001 From: Ti Wang <81274389+xiu-cs@users.noreply.github.com> Date: Fri, 22 Nov 2024 13:47:09 +0100 Subject: [PATCH 36/88] Update deeplabcut/pose_estimation_pytorch/runners/snapshots.py Co-authored-by: n-poulsen <45132115+n-poulsen@users.noreply.github.com> --- .../runners/snapshots.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py index bc5a30a44c..45bf339d0b 100644 --- a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py +++ b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py @@ -130,17 +130,15 @@ def update(self, epoch: int, state_dict: dict, last: bool = False) -> None: current_best.path.rename(new_name) else: current_best.path.unlink(missing_ok=False) - else: + elif last or epoch % self.save_epochs == 0: # Save regular snapshot if needed - should_save = last or epoch % self.save_epochs == 0 - if should_save: - save_path = self.snapshot_path(epoch=epoch) - parsed_state_dict = { - k: v - for k, v in state_dict.items() - if self.save_optimizer_state or k != "optimizer" - } - torch.save(parsed_state_dict, save_path) + save_path = self.snapshot_path(epoch=epoch) + parsed_state_dict = { + k: v + for k, v in state_dict.items() + if self.save_optimizer_state or k != "optimizer" + } + torch.save(parsed_state_dict, save_path) # Clean up old snapshots if needed existing_snapshots = [s for s in self.snapshots() if not s.best] From 4e7ed49e981bee1e822b05c11b7072d6b06f669c Mon Sep 17 00:00:00 2001 From: n-poulsen <45132115+n-poulsen@users.noreply.github.com> Date: Fri, 22 Nov 2024 09:26:47 +0100 Subject: [PATCH 37/88] DeepLabCut 3.0 - Video Analysis with `use_shelve=True` (#2790) --- deeplabcut/compat.py | 11 +- .../apis/analyze_videos.py | 263 +++++++++++------- .../apis/convert_detections_to_tracklets.py | 18 +- .../data/postprocessor.py | 30 ++ .../modelzoo/inference.py | 7 +- .../post_processing/identity.py | 33 +-- .../runners/inference.py | 24 +- .../runners/shelving.py | 176 ++++++++++++ docs/pytorch/user_guide.md | 32 +-- .../post_processing/test_identity.py | 9 +- 10 files changed, 435 insertions(+), 168 deletions(-) create mode 100644 deeplabcut/pose_estimation_pytorch/runners/shelving.py diff --git a/deeplabcut/compat.py b/deeplabcut/compat.py index 43fba81ef1..7088053ea8 100644 --- a/deeplabcut/compat.py +++ b/deeplabcut/compat.py @@ -696,7 +696,6 @@ def analyze_videos( See issue: https://forum.image.sc/t/how-to-stop-running-out-of-vram/30551/2 use_shelve: bool, optional, default=False - Currently not supported by the PyTorch engine. By default, data are dumped in a pickle file at the end of the video analysis. 
Otherwise, data are written to disk on the fly using a "shelf"; i.e., a pickle-based, persistent, database-like object by default, resulting in @@ -848,17 +847,13 @@ def analyze_videos( from deeplabcut.pose_estimation_pytorch.apis import analyze_videos _update_device(gputouse, torch_kwargs) - if use_shelve: - raise NotImplementedError( - f"The 'use_shelve' option is not yet implemented with {engine}" - ) - if batchsize is not None: if "batch_size" in torch_kwargs: print( f"You called analyze_videos with parameters ``batchsize={batchsize}" f"`` and batch_size={torch_kwargs['batch_size']}. Only one is " - f"needed/used. Using batch size {torch_kwargs['batch_size']}") + f"needed/used. Using batch size {torch_kwargs['batch_size']}" + ) else: torch_kwargs["batch_size"] = batchsize @@ -871,6 +866,8 @@ def analyze_videos( save_as_csv=save_as_csv, destfolder=destfolder, modelprefix=modelprefix, + use_shelve=use_shelve, + robust_nframes=robust_nframes, auto_track=auto_track, identity_only=identity_only, overwrite=False, diff --git a/deeplabcut/pose_estimation_pytorch/apis/analyze_videos.py b/deeplabcut/pose_estimation_pytorch/apis/analyze_videos.py index 503f7f55c9..383be0a933 100644 --- a/deeplabcut/pose_estimation_pytorch/apis/analyze_videos.py +++ b/deeplabcut/pose_estimation_pytorch/apis/analyze_videos.py @@ -34,7 +34,7 @@ list_videos_in_folder, parse_snapshot_index_for_analysis, ) -from deeplabcut.pose_estimation_pytorch.post_processing.identity import assign_identity +import deeplabcut.pose_estimation_pytorch.runners.shelving as shelving from deeplabcut.pose_estimation_pytorch.runners import InferenceRunner from deeplabcut.pose_estimation_pytorch.task import Task from deeplabcut.refine_training_dataset.stitch import stitch_tracklets @@ -95,8 +95,9 @@ def video_inference( task: Task, pose_runner: InferenceRunner, detector_runner: InferenceRunner | None = None, - with_identity: bool = False, cropping: list[int] | None = None, + shelf_writer: shelving.ShelfWriter | None = None, + robust_nframes: bool = False, ) -> list[dict[str, np.ndarray]]: """Runs inference on a video Args: video: The video to analyze pose_runner: The pose runner to run inference with detector_runner: When ``task==Task.TOP_DOWN``, the detector runner to obtain bounding boxes for the video. - with_identity: Whether identity predictions should be made with the model. cropping: Optionally, video inference can be run on a cropped version of the video. To do so, pass a list containing 4 elements to specify which area of the video should be analyzed: ``[xmin, xmax, ymin, ymax]``. + shelf_writer: By default, data are dumped in a pickle file at the end of the + video analysis. Passing a shelf writer writes data to disk on-the-fly + using a "shelf" (a pickle-based, persistent, database-like object by + default, resulting in constant memory footprint). The returned list is + then empty. + robust_nframes: Evaluate a video's number of frames in a robust manner. This + option is slower (as the whole video is read frame-by-frame), but does not + rely on metadata, hence its robustness against file corruption. Returns: - Predictions for each frame in the video. + Predictions for each frame in the video. If a shelf_writer is given, this list + will be empty and the predictions will exclusively be stored in the file written + by the shelf.
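    Example:
        A minimal, illustrative sketch of shelf-backed analysis. It assumes a
        bottom-up model, an already-built ``pose_runner``, the model's test
        pose config ``pose_cfg``, and an existing "video.mp4"; none of these
        names come from this function itself:

            video = VideoIterator("video.mp4")
            shelf = shelving.ShelfWriter(
                pose_cfg=pose_cfg,
                filepath="video_full.pickle",
                num_frames=video.get_n_frames(),
            )
            predictions = video_inference(
                video,
                task=Task.BOTTOM_UP,
                pose_runner=pose_runner,
                shelf_writer=shelf,
            )
            assert predictions == []  # results live in "video_full.pickle"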
""" if not isinstance(video, VideoIterator): video = VideoIterator(str(video), cropping=cropping) - n_frames = video.get_n_frames() + n_frames = video.get_n_frames(robust=robust_nframes) vid_w, vid_h = video.dimensions print(f"Starting to analyze {video.video_path}") print( @@ -138,17 +148,14 @@ def video_inference( video.set_context(bbox_predictions) print(f"Running pose prediction with batch size {pose_runner.batch_size}") - predictions = pose_runner.inference(images=tqdm(video)) + if shelf_writer is not None: + shelf_writer.open() - if with_identity: - bodypart_predictions = assign_identity( - [p["bodyparts"] for p in predictions], - [p["identity_scores"] for p in predictions], - ) - for i, p_with_id in enumerate(bodypart_predictions): - predictions[i]["bodyparts"] = p_with_id + predictions = pose_runner.inference(images=tqdm(video), shelf_writer=shelf_writer) + if shelf_writer is not None: + shelf_writer.close() - if len(predictions) != n_frames: + if shelf_writer is None and len(predictions) != n_frames: tip_url = "https://deeplabcut.github.io/DeepLabCut/docs/recipes/io.html" header = "#tips-on-video-re-encoding-and-preprocessing" logging.warning( @@ -175,16 +182,19 @@ def analyze_videos( batch_size: int | None = None, detector_batch_size: int | None = None, modelprefix: str = "", + use_shelve: bool = False, + robust_nframes: bool = False, transform: A.Compose | None = None, auto_track: bool | None = True, identity_only: bool | None = False, overwrite: bool = False, cropping: list[int] | None = None, + save_as_df: bool = False, ) -> str: """Makes prediction based on a trained network. # TODO: - - other options missing options such as shelve + - other options missing options such as calibrate - pass detector path or detector runner The index of the trained network is specified by parameters in the config file @@ -201,7 +211,8 @@ def analyze_videos( shuffle: An integer specifying the shuffle index of the training dataset used for training the network. trainingsetindex: Integer specifying which TrainingsetFraction to use. - save_as_csv: Saves the predictions in a .csv file. + save_as_csv: For multi-animal projects and when `auto_track=True`, passed + along to the `stitch_tracklets` method to save tracks as CSV. device: the device to use for video analysis destfolder: specifies the destination folder for analysis data. If ``None``, the path of the video is used. Note that for subsequent analysis this @@ -224,6 +235,13 @@ def analyze_videos( value from the project config as a default. transform: Optional custom transforms to apply to the video overwrite: Overwrite any existing videos + use_shelve: By default, data are dumped in a pickle file at the end of the video + analysis. Otherwise, data are written to disk on the fly using a "shelf"; + i.e., a pickle-based, persistent, database-like object by default, resulting + in constant memory footprint. + robust_nframes: Evaluate a video's number of frames in a robust manner. This + option is slower (as the whole video is read frame-by-frame), but does not + rely on metadata, hence its robustness against file corruption. auto_track: By default, tracking and stitching are automatically performed, producing the final h5 data file. This is equivalent to the behavior for single-animal projects. @@ -233,11 +251,14 @@ def analyze_videos( identity_only: sub-call for auto_track. If ``True`` and animal identity was learned by the model, assembly and tracking rely exclusively on identity prediction. 
- cropping: list or None, optional, default=None - List of cropping coordinates as [x1, x2, y1, y2]. - Note that the same cropping parameters will then be used for all videos. - If different video crops are desired, run ``analyze_videos`` on individual - videos with the corresponding cropping coordinates. + cropping: List of cropping coordinates as [x1, x2, y1, y2]. Note that the same + cropping parameters will then be used for all videos. If different video + crops are desired, run ``analyze_videos`` on individual videos with the + corresponding cropping coordinates. + save_as_df: Cannot be used when `use_shelve` is True. Saves the video + predictions (before tracking results) to an H5 file containing a pandas + DataFrame. If ``save_as_csv==True`` than the full predictions will also be + saved in a CSV file. Returns: The scorer used to analyze the videos @@ -329,7 +350,6 @@ def analyze_videos( # Reading video and init variables videos = list_videos_in_folder(videos, videotype) - results = [] for video in videos: if destfolder is None: output_path = video.parent @@ -337,19 +357,30 @@ def analyze_videos( output_path = Path(destfolder) output_prefix = video.stem + dlc_scorer - output_h5 = output_path / f"{output_prefix}.h5" output_pkl = output_path / f"{output_prefix}_full.pickle" + video_iterator = VideoIterator(video) + + shelf_writer = None + if use_shelve: + shelf_writer = shelving.ShelfWriter( + pose_cfg=pose_cfg, + filepath=output_pkl, + num_frames=video_iterator.get_n_frames(robust=robust_nframes), + ) + if not overwrite and output_pkl.exists(): print(f"Video {video} already analyzed at {output_pkl}!") else: runtime = [time.time()] predictions = video_inference( - video=video, + video=video_iterator, pose_runner=pose_runner, task=pose_task, detector_runner=detector_runner, cropping=cropping, + shelf_writer=shelf_writer, + robust_nframes=robust_nframes, ) runtime.append(time.time()) metadata = _generate_metadata( @@ -360,50 +391,40 @@ def analyze_videos( batch_size=batch_size, cropping=cropping, runtime=(runtime[0], runtime[1]), - video=VideoReader(str(video)), - ) - output_data = _generate_output_data(pose_cfg, predictions) - _ = auxfun_multianimal.SaveFullMultiAnimalData( - output_data, metadata, str(output_h5) - ) - - pred_bodyparts = np.stack([p["bodyparts"][..., :3] for p in predictions]) - pred_unique_bodyparts = None - if len(predictions) > 0 and "unique_bodyparts" in predictions[0]: - pred_unique_bodyparts = np.stack( - [p["unique_bodyparts"] for p in predictions] - ) - - df = create_df_from_prediction( - pred_bodyparts=pred_bodyparts, - pred_unique_bodyparts=pred_unique_bodyparts, - cfg=cfg, - model_cfg=model_cfg, - dlc_scorer=dlc_scorer, - output_path=output_path, - output_prefix=output_prefix, - save_as_csv=save_as_csv, + video=video_iterator, + robust_nframes=robust_nframes, ) - results.append((str(video), df)) - if cfg["multianimalproject"]: - pred_bodypart_ids = None - if with_identity: - # reshape from (num_assemblies, num_bpts, num_individuals) - # to (num_assemblies, num_bpts) by taking the maximum - # likelihood individual for each bodypart - pred_bodypart_ids = np.stack( - [np.argmax(p["identity_scores"], axis=2) for p in predictions] + with open(output_path / f"{output_prefix}_meta.pickle", "wb") as f: + pickle.dump(metadata, f, pickle.HIGHEST_PROTOCOL) + + if use_shelve and save_as_df: + print("Can't `save_as_df` as `use_shelve=True`. 
Skipping.") + + if not use_shelve: + output_data = _generate_output_data(pose_cfg, predictions) + with open(output_pkl, "wb") as f: + pickle.dump(output_data, f, pickle.HIGHEST_PROTOCOL) + + if save_as_df: + create_df_from_prediction( + predictions=predictions, + cfg=cfg, + model_cfg=model_cfg, + dlc_scorer=dlc_scorer, + output_path=output_path, + output_prefix=output_prefix, + save_as_csv=save_as_csv, ) - _save_assemblies( - output_path, - output_prefix, - pred_bodyparts, - pred_bodypart_ids, - pred_unique_bodyparts, - with_identity, + if cfg["multianimalproject"]: + _generate_assemblies_file( + full_data_path=output_pkl, + output_path=output_path / f"{output_prefix}_assemblies.pickle", + num_bodyparts=len(bodyparts), + num_unique_bodyparts=len(unique_bodyparts), ) + if auto_track: convert_detections2tracklets( config=config, @@ -422,6 +443,7 @@ def analyze_videos( shuffle, trainingsetindex, destfolder=str(output_path), + save_as_csv=save_as_csv, ) print( @@ -436,8 +458,7 @@ def analyze_videos( def create_df_from_prediction( - pred_bodyparts: np.ndarray, - pred_unique_bodyparts: np.ndarray | None, + predictions: list[dict[str, np.ndarray]], dlc_scorer: str, cfg: dict, model_cfg: dict, @@ -445,6 +466,15 @@ def create_df_from_prediction( output_prefix: str | Path, save_as_csv: bool = False, ) -> pd.DataFrame: + pred_bodyparts = np.stack( + [p["bodyparts"][..., :3] for p in predictions] + ) + pred_unique_bodyparts = None + if len(predictions) > 0 and "unique_bodyparts" in predictions[0]: + pred_unique_bodyparts = np.stack( + [p["unique_bodyparts"] for p in predictions] + ) + output_h5 = Path(output_path) / f"{output_prefix}.h5" output_pkl = Path(output_path) / f"{output_prefix}_full.pickle" @@ -485,37 +515,65 @@ def create_df_from_prediction( return df -def _save_assemblies( +def _generate_assemblies_file( + full_data_path: Path, output_path: Path, - output_prefix: str, - pred_bodyparts: np.ndarray, - pred_bodypart_ids: np.ndarray, - pred_unique_bodyparts: np.ndarray, - with_identity: bool, + num_bodyparts: int, + num_unique_bodyparts: int, ) -> None: - output_ass = output_path / f"{output_prefix}_assemblies.pickle" - assemblies = {} - for i, bpt in enumerate(pred_bodyparts): - if with_identity: - extra_column = np.expand_dims(pred_bodypart_ids[i], axis=-1) + """Generates the assemblies file from predictions""" + if full_data_path.exists(): + with open(full_data_path, "rb") as f: + data = pickle.load(f) + + else: + data = shelving.ShelfReader(full_data_path) + data.open() + + num_frames = data["metadata"]["nframes"] + str_width = data["metadata"].get("key_str_width") + if str_width is None: + keys = [k for k in data.keys() if k != "metadata"] + str_width = len(keys[0]) - len("frame") + + assemblies = dict(single=dict()) + for frame_index in range(num_frames): + frame_key = "frame" + str(frame_index).zfill(str_width) + predictions = data[frame_key] + + keypoint_preds = predictions["coordinates"][0] + keypoint_scores = predictions["confidence"] + + bpts = np.stack(keypoint_preds[:num_bodyparts]) + scores = np.stack(keypoint_scores[:num_bodyparts]) + preds = np.concatenate([bpts, scores], axis=-1) + + keypoint_id_scores = predictions.get("identity") + if keypoint_id_scores is not None: + keypoint_id_scores = np.stack(keypoint_id_scores[:num_bodyparts]) + keypoint_pred_ids = np.argmax(keypoint_id_scores, axis=2) + keypoint_pred_ids = np.expand_dims(keypoint_pred_ids, axis=-1) else: - extra_column = np.full( - (bpt.shape[0], bpt.shape[1], 1), - -1.0, - dtype=np.float32, - ) - ass = 
np.concatenate((bpt, extra_column), axis=-1) - assemblies[i] = ass + num_bpts, num_preds = preds.shape[:2] + keypoint_pred_ids = -np.ones((num_bpts, num_preds, 1)) - if pred_unique_bodyparts is not None: - assemblies["single"] = {} - for i, unique_bpt in enumerate(pred_unique_bodyparts): - extra_column = np.full((unique_bpt.shape[1], 1), -1.0, dtype=np.float32) - ass = np.concatenate((unique_bpt[0], extra_column), axis=-1) - assemblies["single"][i] = ass + # reshape to (num_preds, num_bpts, 4) + preds = np.concatenate([preds, keypoint_pred_ids], axis=-1) + preds = preds.transpose((1, 0, 2)) + assemblies[frame_index] = preds + + if num_unique_bodyparts > 0: + unique_bpts = np.stack(keypoint_preds[num_bodyparts:]) + unique_scores = np.stack(keypoint_scores[num_bodyparts:]) + unique_preds = np.concatenate([unique_bpts, unique_scores], axis=-1) + unique_preds = unique_preds.transpose((1, 0, 2)) + assemblies["single"][frame_index] = unique_preds[0] # single prediction - with open(output_ass, "wb") as handle: - pickle.dump(assemblies, handle, protocol=pickle.HIGHEST_PROTOCOL) + with open(output_path, "wb") as file: + pickle.dump(assemblies, file, pickle.HIGHEST_PROTOCOL) + + if isinstance(data, shelving.ShelfReader): + data.close() def _validate_destfolder(destfolder: str | None) -> None: @@ -539,7 +597,8 @@ def _generate_metadata( batch_size: int, cropping: list[int] | None, runtime: tuple[float, float], - video: VideoReader, + video: VideoIterator, + robust_nframes: bool = False, ) -> dict: w, h = video.dimensions if cropping is None: @@ -561,7 +620,7 @@ def _generate_metadata( "fps": video.fps, "batch_size": batch_size, "frame_dimensions": (w, h), - "nframes": video.get_n_frames(), + "nframes": video.get_n_frames(robust=robust_nframes), "iteration (active-learning)": cfg["iteration"], "training set fraction": train_fraction, "cropping": cropping is not None, @@ -577,6 +636,7 @@ def _generate_output_data( pose_config: dict, predictions: list[dict[str, np.ndarray]], ) -> dict: + str_width = int(np.ceil(np.log10(len(predictions)))) output = { "metadata": { "nms radius": pose_config.get("nmsradius"), @@ -593,17 +653,11 @@ def _generate_output_data( for i in range(len(pose_config["all_joints"])) ], "nframes": len(predictions), + "key_str_width": str_width, } } - str_width = int(np.ceil(np.log10(len(predictions)))) for frame_num, frame_predictions in enumerate(predictions): - # TODO: Do we want to keep the same format as in the TensorFlow version? - # On the one hand, it's "more" backwards compatible. - # On the other, might as well simplify the code. 
These files should only be loaded - # by the PyTorch version, and only predictions made by PyTorch models should be - # loaded using them - key = "frame" + str(frame_num).zfill(str_width) # shape (num_assemblies, num_bpts, 3) bodyparts = frame_predictions["bodyparts"] @@ -613,10 +667,12 @@ def _generate_output_data( scores = [bpt[:, 2:3] for bpt in bodyparts] # full pickle has bodyparts and unique bodyparts in same array + num_unique = 0 if "unique_bodyparts" in frame_predictions: unique_bpts = frame_predictions["unique_bodyparts"].transpose((1, 0, 2)) coordinates += [bpt[:, :2] for bpt in unique_bpts] scores += [bpt[:, 2:] for bpt in unique_bpts] + num_unique = len(unique_bpts) output[key] = { "coordinates": (coordinates,), @@ -631,4 +687,11 @@ def _generate_output_data( id_scores = id_scores.transpose((1, 0, 2)) output[key]["identity"] = [bpt_id_scores for bpt_id_scores in id_scores] + if num_unique > 0: + # needed for create_video_with_all_detections to display unique bpts + num_assem, num_ind = id_scores.shape[1:] + output[key]["identity"] += [ + -1 * np.ones((num_assem, num_ind)) for i in range(num_unique) + ] + return output diff --git a/deeplabcut/pose_estimation_pytorch/apis/convert_detections_to_tracklets.py b/deeplabcut/pose_estimation_pytorch/apis/convert_detections_to_tracklets.py index 8d4a677292..63d7bf939d 100644 --- a/deeplabcut/pose_estimation_pytorch/apis/convert_detections_to_tracklets.py +++ b/deeplabcut/pose_estimation_pytorch/apis/convert_detections_to_tracklets.py @@ -148,7 +148,6 @@ def convert_detections2tracklets( np.vstack([scorers, bodypart_labels, xyl_value]), names=["scorer", "bodyparts", "coords"], ) - image_names = [fn for fn in data if fn != "metadata"] if track_method == "box": mot_tracker = trackingutils.SORTBox( @@ -184,31 +183,32 @@ def convert_detections2tracklets( "analyzing the video!" 
) + num_frames = data["metadata"]["nframes"] ass = auxiliaryfunctions.read_pickle(ass_filename) # Initialize storage of the 'single' individual track if cfg["uniquebodyparts"]: tracklets["single"] = {} _single = {} - for index, image_name in enumerate(image_names): + for index in range(num_frames): single_detection = ass["single"].get(index) if single_detection is None: continue - imindex = int(re.findall(r"\d+", image_name)[0]) - _single[imindex] = np.asarray(single_detection) + _single[index] = np.asarray(single_detection) tracklets["single"].update(_single) if inference_cfg["topktoretain"] == 1: tracklets[0] = {} - for index, image_name in tqdm(enumerate(image_names)): + for index in tqdm(range(num_frames)): assemblies = ass.get(index) if assemblies is None: continue - tracklets[0][image_name] = np.asarray(assemblies[0].data) + + tracklets[0][index] = np.asarray(assemblies[0].data) else: keep = set(multi_bpts).difference(ignore_bodyparts or []) keep_inds = sorted(multi_bpts.index(bpt) for bpt in keep) - for index, image_name in tqdm(enumerate(image_names)): + for index in tqdm(range(num_frames)): assemblies = ass.get(index) if assemblies is None or len(assemblies) == 0: continue @@ -232,9 +232,7 @@ def convert_detections2tracklets( xy = animals[:, keep_inds, :2] trackers = mot_tracker.track(xy) - trackingutils.fill_tracklets( - tracklets, trackers, animals, image_name - ) + trackingutils.fill_tracklets(tracklets, trackers, animals, index) tracklets["header"] = df_index with open(track_filename, "wb") as f: diff --git a/deeplabcut/pose_estimation_pytorch/data/postprocessor.py b/deeplabcut/pose_estimation_pytorch/data/postprocessor.py index 0fe76343d6..2f391bde66 100644 --- a/deeplabcut/pose_estimation_pytorch/data/postprocessor.py +++ b/deeplabcut/pose_estimation_pytorch/data/postprocessor.py @@ -18,6 +18,7 @@ import numpy as np from deeplabcut.pose_estimation_pytorch.data.preprocessor import Context +from deeplabcut.pose_estimation_pytorch.post_processing.identity import assign_identity class Postprocessor(ABC): @@ -101,6 +102,14 @@ def build_bottom_up_postprocessor( pad_value=-1, ), ] + + if with_identity: + components.append( + AssignIndividualIdentities( + identity_key="identity_scores", pose_key="bodyparts", + ) + ) + return ComposePostprocessor(components=components) @@ -453,3 +462,24 @@ def __call__( del id_heatmaps return predictions, context + + +class AssignIndividualIdentities(Postprocessor): + """Assigns predicted identities to individuals + + Attributes: + identity_key: Key with which to add predicted identities in the predictions dict + pose_key: Key for the bodyparts in the predictions dict + """ + + def __init__(self, identity_key: str, pose_key: str) -> None: + self.identity_key = identity_key + self.pose_key = pose_key + + def __call__( + self, predictions: dict[str, np.ndarray], context: Context + ) -> tuple[dict[str, np.ndarray], Context]: + map_ = assign_identity(predictions["bodyparts"], predictions["identity_scores"]) + predictions["bodyparts"] = predictions["bodyparts"][map_] + predictions["identity_scores"] = predictions["identity_scores"][map_] + return predictions, context diff --git a/deeplabcut/pose_estimation_pytorch/modelzoo/inference.py b/deeplabcut/pose_estimation_pytorch/modelzoo/inference.py index e97f50c716..405075babc 100644 --- a/deeplabcut/pose_estimation_pytorch/modelzoo/inference.py +++ b/deeplabcut/pose_estimation_pytorch/modelzoo/inference.py @@ -143,19 +143,14 @@ def _video_inference_superanimal( detector_runner=detector_runner, ) - 
pred_bodyparts = np.stack([p["bodyparts"][..., :3] for p in predictions]) - pred_unique_bodyparts = None - bbox = cropping if cropping is None: vid_w, vid_h = video.dimensions bbox = (0, vid_w, 0, vid_h) print(f"Saving results to {dest_folder}") - df = create_df_from_prediction( - pred_bodyparts=pred_bodyparts, - pred_unique_bodyparts=pred_unique_bodyparts, + predictions=predictions, dlc_scorer=dlc_scorer, cfg=dict(multianimalproject=True), model_cfg=model_cfg, diff --git a/deeplabcut/pose_estimation_pytorch/post_processing/identity.py b/deeplabcut/pose_estimation_pytorch/post_processing/identity.py index 224a837ef6..9f81ab4619 100644 --- a/deeplabcut/pose_estimation_pytorch/post_processing/identity.py +++ b/deeplabcut/pose_estimation_pytorch/post_processing/identity.py @@ -16,16 +16,17 @@ def assign_identity( - predictions: list[np.ndarray], - identity_scores: list[np.ndarray], -) -> list[np.ndarray]: + predictions: np.ndarray, identity_scores: np.ndarray +) -> np.ndarray: """ Args: - predictions: shape (num_individuals, num_bodyparts, 3) - identity_scores: shape (num_individuals, num_bodyparts, num_individuals) + predictions: Pose predictions for an image, with shape (num_individuals, + num_bodyparts, 3) + identity_scores: Identity predictions for keypoints in an image, of shape + (num_individuals, num_bodyparts, num_individuals). Returns: - predictions with assigned identity, of shape (num_individuals, num_bodyparts, 3) + The ordering to use to match predictions to identities. """ if not len(predictions) == len(identity_scores): raise ValueError( @@ -33,17 +34,13 @@ def assign_identity( f" ({len(predictions)} != {len(identity_scores)}" ) - predictions_with_identity = [] - for pred, scores in zip(predictions, identity_scores): - # average of ID scores, weighted by keypoint confidence - pose_conf = pred[:, :, 2:3] - cost_matrix = np.mean(pose_conf * scores, axis=1) + # average of ID scores, weighted by keypoint confidence + pose_conf = predictions[:, :, 2:3] + cost_matrix = np.mean(pose_conf * identity_scores, axis=1) - row_ind, col_ind = linear_sum_assignment(cost_matrix, maximize=True) - new_order = np.zeros_like(row_ind) - for old_pos, new_pos in zip(row_ind, col_ind): - new_order[new_pos] = old_pos + row_ind, col_ind = linear_sum_assignment(cost_matrix, maximize=True) + new_order = np.zeros_like(row_ind) + for old_pos, new_pos in zip(row_ind, col_ind): + new_order[new_pos] = old_pos - predictions_with_identity.append(pred[new_order]) - - return predictions_with_identity + return new_order diff --git a/deeplabcut/pose_estimation_pytorch/runners/inference.py b/deeplabcut/pose_estimation_pytorch/runners/inference.py index d09f8648e3..1ef929f862 100644 --- a/deeplabcut/pose_estimation_pytorch/runners/inference.py +++ b/deeplabcut/pose_estimation_pytorch/runners/inference.py @@ -11,7 +11,6 @@ from __future__ import annotations from abc import ABCMeta, abstractmethod -from collections import defaultdict from pathlib import Path from typing import Any, Generic, Iterable @@ -19,6 +18,7 @@ import torch import torch.nn as nn +import deeplabcut.pose_estimation_pytorch.runners.shelving as shelving from deeplabcut.pose_estimation_pytorch.data.postprocessor import Postprocessor from deeplabcut.pose_estimation_pytorch.data.preprocessor import Preprocessor from deeplabcut.pose_estimation_pytorch.models.detectors import BaseDetector @@ -82,6 +82,7 @@ def inference( self, images: Iterable[str | np.ndarray] | Iterable[tuple[str | np.ndarray, dict[str, Any]]], + shelf_writer: shelving.ShelfWriter | None 
= None, ) -> list[dict[str, np.ndarray]]: """Run model inference on the given dataset @@ -90,6 +91,11 @@ def inference( Args: images: the images to run inference on, optionally with context + shelf_writer: by default, data are saved in a list and returned at the end + of inference. Passing a shelf manager writes data to disk on-the-fly + using a "shelf" (a pickle-based, persistent, database-like object by + default, resulting in constant memory footprint). The returned list is + then empty. Returns: a dict containing head predictions for each image @@ -107,12 +113,12 @@ def inference( for data in images: self._prepare_inputs(data) self._process_full_batches() - results += self._extract_results() + results += self._extract_results(shelf_writer) # Process the last batch even if not full if self._inputs_waiting_for_processing(): self._process_batch() - results += self._extract_results() + results += self._extract_results(shelf_writer) return results @@ -149,7 +155,7 @@ def _process_full_batches(self) -> None: while self._batch is not None and len(self._batch) >= self.batch_size: self._process_batch() - def _extract_results(self) -> list: + def _extract_results(self, shelf_writer: shelving.ShelfWriter) -> list: """Obtains results that were obtained from processing a batch.""" results = [] while ( @@ -165,10 +171,18 @@ def _extract_results(self) -> list: # TODO: typing update - the post-processor can remove a dict level image_predictions, _ = self.postprocessor(image_predictions, context) + if shelf_writer is not None: + shelf_writer.add_prediction( + bodyparts=image_predictions["bodyparts"], + unique_bodyparts=image_predictions.get("unique_bodyparts"), + identity_scores=image_predictions.get("identity_scores"), + ) + else: + results.append(image_predictions) + self._contexts = self._contexts[1:] self._image_batch_sizes = self._image_batch_sizes[1:] self._predictions = self._predictions[num_predictions:] - results.append(image_predictions) return results diff --git a/deeplabcut/pose_estimation_pytorch/runners/shelving.py b/deeplabcut/pose_estimation_pytorch/runners/shelving.py new file mode 100644 index 0000000000..17090b4b9b --- /dev/null +++ b/deeplabcut/pose_estimation_pytorch/runners/shelving.py @@ -0,0 +1,176 @@ +# +# DeepLabCut Toolbox (deeplabcut.org) +# © A. & M.W. Mathis Labs +# https://github.com/DeepLabCut/DeepLabCut +# +# Please see AUTHORS for contributors. 
+# https://github.com/DeepLabCut/DeepLabCut/blob/main/AUTHORS +# +# Licensed under GNU Lesser General Public License v3.0 +# +"""Modules used to read/write shelve data during video analysis in DeepLabCut 3.0""" +import pickle +import shelve +from abc import ABC +from pathlib import Path + +import numpy as np + + +class ShelfManager(ABC): + """Class to manage shelf data""" + + def __init__(self, filepath: str | Path, flag: str = "r") -> None: + self.filepath = Path(filepath) + self.flag = flag + + self._db: shelve.Shelf | None = None + self._open: bool = False + + def open(self) -> None: + """Opens the shelf""" + self._db = shelve.open( + str(self.filepath), + flag=self.flag, + protocol=pickle.DEFAULT_PROTOCOL, + ) + self._open = True + + def close(self) -> None: + """Closes the shelf""" + if not self._open: + return + + try: + self._db.close() + except AttributeError: + pass + + self._open = False + + def keys(self) -> list[str]: + if not self._open: + raise ValueError(f"You must call open() before reading keys!") + + return [k for k in self._db] + + +class ShelfReader(ShelfManager): + """Reads data from a shelf""" + + def __getitem__(self, item: str) -> dict: + """Reads an item from the shelf. + + Args: + item: The key of the item to read. + + Returns: + The item. + """ + if not self._open: + raise ValueError(f"You must call open() before reading data!") + + return self._db[item] + + +class ShelfWriter(ShelfManager): + """Writes data to a shelf on-the-fly during video analysis. + + Args: + pose_cfg: The test pose config for the model. + filepath: The path where the data should be saved. + num_frames: The number of frames in the video. Used to set the number of + leading 0s in the keys of the dictionary. Default is 5 if the number of + frames is not given. + + Attributes: + filepath: The path to the shelf. + """ + + def __init__( + self, pose_cfg: dict, filepath: str | Path, num_frames: int | None = None + ): + super().__init__(filepath, flag="c") + self._pose_cfg = pose_cfg + self._num_frames = num_frames + self._frame_index = 0 + + self._str_width = 5 + if num_frames is not None: + self._str_width = int(np.ceil(np.log10(num_frames))) + + def add_prediction( + self, + bodyparts: np.ndarray, + unique_bodyparts: np.ndarray | None = None, + identity_scores: np.ndarray | None = None, + ) -> None: + """Adds the prediction for a frame to the shelf + + Args: + bodyparts: The predicted bodyparts. + unique_bodyparts: The predicted unique bodyparts, if there are any. + identity_scores: The predicted identities, if there are any. 
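        Example:
            A minimal sketch of writing one frame and reading it back. The
            shapes (3 assemblies, 12 bodyparts) and the existence of a valid
            ``pose_cfg`` (with "all_joints" and "all_joints_names" entries)
            are assumptions made for illustration:

                writer = ShelfWriter(pose_cfg, "video_full.pickle", num_frames=1000)
                writer.open()
                writer.add_prediction(bodyparts=np.zeros((3, 12, 3)))
                writer.close()

                reader = ShelfReader("video_full.pickle")
                reader.open()
                width = reader["metadata"]["key_str_width"]  # 3, so keys look like "frame000"
                frame_0 = reader["frame" + "0".zfill(width)]
                reader.close()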
+ """ + if not self._open: + raise ValueError(f"You must call open() before adding data!") + + key = "frame" + str(self._frame_index).zfill(self._str_width) + + # convert bodyparts to shape (num_bpts, num_assemblies, 3) + bodyparts = bodyparts.transpose((1, 0, 2)) + coordinates = [bpt[:, :2] for bpt in bodyparts] + scores = [bpt[:, 2:3] for bpt in bodyparts] + + # full pickle has bodyparts and unique bodyparts in same array + if unique_bodyparts is not None: + unique_bpts = unique_bodyparts.transpose((1, 0, 2)) + coordinates += [bpt[:, :2] for bpt in unique_bpts] + scores += [bpt[:, 2:] for bpt in unique_bpts] + + output = dict(coordinates=(coordinates,), confidence=scores, costs=None) + if identity_scores is not None: + # Reshape id scores from (num_assemblies, num_bpts, num_individuals) + # to the original DLC full pickle format: (num_bpts, num_assem, num_ind) + id_scores = identity_scores.transpose((1, 0, 2)) + output["identity"] = [bpt_id_scores for bpt_id_scores in id_scores] + + if unique_bodyparts is not None: + # needed for create_video_with_all_detections to display unique bpts + num_unique = unique_bodyparts.shape[1] + num_assem, num_ind = id_scores.shape[1:] + output["identity"] += [ + -1 * np.ones((num_assem, num_ind)) for i in range(num_unique) + ] + + self._db[key] = output + self._frame_index += 1 + + def close(self) -> None: + """Opens the shelf""" + if self._open and self._frame_index > 0: + self._db["metadata"]["nframes"] = self._frame_index + + super().close() + + def open(self) -> None: + """Opens the shelf""" + super().open() + self._frame_index = 0 + + all_joints = self._pose_cfg["all_joints"] + paf_graph = self._pose_cfg.get("partaffinityfield_graph", []) + + self._db["metadata"] = { + "nms radius": self._pose_cfg.get("nmsradius"), + "minimal confidence": self._pose_cfg.get("minconfidence"), + "sigma": self._pose_cfg.get("sigma", 1), + "PAFgraph": paf_graph, + "PAFinds": self._pose_cfg.get("paf_best", np.arange(len(paf_graph))), + "all_joints": [[i] for i in range(len(all_joints))], + "all_joints_names": [ + self._pose_cfg["all_joints_names"][i] for i in range(len(all_joints)) + ], + "nframes": self._num_frames, + "key_str_width": self._str_width, + } diff --git a/docs/pytorch/user_guide.md b/docs/pytorch/user_guide.md index d85aee5d0f..09d6191d1b 100644 --- a/docs/pytorch/user_guide.md +++ b/docs/pytorch/user_guide.md @@ -74,19 +74,19 @@ as well as indications which options are not yet implemented, and which paramete are not valid for the DLC 3.0 API. 
-| API Method | Implemented | Parameters not yet implemented | Parameters invalid for pytorch | -|--------------------------------|:-----------:|-----------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------| -| `train_network` | 🟢 | `keepdeconvweights` | `maxiters`, `saveiters`, `allow_growth`, `autotune` | -| `return_train_network_path` | 🟢 | | | -| `evaluate_network` | 🟢 | `comparisonbodyparts`, `rescale`, `per_keypoint_evaluation` | | -| `return_evaluate_network_data` | 🔴 | | `TFGPUinference`, `allow_growth` | -| `analyze_videos` | 🟢 | `use_shelve`, `save_as_csv`, `in_random_order`, `dynamic`, `robust_nframes`, `n_tracks`, `calibrate` | | -| `create_tracking_dataset` | 🔴 | | | -| `analyze_time_lapse_frames` | 🟠 | the name has changed to `analyze_images` to better reflect what it actually does (no video needed) | | -| `convert_detections2tracklets` | 🟢 | `greedy`, `calibrate`, `window_size` | | -| `extract_maps` | 🔴 | | | -| `visualize_scoremaps` | 🔴 | | | -| `visualize_locrefs` | 🔴 | | | -| `visualize_paf` | 🔴 | | | -| `extract_save_all_maps` | 🔴 | | | -| `export_model` | 🔴 | | | \ No newline at end of file +| API Method | Implemented | Parameters not yet implemented | Parameters invalid for pytorch | +|--------------------------------|:-----------:|-----------------------------------------------------------------------------------------------------|-----------------------------------------------------| +| `train_network` | 🟢 | `keepdeconvweights` | `maxiters`, `saveiters`, `allow_growth`, `autotune` | +| `return_train_network_path` | 🟢 | | | +| `evaluate_network` | 🟢 | `comparisonbodyparts`, `rescale`, `per_keypoint_evaluation` | | +| `return_evaluate_network_data` | 🔴 | | `TFGPUinference`, `allow_growth` | +| `analyze_videos` | 🟢 | `in_random_order`, `dynamic`, `n_tracks`, `calibrate` | | +| `create_tracking_dataset` | 🔴 | | | +| `analyze_time_lapse_frames` | 🟠 | the name has changed to `analyze_images` to better reflect what it actually does (no video needed) | | +| `convert_detections2tracklets` | 🟢 | `greedy`, `calibrate`, `window_size` | | +| `extract_maps` | 🔴 | | | +| `visualize_scoremaps` | 🔴 | | | +| `visualize_locrefs` | 🔴 | | | +| `visualize_paf` | 🔴 | | | +| `extract_save_all_maps` | 🔴 | | | +| `export_model` | 🔴 | | | \ No newline at end of file diff --git a/tests/pose_estimation_pytorch/post_processing/test_identity.py b/tests/pose_estimation_pytorch/post_processing/test_identity.py index e0acb800f3..42ae454341 100644 --- a/tests/pose_estimation_pytorch/post_processing/test_identity.py +++ b/tests/pose_estimation_pytorch/post_processing/test_identity.py @@ -49,13 +49,10 @@ def test_single_identity_assignment(prediction, identity_scores, output_order): predictions = np.array(prediction) identity_scores = np.array(identity_scores) - predictions_with_id = assign_identity([predictions], [identity_scores]) + new_order = assign_identity(predictions, identity_scores) + predictions_with_id = predictions[new_order] print() print(predictions.shape) print(identity_scores.shape) - - np.testing.assert_equal( - predictions[output_order], - predictions_with_id[0], - ) + np.testing.assert_equal(predictions[output_order], predictions_with_id) From 6128ae28e4f78cb23d0ff0ca671a643db58199c0 Mon Sep 17 00:00:00 2001 From: ti Date: Mon, 25 Nov 2024 14:39:59 +0000 Subject: [PATCH 38/88] update --- deeplabcut/pose_estimation_pytorch/apis/evaluate.py | 2 ++ 1 file changed, 2 
insertions(+) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index 32dc2c0b8a..c51e13d22b 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -21,6 +21,7 @@ import numpy as np import pandas as pd from tqdm import tqdm +import matplotlib.pyplot as plt import deeplabcut.core.metrics as metrics from deeplabcut.core.weight_init import WeightInitialization @@ -703,6 +704,7 @@ def save_evaluation_results( df_scores.to_csv(combined_scores_path) + if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--config", type=str) From fe9e85023b558e5edf3ca1e93559dc8642b3b5c8 Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 21 Nov 2024 17:31:15 +0000 Subject: [PATCH 39/88] add def visualize_coco_predictions(*) --- .../pose_estimation_pytorch/apis/evaluate.py | 90 +------------------ 1 file changed, 3 insertions(+), 87 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index c51e13d22b..ed4f0eba32 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -22,7 +22,8 @@ import pandas as pd from tqdm import tqdm import matplotlib.pyplot as plt - +import json +import os import deeplabcut.core.metrics as metrics from deeplabcut.core.weight_init import WeightInitialization from deeplabcut.pose_estimation_pytorch import utils @@ -176,92 +177,7 @@ def evaluate( predictions[image]["bodyparts"] = pose return results, predictions - - -def visualize_coco_predictions( - predictions: dict, - num_samples: int = 1, - test_file_json: str | Path = "test.json", - output_dir: str | Path | None = None, - draw_skeleton: bool = True, -) -> None: - """ - Visualize predictions using DeepLabCut's plot_gt_and_predictions function - - Args: - predictions: Dictionary with image paths as keys and prediction data as values. - Each prediction contains: - - bodyparts: numpy array of shape (1, 37, 3) - - bboxes: numpy array of shape (1, 4) - - bbox_scores: numpy array of shape (1,) - num_samples: Number of samples to visualize - test_file_json: Path to test set JSON file - output_dir: Directory to save visualization outputs. 
If None, will create - a directory next to test_file_json - draw_skeleton: Whether to draw skeleton connections between keypoints - """ - # Load ground truth data - with open(test_file_json, "r") as f: - ground_truth = json.load(f) - - if output_dir is None: - output_dir = os.path.join( - os.path.dirname(test_file_json), "predictions_visualizations" - ) - os.makedirs(output_dir, exist_ok=True) - - image_paths = list(predictions.keys()) - if num_samples: - image_paths = image_paths[:num_samples] - - # Process each image - for image_path in image_paths: - pred_data = predictions[image_path] - img_info = next( - ( - img - for img in ground_truth["images"] - if img["file_name"] == os.path.basename(image_path) - ), - None, - ) - if img_info is None: - print(f"Warning: Could not find image info for {image_path}") - continue - - gt_anns = [ - ann - for ann in ground_truth["annotations"] - if ann["image_id"] == img_info["id"] - ] - - if not gt_anns: - print(f"Warning: No ground truth annotations found for {image_path}") - continue - - gt_keypoints = np.array(gt_anns[0]["keypoints"]).reshape(1, -1, 3) - vis_mask = gt_keypoints[:, :, 2] != -1 - - visible_gt = gt_keypoints[vis_mask] - visible_gt = visible_gt[None, :, :2] - - pred_keypoints = pred_data["bodyparts"] # Keep batch dimension - visible_pred = pred_keypoints - visible_pred = pred_keypoints[vis_mask].copy() - visible_pred = np.expand_dims(visible_pred, axis=0) - - try: - plot_gt_and_predictions( - image_path=image_path, - output_dir=output_dir, - gt_bodyparts=visible_gt, - pred_bodyparts=visible_pred, - ) - print(f"Successfully plotted predictions for {image_path}") - except Exception as e: - print(f"Error plotting predictions for {image_path}: {str(e)}") - - + def plot_gt_and_predictions( image_path: str | Path, output_dir: str | Path, From d6e2a0ac7620fedd3de8f282d164291271f6e509 Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 21 Nov 2024 19:03:51 +0100 Subject: [PATCH 40/88] =?UTF-8?q?Fix:=20correct=20the=20early=20return=20e?= =?UTF-8?q?rror=20when=20save=5Fepochs=3D1=20and=20delelte=20th=E2=80=A6?= =?UTF-8?q?=20=E2=80=A6e=20redunant=20snapshots=20at=20the=20end?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- deeplabcut/pose_estimation_pytorch/runners/snapshots.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 deeplabcut/pose_estimation_pytorch/runners/snapshots.py diff --git a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py old mode 100644 new mode 100755 From 049f7fa19871d3845c71875a24c8104b697202c5 Mon Sep 17 00:00:00 2001 From: ti Date: Fri, 22 Nov 2024 16:38:42 +0100 Subject: [PATCH 41/88] black -> snapshots.py --- deeplabcut/pose_estimation_pytorch/runners/snapshots.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py index 45bf339d0b..74203f5be1 100755 --- a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py +++ b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py @@ -113,11 +113,11 @@ def update(self, epoch: int, state_dict: dict, last: bool = False) -> None: ): current_best = self.best() self._best_metric = metrics[self._key] - + # Save the new best model save_path = self.snapshot_path(epoch, best=True) parsed_state_dict = { - k: v + k: v for k, v in state_dict.items() if self.save_optimizer_state or k != "optimizer" } From 
5611e01110abe4856c3c60f9efd8fbd7e12f7984 Mon Sep 17 00:00:00 2001 From: ti Date: Tue, 26 Nov 2024 14:48:58 +0100 Subject: [PATCH 42/88] fix the vis for multi-animal --- .../pose_estimation_pytorch/apis/evaluate.py | 195 ++++++++++++++++++ 1 file changed, 195 insertions(+) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index f2b3bfea6b..4849ec5945 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -167,6 +167,201 @@ def evaluate( return results, predictions +def visualize_coco_predictions( + predictions: dict, + num_samples: int = 1, + test_file_json: str | Path = "test.json", + output_dir: str | Path | None = None, + draw_skeleton: bool = True, +) -> None: + """ + Visualize predictions using DeepLabCut's plot_gt_and_predictions function + + Args: + predictions: Dictionary with image paths as keys and prediction data as values. + Each prediction contains: + - bodyparts: numpy array of shape (1, 37, 3) + - bboxes: numpy array of shape (1, 4) + - bbox_scores: numpy array of shape (1,) + num_samples: Number of samples to visualize + test_file_json: Path to test set JSON file + output_dir: Directory to save visualization outputs. If None, will create + a directory next to test_file_json + draw_skeleton: Whether to draw skeleton connections between keypoints + """ + # Load ground truth data + with open(test_file_json, "r") as f: + ground_truth = json.load(f) + + if output_dir is None: + output_dir = os.path.join( + os.path.dirname(test_file_json), "predictions_visualizations" + ) + os.makedirs(output_dir, exist_ok=True) + + image_paths = list(predictions.keys()) + if num_samples: + image_paths = image_paths[:num_samples] + + # Process each image + for image_path in image_paths: + pred_data = predictions[image_path] + img_info = next( + ( + img + for img in ground_truth["images"] + if img["file_name"] == os.path.basename(image_path) + ), + None, + ) + if img_info is None: + print(f"Warning: Could not find image info for {image_path}") + continue + + gt_anns = [ + ann + for ann in ground_truth["annotations"] + if ann["image_id"] == img_info["id"] + ] + + if not gt_anns: + print(f"Warning: No ground truth annotations found for {image_path}") + continue + + gt_keypoints = np.array(gt_anns[0]["keypoints"]).reshape(1, -1, 3) + vis_mask = gt_keypoints[:, :, 2] != -1 + + visible_gt = gt_keypoints[vis_mask] + visible_gt = visible_gt[None, :, :2] + + pred_keypoints = pred_data["bodyparts"] # Keep batch dimension + # visible_pred = pred_keypoints + print(pred_keypoints.shape) # (13,37,3) + print(vis_mask.shape) # (1,37) + + # Modify this code to handle dimension mismatch + expanded_vis_mask = np.tile(vis_mask, (pred_keypoints.shape[0], 1)) # Expand to (13,37) + visible_pred = pred_keypoints[expanded_vis_mask].reshape(pred_keypoints.shape[0], -1, 3) + # visible_pred = visible_pred[..., :2] # Only keep x,y coordinates + + try: + plot_gt_and_predictions( + image_path=image_path, + output_dir=output_dir, + gt_bodyparts=visible_gt, + pred_bodyparts=visible_pred, + ) + print(f"Successfully plotted predictions for {image_path}") + except Exception as e: + print(f"Error plotting predictions for {image_path}: {str(e)}") + + +def plot_gt_and_predictions( + image_path: str | Path, + output_dir: str | Path, + gt_bodyparts: np.ndarray, + pred_bodyparts: np.ndarray, # (num_predicted_animals, num_keypoints, 3) + gt_unique_bodyparts: np.ndarray | None = None, + 
pred_unique_bodyparts: np.ndarray | None = None, + mode: str = "bodypart", + colormap: str = "rainbow", + dot_size: int = 12, + alpha_value: float = 0.7, + p_cutoff: float = 0.6, +): + """Plot ground truth and predictions on an image. + + Args: + image_path: Path to the image + gt_bodyparts: Ground truth keypoints array (num_animals, num_keypoints, 3) + pred_bodyparts: Predicted keypoints array (num_animals, num_keypoints, 3) + output_dir: Directory where labeled images will be saved + gt_unique_bodyparts: Ground truth unique bodyparts if any + pred_unique_bodyparts: Predicted unique bodyparts if any + mode: How to color the points ("bodypart" or "individual") + colormap: Matplotlib colormap name + dot_size: Size of the plotted points + alpha_value: Transparency of the points + p_cutoff: Confidence threshold for showing predictions + """ + # Ensure output directory exists + output_dir = Path(output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + # Read the image + frame = auxfun_videos.imread(str(image_path), mode="skimage") + num_pred, num_keypoints = pred_bodyparts.shape[:2] + + # Create figure and set dimensions + fig, ax = create_minimal_figure() + h, w, _ = np.shape(frame) + fig.set_size_inches(w / 100, h / 100) + ax.set_xlim(0, w) + ax.set_ylim(0, h) + ax.invert_yaxis() + ax.imshow(frame, "gray") + + # Set up colors based on mode + if mode == "bodypart": + num_colors = num_keypoints + if pred_unique_bodyparts is not None: + num_colors += pred_unique_bodyparts.shape[1] + colors = get_cmap(num_colors, name=colormap) + + predictions = pred_bodyparts.swapaxes(0, 1) + ground_truth = gt_bodyparts.swapaxes(0, 1) + elif mode == "individual": + colors = get_cmap(num_pred + 1, name=colormap) + predictions = pred_bodyparts + ground_truth = gt_bodyparts + else: + raise ValueError(f"Invalid mode: {mode}") + + # Plot regular bodyparts + ax = make_multianimal_labeled_image( + frame, + ground_truth, + predictions[:, :, :2], + predictions[:, :, 2:], + colors, + dot_size, + alpha_value, + p_cutoff, + ax=ax, + ) + + # Plot unique bodyparts if present + if pred_unique_bodyparts is not None and gt_unique_bodyparts is not None: + if mode == "bodypart": + unique_predictions = pred_unique_bodyparts.swapaxes(0, 1) + unique_ground_truth = gt_unique_bodyparts.swapaxes(0, 1) + else: + unique_predictions = pred_unique_bodyparts + unique_ground_truth = gt_unique_bodyparts + + ax = make_multianimal_labeled_image( + frame, + unique_ground_truth, + unique_predictions[:, :, :2], + unique_predictions[:, :, 2:], + colors[num_keypoints:], + dot_size, + alpha_value, + p_cutoff, + ax=ax, + ) + + # Save the labeled image + save_labeled_frame( + fig, + str(image_path), + str(output_dir), + belongs_to_train=False, + ) + erase_artists(ax) + plt.close() + + def evaluate_snapshot( cfg: dict, loader: DLCLoader, From da99c661ef846ded58d3425b01ada8bde3f4433d Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 21 Nov 2024 16:46:52 +0000 Subject: [PATCH 43/88] add plot_gt_and_prediction function --- .../pose_estimation_pytorch/apis/evaluate.py | 229 ++++++++++++++++++ 1 file changed, 229 insertions(+) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index f2b3bfea6b..4eb7381772 100644 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -18,6 +18,7 @@ import numpy as np import pandas as pd from tqdm import tqdm +import matplotlib.pyplot as plt import deeplabcut.core.metrics as metrics from 
deeplabcut.core.weight_init import WeightInitialization @@ -37,6 +38,14 @@ from deeplabcut.pose_estimation_pytorch.task import Task from deeplabcut.utils import auxiliaryfunctions from deeplabcut.utils.visualization import plot_evaluation_results +from deeplabcut.utils import auxfun_videos +from deeplabcut.utils.visualization import ( + create_minimal_figure, + get_cmap, + make_multianimal_labeled_image, + save_labeled_frame, + erase_artists, +) def predict( @@ -167,6 +176,224 @@ def evaluate( return results, predictions +import random +# def plot_predictions( +# loader: Loader, +# predictions: dict[str, dict[str, np.ndarray]], +# plotting: str = "bodypart", +# sample: int | None = None, +# sample_random: bool = False, +# ) -> None: + +def plot_predictions( + loader: Loader, + predictions: dict[str, dict[str, np.ndarray]], + plotting: str = "bodypart", + sample: int | None = None, + sample_random: bool = False, +) -> None: + """ + Process COCO format data and visualize using plot_evaluation_results + + Args: + loader: COCOLoader instance containing dataset info + predictions: Model predictions dictionary + plotting: How to color the points ("bodypart" or "individual") + sample: Number of images to visualize (None for all) + sample_random: Whether to sample images randomly + """ + + # Get paths and create output folder + project_root = loader.project_root + output_folder = Path(project_root) / "labeled_frames" + output_folder.mkdir(exist_ok=True) + + # 2. Get ground truth data + ground_truth = loader.load_data(mode="test") + + # 3. Create image list for sampling + image_ids = [img['id'] for img in ground_truth['images']] + if sample is not None: + if sample_random: + image_ids = random.sample(image_ids, min(sample, len(image_ids))) + else: + image_ids = image_ids[:sample] + + # 4. Create DataFrame structure + data = [] + + # Process ground truth + for img_id in image_ids: + img_info = next(img for img in ground_truth['images'] if img['id'] == img_id) + img_name = img_info['file_name'] + + # Get ground truth annotations + gt_anns = [ann for ann in ground_truth['annotations'] if ann['image_id'] == img_id] + + # Get predictions for this image + pred_anns = [pred for pred in predictions if pred['image_id'] == img_id] + + # Process each keypoint + for gt_ann, pred_ann in zip(gt_anns, pred_anns): + gt_kpts = np.array(gt_ann['keypoints']).reshape(-1, 3) + pred_kpts = np.array(pred_ann['keypoints']).reshape(-1, 3) + + # Get keypoint names + keypoint_names = ground_truth['categories'][0]['keypoints'] + + # Add ground truth points + for idx, (x, y, v) in enumerate(gt_kpts): + if v > 0: # visible keypoint + data.append({ + 'image': img_name, + 'scorer': 'ground_truth', + 'individual': f"instance_{gt_ann['id']}", + 'bodypart': keypoint_names[idx], + 'x': x, + 'y': y, + 'likelihood': 1.0 + }) + + # Add predictions + for idx, (x, y, score) in enumerate(pred_kpts): + if score > 0: # detected keypoint + data.append({ + 'image': img_name, + 'scorer': 'dlc_model', + 'individual': f"instance_{pred_ann['id']}", + 'bodypart': keypoint_names[idx], + 'x': x, + 'y': y, + 'likelihood': score + }) + + # 5. Create MultiIndex DataFrame + df = pd.DataFrame(data) + df_combined = df.set_index(['image', 'scorer', 'individual', 'bodypart']) + df_combined = df_combined.unstack(['scorer', 'individual', 'bodypart']) + + # 6. 
Call plot_evaluation_results + plot_evaluation_results( + df_combined=df_combined, + project_root=project_root, + scorer='ground_truth', + model_name='dlc_model', + output_folder=str(output_folder), + in_train_set=False, # Since we're using test data + mode=plotting, + plot_unique_bodyparts=False, # whether we should plot unique bodyparts + colormap='rainbow', # default values + dot_size=12, # default values + alpha_value=0.7, # default values + p_cutoff=0.6 # default values + ) + +def plot_gt_and_predictions( + image_path: str | Path, + output_dir: str | Path, + gt_bodyparts: np.ndarray, + pred_bodyparts: np.ndarray, # (num_predicted_animals, num_keypoints, 3) + gt_unique_bodyparts: np.ndarray | None = None, + pred_unique_bodyparts: np.ndarray | None = None, + mode: str = "bodypart", + colormap: str = "rainbow", + dot_size: int = 12, + alpha_value: float = 0.7, + p_cutoff: float = 0.6, +): + """Plot ground truth and predictions on an image. + + Args: + image_path: Path to the image + gt_bodyparts: Ground truth keypoints array (num_animals, num_keypoints, 3) + pred_bodyparts: Predicted keypoints array (num_animals, num_keypoints, 3) + output_dir: Directory where labeled images will be saved + gt_unique_bodyparts: Ground truth unique bodyparts if any + pred_unique_bodyparts: Predicted unique bodyparts if any + mode: How to color the points ("bodypart" or "individual") + colormap: Matplotlib colormap name + dot_size: Size of the plotted points + alpha_value: Transparency of the points + p_cutoff: Confidence threshold for showing predictions + """ + # Ensure output directory exists + output_dir = Path(output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + # Read the image + frame = auxfun_videos.imread(str(image_path), mode="skimage") + num_pred, num_keypoints = pred_bodyparts.shape[:2] + + # Create figure and set dimensions + fig, ax = create_minimal_figure() + h, w, _ = np.shape(frame) + fig.set_size_inches(w / 100, h / 100) + ax.set_xlim(0, w) + ax.set_ylim(0, h) + ax.invert_yaxis() + ax.imshow(frame, "gray") + + # Set up colors based on mode + if mode == "bodypart": + num_colors = num_keypoints + if pred_unique_bodyparts is not None: + num_colors += pred_unique_bodyparts.shape[1] + colors = get_cmap(num_colors, name=colormap) + + predictions = pred_bodyparts.swapaxes(0, 1) + ground_truth = gt_bodyparts.swapaxes(0, 1) + elif mode == "individual": + colors = get_cmap(num_pred + 1, name=colormap) + predictions = pred_bodyparts + ground_truth = gt_bodyparts + else: + raise ValueError(f"Invalid mode: {mode}") + + # Plot regular bodyparts + ax = make_multianimal_labeled_image( + frame, + ground_truth, + predictions[:, :, :2], + predictions[:, :, 2:], + colors, + dot_size, + alpha_value, + p_cutoff, + ax=ax, + ) + + # Plot unique bodyparts if present + if pred_unique_bodyparts is not None and gt_unique_bodyparts is not None: + if mode == "bodypart": + unique_predictions = pred_unique_bodyparts.swapaxes(0, 1) + unique_ground_truth = gt_unique_bodyparts.swapaxes(0, 1) + else: + unique_predictions = pred_unique_bodyparts + unique_ground_truth = gt_unique_bodyparts + + ax = make_multianimal_labeled_image( + frame, + unique_ground_truth, + unique_predictions[:, :, :2], + unique_predictions[:, :, 2:], + colors[num_keypoints:], + dot_size, + alpha_value, + p_cutoff, + ax=ax, + ) + + # Save the labeled image + save_labeled_frame( + fig, + str(image_path), + str(output_dir), + belongs_to_train=False, + ) + erase_artists(ax) + plt.close() + + def evaluate_snapshot( cfg: dict, loader: DLCLoader, 
@@ -289,6 +516,7 @@ def evaluate_snapshot( df_ground_truth, left_index=True, right_index=True ) unique_bodyparts = loader.get_dataset_parameters().unique_bpts + plot_evaluation_results( df_combined=df_combined, project_root=cfg["project_path"], @@ -502,6 +730,7 @@ def save_evaluation_results( df_scores.to_csv(combined_scores_path) + if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--config", type=str) From 2643d6b7eec6d0a5e389ad2ee79e9793a24fd191 Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 21 Nov 2024 16:58:40 +0000 Subject: [PATCH 44/88] delete the initial attempt at function --- .../pose_estimation_pytorch/apis/evaluate.py | 113 ------------------ 1 file changed, 113 deletions(-) mode change 100644 => 100755 deeplabcut/pose_estimation_pytorch/apis/evaluate.py diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py old mode 100644 new mode 100755 index 4eb7381772..a19ea9e0d7 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -174,119 +174,6 @@ def evaluate( predictions[image]["bodyparts"] = pose return results, predictions - - -import random -# def plot_predictions( -# loader: Loader, -# predictions: dict[str, dict[str, np.ndarray]], -# plotting: str = "bodypart", -# sample: int | None = None, -# sample_random: bool = False, -# ) -> None: - -def plot_predictions( - loader: Loader, - predictions: dict[str, dict[str, np.ndarray]], - plotting: str = "bodypart", - sample: int | None = None, - sample_random: bool = False, -) -> None: - """ - Process COCO format data and visualize using plot_evaluation_results - - Args: - loader: COCOLoader instance containing dataset info - predictions: Model predictions dictionary - plotting: How to color the points ("bodypart" or "individual") - sample: Number of images to visualize (None for all) - sample_random: Whether to sample images randomly - """ - - # Get paths and create output folder - project_root = loader.project_root - output_folder = Path(project_root) / "labeled_frames" - output_folder.mkdir(exist_ok=True) - - # 2. Get ground truth data - ground_truth = loader.load_data(mode="test") - - # 3. Create image list for sampling - image_ids = [img['id'] for img in ground_truth['images']] - if sample is not None: - if sample_random: - image_ids = random.sample(image_ids, min(sample, len(image_ids))) - else: - image_ids = image_ids[:sample] - - # 4. 
Create DataFrame structure - data = [] - - # Process ground truth - for img_id in image_ids: - img_info = next(img for img in ground_truth['images'] if img['id'] == img_id) - img_name = img_info['file_name'] - - # Get ground truth annotations - gt_anns = [ann for ann in ground_truth['annotations'] if ann['image_id'] == img_id] - - # Get predictions for this image - pred_anns = [pred for pred in predictions if pred['image_id'] == img_id] - - # Process each keypoint - for gt_ann, pred_ann in zip(gt_anns, pred_anns): - gt_kpts = np.array(gt_ann['keypoints']).reshape(-1, 3) - pred_kpts = np.array(pred_ann['keypoints']).reshape(-1, 3) - - # Get keypoint names - keypoint_names = ground_truth['categories'][0]['keypoints'] - - # Add ground truth points - for idx, (x, y, v) in enumerate(gt_kpts): - if v > 0: # visible keypoint - data.append({ - 'image': img_name, - 'scorer': 'ground_truth', - 'individual': f"instance_{gt_ann['id']}", - 'bodypart': keypoint_names[idx], - 'x': x, - 'y': y, - 'likelihood': 1.0 - }) - - # Add predictions - for idx, (x, y, score) in enumerate(pred_kpts): - if score > 0: # detected keypoint - data.append({ - 'image': img_name, - 'scorer': 'dlc_model', - 'individual': f"instance_{pred_ann['id']}", - 'bodypart': keypoint_names[idx], - 'x': x, - 'y': y, - 'likelihood': score - }) - - # 5. Create MultiIndex DataFrame - df = pd.DataFrame(data) - df_combined = df.set_index(['image', 'scorer', 'individual', 'bodypart']) - df_combined = df_combined.unstack(['scorer', 'individual', 'bodypart']) - - # 6. Call plot_evaluation_results - plot_evaluation_results( - df_combined=df_combined, - project_root=project_root, - scorer='ground_truth', - model_name='dlc_model', - output_folder=str(output_folder), - in_train_set=False, # Since we're using test data - mode=plotting, - plot_unique_bodyparts=False, # whether we should plot unique bodyparts - colormap='rainbow', # default values - dot_size=12, # default values - alpha_value=0.7, # default values - p_cutoff=0.6 # default values - ) def plot_gt_and_predictions( image_path: str | Path, From 3aa926b46878677411eb448ea35d0018cf6ffa32 Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 21 Nov 2024 17:31:15 +0000 Subject: [PATCH 45/88] add def visualize_coco_predictions(*) --- .../pose_estimation_pytorch/apis/evaluate.py | 75 ++++++++++++++++++- 1 file changed, 74 insertions(+), 1 deletion(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index a19ea9e0d7..0173d41cb8 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -19,7 +19,8 @@ import pandas as pd from tqdm import tqdm import matplotlib.pyplot as plt - +import json +import os import deeplabcut.core.metrics as metrics from deeplabcut.core.weight_init import WeightInitialization from deeplabcut.pose_estimation_pytorch import utils @@ -174,7 +175,79 @@ def evaluate( predictions[image]["bodyparts"] = pose return results, predictions + +def visualize_coco_predictions( + predictions: dict, + num_samples: int = 1, + test_file_json: str | Path = "test.json", + output_dir: str | Path | None = None, + draw_skeleton: bool = True, +) -> None: + """ + Visualize predictions using DeepLabCut's plot_gt_and_predictions function + Args: + predictions: Dictionary with image paths as keys and prediction data as values. 
+ Each prediction contains: + - bodyparts: numpy array of shape (1, 37, 3) + - bboxes: numpy array of shape (1, 4) + - bbox_scores: numpy array of shape (1,) + num_samples: Number of samples to visualize + test_file_json: Path to test set JSON file + output_dir: Directory to save visualization outputs. If None, will create + a directory next to test_file_json + draw_skeleton: Whether to draw skeleton connections between keypoints + """ + # Load ground truth data + with open(test_file_json, "r") as f: + ground_truth = json.load(f) + + if output_dir is None: + output_dir = os.path.join(os.path.dirname(test_file_json), "predictions_visualizations") + os.makedirs(output_dir, exist_ok=True) + + image_paths = list(predictions.keys()) + if num_samples: + image_paths = image_paths[:num_samples] + + # Process each image + for image_path in image_paths: + pred_data = predictions[image_path] + img_info = next((img for img in ground_truth['images'] + if img['file_name'] == os.path.basename(image_path)), None) + if img_info is None: + print(f"Warning: Could not find image info for {image_path}") + continue + + gt_anns = [ann for ann in ground_truth['annotations'] + if ann['image_id'] == img_info['id']] + + if not gt_anns: + print(f"Warning: No ground truth annotations found for {image_path}") + continue + + gt_keypoints = np.array(gt_anns[0]['keypoints']).reshape(1, -1, 3) + vis_mask = gt_keypoints[:, :, 2] != -1 + + visible_gt = gt_keypoints[vis_mask] + visible_gt = visible_gt[None, :, :2] + + pred_keypoints = pred_data['bodyparts'] # Keep batch dimension + visible_pred = pred_keypoints + visible_pred = pred_keypoints[vis_mask].copy() + visible_pred = np.expand_dims(visible_pred, axis=0) + + try: + plot_gt_and_predictions( + image_path=image_path, + output_dir=output_dir, + gt_bodyparts=visible_gt, + pred_bodyparts=visible_pred + ) + print(f"Successfully plotted predictions for {image_path}") + except Exception as e: + print(f"Error plotting predictions for {image_path}: {str(e)}") + def plot_gt_and_predictions( image_path: str | Path, output_dir: str | Path, From f0a24f7be857ebdf27675b8f7d81a243a583c569 Mon Sep 17 00:00:00 2001 From: ti Date: Mon, 25 Nov 2024 12:20:22 +0100 Subject: [PATCH 46/88] isort and black -> evaluate.py --- .../pose_estimation_pytorch/apis/evaluate.py | 78 +++++++++++-------- 1 file changed, 45 insertions(+), 33 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index 0173d41cb8..32dc2c0b8a 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -11,16 +11,17 @@ from __future__ import annotations import argparse +import json +import os from pathlib import Path from typing import Iterable import albumentations as A +import matplotlib.pyplot as plt import numpy as np import pandas as pd from tqdm import tqdm -import matplotlib.pyplot as plt -import json -import os + import deeplabcut.core.metrics as metrics from deeplabcut.core.weight_init import WeightInitialization from deeplabcut.pose_estimation_pytorch import utils @@ -37,15 +38,14 @@ from deeplabcut.pose_estimation_pytorch.runners import InferenceRunner from deeplabcut.pose_estimation_pytorch.runners.snapshots import Snapshot from deeplabcut.pose_estimation_pytorch.task import Task -from deeplabcut.utils import auxiliaryfunctions -from deeplabcut.utils.visualization import plot_evaluation_results -from deeplabcut.utils import auxfun_videos +from deeplabcut.utils import 
auxfun_videos, auxiliaryfunctions from deeplabcut.utils.visualization import ( create_minimal_figure, + erase_artists, get_cmap, make_multianimal_labeled_image, + plot_evaluation_results, save_labeled_frame, - erase_artists, ) @@ -176,6 +176,7 @@ def evaluate( return results, predictions + def visualize_coco_predictions( predictions: dict, num_samples: int = 1, @@ -185,12 +186,12 @@ def visualize_coco_predictions( ) -> None: """ Visualize predictions using DeepLabCut's plot_gt_and_predictions function - + Args: predictions: Dictionary with image paths as keys and prediction data as values. Each prediction contains: - bodyparts: numpy array of shape (1, 37, 3) - - bboxes: numpy array of shape (1, 4) + - bboxes: numpy array of shape (1, 4) - bbox_scores: numpy array of shape (1,) num_samples: Number of samples to visualize test_file_json: Path to test set JSON file @@ -203,7 +204,9 @@ def visualize_coco_predictions( ground_truth = json.load(f) if output_dir is None: - output_dir = os.path.join(os.path.dirname(test_file_json), "predictions_visualizations") + output_dir = os.path.join( + os.path.dirname(test_file_json), "predictions_visualizations" + ) os.makedirs(output_dir, exist_ok=True) image_paths = list(predictions.keys()) @@ -212,42 +215,52 @@ def visualize_coco_predictions( # Process each image for image_path in image_paths: - pred_data = predictions[image_path] - img_info = next((img for img in ground_truth['images'] - if img['file_name'] == os.path.basename(image_path)), None) + pred_data = predictions[image_path] + img_info = next( + ( + img + for img in ground_truth["images"] + if img["file_name"] == os.path.basename(image_path) + ), + None, + ) if img_info is None: print(f"Warning: Could not find image info for {image_path}") continue - - gt_anns = [ann for ann in ground_truth['annotations'] - if ann['image_id'] == img_info['id']] - + + gt_anns = [ + ann + for ann in ground_truth["annotations"] + if ann["image_id"] == img_info["id"] + ] + if not gt_anns: print(f"Warning: No ground truth annotations found for {image_path}") continue - gt_keypoints = np.array(gt_anns[0]['keypoints']).reshape(1, -1, 3) + gt_keypoints = np.array(gt_anns[0]["keypoints"]).reshape(1, -1, 3) vis_mask = gt_keypoints[:, :, 2] != -1 - + visible_gt = gt_keypoints[vis_mask] visible_gt = visible_gt[None, :, :2] - - pred_keypoints = pred_data['bodyparts'] # Keep batch dimension - visible_pred = pred_keypoints + + pred_keypoints = pred_data["bodyparts"] # Keep batch dimension + visible_pred = pred_keypoints visible_pred = pred_keypoints[vis_mask].copy() visible_pred = np.expand_dims(visible_pred, axis=0) - + try: plot_gt_and_predictions( image_path=image_path, output_dir=output_dir, gt_bodyparts=visible_gt, - pred_bodyparts=visible_pred + pred_bodyparts=visible_pred, ) print(f"Successfully plotted predictions for {image_path}") except Exception as e: print(f"Error plotting predictions for {image_path}: {str(e)}") - + + def plot_gt_and_predictions( image_path: str | Path, output_dir: str | Path, @@ -262,7 +275,7 @@ def plot_gt_and_predictions( p_cutoff: float = 0.6, ): """Plot ground truth and predictions on an image. 
- + Args: image_path: Path to the image gt_bodyparts: Ground truth keypoints array (num_animals, num_keypoints, 3) @@ -299,7 +312,7 @@ def plot_gt_and_predictions( if pred_unique_bodyparts is not None: num_colors += pred_unique_bodyparts.shape[1] colors = get_cmap(num_colors, name=colormap) - + predictions = pred_bodyparts.swapaxes(0, 1) ground_truth = gt_bodyparts.swapaxes(0, 1) elif mode == "individual": @@ -330,7 +343,7 @@ def plot_gt_and_predictions( else: unique_predictions = pred_unique_bodyparts unique_ground_truth = gt_unique_bodyparts - + ax = make_multianimal_labeled_image( frame, unique_ground_truth, @@ -342,7 +355,7 @@ def plot_gt_and_predictions( p_cutoff, ax=ax, ) - + # Save the labeled image save_labeled_frame( fig, @@ -352,8 +365,8 @@ def plot_gt_and_predictions( ) erase_artists(ax) plt.close() - - + + def evaluate_snapshot( cfg: dict, loader: DLCLoader, @@ -409,7 +422,7 @@ def evaluate_snapshot( parameters = PoseDatasetParameters( bodyparts=project_bodyparts, unique_bpts=parameters.unique_bpts, - individuals=parameters.individuals + individuals=parameters.individuals, ) predictions = {} @@ -690,7 +703,6 @@ def save_evaluation_results( df_scores.to_csv(combined_scores_path) - if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--config", type=str) From 1fdd31e765a723cb1aed116105425582314833dc Mon Sep 17 00:00:00 2001 From: ti Date: Tue, 26 Nov 2024 15:44:56 +0100 Subject: [PATCH 47/88] modify the output_dir --- deeplabcut/pose_estimation_pytorch/apis/evaluate.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index 32dc2c0b8a..a29cf14da0 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -204,10 +204,11 @@ def visualize_coco_predictions( ground_truth = json.load(f) if output_dir is None: - output_dir = os.path.join( - os.path.dirname(test_file_json), "predictions_visualizations" - ) - os.makedirs(output_dir, exist_ok=True) + output_dir = Path(test_file_json).parent / "predictions_visualizations" + else: + output_dir = Path(output_dir) + + output_dir.mkdir(exist_ok=True) image_paths = list(predictions.keys()) if num_samples: From bec1cff79be7d71d0c1684ceb7cbec24b3be20ec Mon Sep 17 00:00:00 2001 From: ti Date: Tue, 26 Nov 2024 15:55:50 +0100 Subject: [PATCH 48/88] Revert "black -> snapshots.py" This reverts commit f6704073db2ce2b422e63166277a5b7d4d3e478e. 
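Reviewer note: together with [PATCH 49/88] below, this revert restores the upstream snapshot-rotation logic, in which the snapshot about to be written is counted when trimming old files (num_to_delete = 1 + len(existing_snapshots) - self.max_snapshots). A minimal self-contained sketch of that policy, with invented names and assuming the snapshot list is ordered oldest-first:

    from pathlib import Path

    def rotate_snapshots(existing: list[Path], max_snapshots: int) -> None:
        # The "+ 1" accounts for the snapshot that is about to be saved,
        # so at most `max_snapshots` non-best snapshots remain afterwards.
        num_to_delete = max(0, 1 + len(existing) - max_snapshots)
        for path in existing[:num_to_delete]:  # delete the oldest first
            path.unlink(missing_ok=True)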
--- deeplabcut/pose_estimation_pytorch/runners/snapshots.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) mode change 100755 => 100644 deeplabcut/pose_estimation_pytorch/runners/snapshots.py diff --git a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py old mode 100755 new mode 100644 index 74203f5be1..45bf339d0b --- a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py +++ b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py @@ -113,11 +113,11 @@ def update(self, epoch: int, state_dict: dict, last: bool = False) -> None: ): current_best = self.best() self._best_metric = metrics[self._key] - + # Save the new best model save_path = self.snapshot_path(epoch, best=True) parsed_state_dict = { - k: v + k: v for k, v in state_dict.items() if self.save_optimizer_state or k != "optimizer" } From 17fe50456e166de50ec827b296f687b60cafd4a2 Mon Sep 17 00:00:00 2001 From: ti Date: Tue, 26 Nov 2024 16:01:44 +0100 Subject: [PATCH 49/88] revert changes to snapshots --- .../runners/snapshots.py | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py index 45bf339d0b..341b3c4be4 100644 --- a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py +++ b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py @@ -113,41 +113,41 @@ def update(self, epoch: int, state_dict: dict, last: bool = False) -> None: ): current_best = self.best() self._best_metric = metrics[self._key] - - # Save the new best model save_path = self.snapshot_path(epoch, best=True) parsed_state_dict = { - k: v + k: v for k, v in state_dict.items() if self.save_optimizer_state or k != "optimizer" } torch.save(parsed_state_dict, save_path) - # Handle previous best model if current_best is not None: + # rename if the current best should have been saved, otherwise delete if current_best.epochs % self.save_epochs == 0: new_name = self.snapshot_path(epoch=current_best.epochs) current_best.path.rename(new_name) else: current_best.path.unlink(missing_ok=False) - elif last or epoch % self.save_epochs == 0: - # Save regular snapshot if needed - save_path = self.snapshot_path(epoch=epoch) - parsed_state_dict = { - k: v - for k, v in state_dict.items() - if self.save_optimizer_state or k != "optimizer" - } - torch.save(parsed_state_dict, save_path) + return + + if not (last or epoch % self.save_epochs == 0): + return - # Clean up old snapshots if needed existing_snapshots = [s for s in self.snapshots() if not s.best] if len(existing_snapshots) >= self.max_snapshots: - num_to_delete = len(existing_snapshots) - self.max_snapshots + num_to_delete = 1 + len(existing_snapshots) - self.max_snapshots to_delete = existing_snapshots[:num_to_delete] for snapshot in to_delete: snapshot.path.unlink(missing_ok=False) + save_path = self.snapshot_path(epoch=epoch) + parsed_state_dict = { + k: v + for k, v in state_dict.items() + if self.save_optimizer_state or k != "optimizer" + } + torch.save(parsed_state_dict, save_path) + def best(self) -> Snapshot | None: """Returns: the path to the best snapshot, if it exists""" snapshots = self.snapshots() From bf808616dfe430e443c66f1dd8df5f2ab05dd21e Mon Sep 17 00:00:00 2001 From: ti Date: Tue, 26 Nov 2024 17:28:48 +0100 Subject: [PATCH 50/88] optimize the input params --- .../pose_estimation_pytorch/apis/evaluate.py | 71 ++++++++----------- 1 file changed, 30 insertions(+), 41 deletions(-) diff --git 
a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index a29cf14da0..0ab6602655 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -179,10 +179,11 @@ def evaluate( def visualize_coco_predictions( predictions: dict, - num_samples: int = 1, - test_file_json: str | Path = "test.json", + ground_truth: dict, # Dictionary mapping image paths to keypoints output_dir: str | Path | None = None, draw_skeleton: bool = True, + num_samples: int | None = None, + random_select: bool = False, ) -> None: """ Visualize predictions using DeepLabCut's plot_gt_and_predictions function @@ -193,62 +194,50 @@ def visualize_coco_predictions( - bodyparts: numpy array of shape (1, 37, 3) - bboxes: numpy array of shape (1, 4) - bbox_scores: numpy array of shape (1,) + ground_truth: Dictionary containing ground truth 2D keypoints in format (x, y, vis_label) num_samples: Number of samples to visualize - test_file_json: Path to test set JSON file - output_dir: Directory to save visualization outputs. If None, will create - a directory next to test_file_json + output_dir: Directory to save visualization outputs draw_skeleton: Whether to draw skeleton connections between keypoints + num_samples: Number of samples to visualize. If None, visualize all samples + random_select: If True, randomly select samples; if False, use first N samples """ - # Load ground truth data - with open(test_file_json, "r") as f: - ground_truth = json.load(f) - if output_dir is None: - output_dir = Path(test_file_json).parent / "predictions_visualizations" + output_dir = Path("predictions_visualizations") else: output_dir = Path(output_dir) output_dir.mkdir(exist_ok=True) image_paths = list(predictions.keys()) - if num_samples: - image_paths = image_paths[:num_samples] + + # Sample selection + if num_samples is not None and num_samples < len(image_paths): + if random_select: + image_paths = np.random.choice(image_paths, num_samples, replace=False).tolist() + else: + image_paths = image_paths[:num_samples] # Process each image for image_path in image_paths: pred_data = predictions[image_path] - img_info = next( - ( - img - for img in ground_truth["images"] - if img["file_name"] == os.path.basename(image_path) - ), - None, - ) - if img_info is None: - print(f"Warning: Could not find image info for {image_path}") - continue - - gt_anns = [ - ann - for ann in ground_truth["annotations"] - if ann["image_id"] == img_info["id"] - ] - - if not gt_anns: - print(f"Warning: No ground truth annotations found for {image_path}") - continue - - gt_keypoints = np.array(gt_anns[0]["keypoints"]).reshape(1, -1, 3) - vis_mask = gt_keypoints[:, :, 2] != -1 - + + # Get ground truth keypoints for this image + gt_keypoints = ground_truth[image_path] # Get GT keypoints for this specific image + + # Create visibility mask from ground truth + vis_mask = gt_keypoints[:, :, 2] > 0 # Use visibility label + + # Get visible ground truth points visible_gt = gt_keypoints[vis_mask] - visible_gt = visible_gt[None, :, :2] + visible_gt = visible_gt[None, :, :2] # Keep only x,y coordinates pred_keypoints = pred_data["bodyparts"] # Keep batch dimension - visible_pred = pred_keypoints - visible_pred = pred_keypoints[vis_mask].copy() - visible_pred = np.expand_dims(visible_pred, axis=0) + print(pred_keypoints.shape) # (13,37,3) + print(vis_mask.shape) # (1,37) + + # Handle dimension mismatch + expanded_vis_mask = np.tile(vis_mask, (pred_keypoints.shape[0], 
1))
+        visible_pred = pred_keypoints[expanded_vis_mask].reshape(pred_keypoints.shape[0], -1, 3)
 
         try:
             plot_gt_and_predictions(

From 694a814ff012eb72fcb4e4b98f4e8bae50deb85f Mon Sep 17 00:00:00 2001
From: ti
Date: Tue, 26 Nov 2024 17:02:29 +0000
Subject: [PATCH 51/88] rename the function and fix the multi-animal plotting
 problem

---
 .../pose_estimation_pytorch/apis/evaluate.py | 37 +++++++++++--------
 1 file changed, 21 insertions(+), 16 deletions(-)

diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py
index 0ab6602655..7481da7f9c 100755
--- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py
+++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py
@@ -177,7 +177,7 @@ def evaluate(
     return results, predictions
 
 
-def visualize_coco_predictions(
+def visualize_predictions(
     predictions: dict,
     ground_truth: dict,  # Dictionary mapping image paths to keypoints
     output_dir: str | Path | None = None,
@@ -221,23 +221,28 @@ def visualize_coco_predictions(
     for image_path in image_paths:
         pred_data = predictions[image_path]
 
-        # Get ground truth keypoints for this image
-        gt_keypoints = ground_truth[image_path]  # Get GT keypoints for this specific image
+        # Get ground truth keypoints for this image [N, num_joints, 3]
+        gt_keypoints = ground_truth[image_path]
 
-        # Create visibility mask from ground truth
-        vis_mask = gt_keypoints[:, :, 2] > 0  # Use visibility label
-
-        # Get visible ground truth points
-        visible_gt = gt_keypoints[vis_mask]
-        visible_gt = visible_gt[None, :, :2]  # Keep only x,y coordinates
-
-        pred_keypoints = pred_data["bodyparts"]  # Keep batch dimension
-        print(pred_keypoints.shape)  # (13,37,3)
-        print(vis_mask.shape)  # (1,37)
+        # Create visibility mask from the first GT sample
+        # This will be used for all GT samples
+        vis_mask = gt_keypoints[0, :, 2] > 0  # [num_joints]
+
+        # Get visible ground truth points for all samples
+        visible_gt = []
+        for gt in gt_keypoints:
+            visible_points = gt[vis_mask, :2]  # Keep only x,y coordinates for visible joints
+            visible_gt.append(visible_points)
+        visible_gt = np.stack(visible_gt)  # [N, num_visible_joints, 2]
+
+        pred_keypoints = pred_data["bodyparts"]  # [N, num_visible_joints, 3]
 
-        # Handle dimension mismatch
-        expanded_vis_mask = np.tile(vis_mask, (pred_keypoints.shape[0], 1))
-        visible_pred = pred_keypoints[expanded_vis_mask].reshape(pred_keypoints.shape[0], -1, 3)
+        # Apply same visibility mask to all predictions
+        visible_pred = []
+        for pred in pred_keypoints:
+            visible_points = pred[vis_mask]  # Keep points corresponding to visible joints
+            visible_pred.append(visible_points)
+        visible_pred = np.stack(visible_pred)  # [13, num_visible_joints, 3]
 
         try:
             plot_gt_and_predictions(

From a731901351c98535c9bbfd8aed211eebb7d21c65 Mon Sep 17 00:00:00 2001
From: ti
Date: Tue, 26 Nov 2024 18:06:53 +0100
Subject: [PATCH 52/88] use num_individuals

---
 deeplabcut/pose_estimation_pytorch/apis/evaluate.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py
index 7481da7f9c..ac496ea971 100755
--- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py
+++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py
@@ -191,9 +191,9 @@ def visualize_predictions(
 
     Args:
         predictions: Dictionary with image paths as keys and prediction data as values.
Each prediction contains: - - bodyparts: numpy array of shape (1, 37, 3) - - bboxes: numpy array of shape (1, 4) - - bbox_scores: numpy array of shape (1,) + - bodyparts: numpy array of shape (num_individuals, num_keypoints, 3) + - bboxes: numpy array of shape (num_individuals, 4) + - bbox_scores: numpy array of shape (num_individuals,) ground_truth: Dictionary containing ground truth 2D keypoints in format (x, y, vis_label) num_samples: Number of samples to visualize output_dir: Directory to save visualization outputs @@ -225,7 +225,6 @@ def visualize_predictions( gt_keypoints = ground_truth[image_path] # Create visibility mask from the first GT sample - # This will be used for all GT samples vis_mask = gt_keypoints[0, :, 2] > 0 # [num_joints] # Get visible ground truth points for all samples From 74e89544e265bb2ee64f793f45df7c0361e8b9c9 Mon Sep 17 00:00:00 2001 From: ti Date: Tue, 26 Nov 2024 18:17:35 +0100 Subject: [PATCH 53/88] optimize the codes --- .../pose_estimation_pytorch/apis/evaluate.py | 89 ++++++++++--------- 1 file changed, 48 insertions(+), 41 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index ac496ea971..1b2dc56c83 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -179,70 +179,77 @@ def evaluate( def visualize_predictions( predictions: dict, - ground_truth: dict, # Dictionary mapping image paths to keypoints + ground_truth: dict, output_dir: str | Path | None = None, draw_skeleton: bool = True, num_samples: int | None = None, - random_select: bool = False, + random_select: bool = False, ) -> None: - """ - Visualize predictions using DeepLabCut's plot_gt_and_predictions function + """Visualize model predictions alongside ground truth keypoints. + + This function processes keypoint predictions and ground truth data, applies visibility + masks, and generates visualization plots. It supports random or sequential sampling + of images for visualization. Args: - predictions: Dictionary with image paths as keys and prediction data as values. - Each prediction contains: - - bodyparts: numpy array of shape (num_individuals, num_keypoints, 3) - - bboxes: numpy array of shape (num_individuals, 4) - - bbox_scores: numpy array of shape (num_individuals,) - ground_truth: Dictionary containing ground truth 2D keypoints in format (x, y, vis_label) - num_samples: Number of samples to visualize - output_dir: Directory to save visualization outputs + predictions: Dictionary mapping image paths to prediction data. + Each prediction contains: + - bodyparts: array of shape [N, num_keypoints, 3] where 3 represents (x, y, confidence) + - bboxes: array of shape [N, 4] for bounding boxes (optional) + - bbox_scores: array of shape [N,] for bbox confidences (optional) + + ground_truth: Dictionary mapping image paths to ground truth keypoints. + Each value has shape [N, num_keypoints, 3] where 3 represents (x, y, visibility) + + output_dir: Path to save visualization outputs. + Defaults to "predictions_visualizations" + draw_skeleton: Whether to draw skeleton connections between keypoints - num_samples: Number of samples to visualize. If None, visualize all samples - random_select: If True, randomly select samples; if False, use first N samples - """ - if output_dir is None: - output_dir = Path("predictions_visualizations") - else: - output_dir = Path(output_dir) + num_samples: Number of images to visualize. 
If None, processes all images + + random_select: If True, randomly samples images; if False, uses first N images + """ + # Setup output directory + output_dir = Path(output_dir or "predictions_visualizations") output_dir.mkdir(exist_ok=True) + # Select images to process image_paths = list(predictions.keys()) - - # Sample selection - if num_samples is not None and num_samples < len(image_paths): + if num_samples and num_samples < len(image_paths): if random_select: - image_paths = np.random.choice(image_paths, num_samples, replace=False).tolist() + image_paths = np.random.choice( + image_paths, num_samples, replace=False + ).tolist() else: image_paths = image_paths[:num_samples] - # Process each image + # Process each selected image for image_path in image_paths: + # Get prediction and ground truth data pred_data = predictions[image_path] - - # Get ground truth keypoints for this image [N, num_joints, 3] - gt_keypoints = ground_truth[image_path] - - # Create visibility mask from the first GT sample - vis_mask = gt_keypoints[0, :, 2] > 0 # [num_joints] - - # Get visible ground truth points for all samples + gt_keypoints = ground_truth[image_path] # Shape: [N, num_keypoints, 3] + + # Create visibility mask from first GT sample + # This mask will be applied to all samples for consistency + vis_mask = gt_keypoints[0, :, 2] > 0 # Shape: [num_keypoints] + + # Process ground truth keypoints visible_gt = [] for gt in gt_keypoints: - visible_points = gt[vis_mask, :2] # Keep only x,y coordinates for visible joints + visible_points = gt[vis_mask, :2] # Keep only x,y for visible joints visible_gt.append(visible_points) - visible_gt = np.stack(visible_gt) # [N, num_visible_joints, 2] - - pred_keypoints = pred_data["bodyparts"] # [N, num_visible_joints, 3] - - # Apply same visibility mask to all predictions + visible_gt = np.stack(visible_gt) # Shape: [N, num_visible_joints, 2] + + # Process predicted keypoints + pred_keypoints = pred_data["bodyparts"] # Shape: [N, num_keypoints, 3] visible_pred = [] for pred in pred_keypoints: - visible_points = pred[vis_mask] # Keep points corresponding to visible joints + visible_points = pred[vis_mask] # Keep only visible joint predictions visible_pred.append(visible_points) - visible_pred = np.stack(visible_pred) # [13, num_visible_joints, 3] + visible_pred = np.stack(visible_pred) # Shape: [N, num_visible_joints, 3] + # Generate and save visualization try: plot_gt_and_predictions( image_path=image_path, @@ -259,7 +266,7 @@ def plot_gt_and_predictions( image_path: str | Path, output_dir: str | Path, gt_bodyparts: np.ndarray, - pred_bodyparts: np.ndarray, # (num_predicted_animals, num_keypoints, 3) + pred_bodyparts: np.ndarray, gt_unique_bodyparts: np.ndarray | None = None, pred_unique_bodyparts: np.ndarray | None = None, mode: str = "bodypart", From c4b5a4a31f3595c21e94def3aaf375fe7d75d494 Mon Sep 17 00:00:00 2001 From: ti Date: Tue, 26 Nov 2024 21:12:47 +0000 Subject: [PATCH 54/88] add show_ground_truth param for future use --- .../pose_estimation_pytorch/apis/evaluate.py | 25 +++++++++++-------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index 1b2dc56c83..0df6c2321c 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -176,7 +176,6 @@ def evaluate( return results, predictions - def visualize_predictions( predictions: dict, ground_truth: dict, @@ -184,6 +183,7 @@ def 
visualize_predictions( draw_skeleton: bool = True, num_samples: int | None = None, random_select: bool = False, + show_ground_truth: bool = True, ) -> None: """Visualize model predictions alongside ground truth keypoints. @@ -209,6 +209,9 @@ def visualize_predictions( num_samples: Number of images to visualize. If None, processes all images random_select: If True, randomly samples images; if False, uses first N images + + show_ground_truth: If True, displays ground truth poses alongside predictions. + If False, only shows predictions but uses GT visibility mask """ # Setup output directory output_dir = Path(output_dir or "predictions_visualizations") @@ -230,16 +233,18 @@ def visualize_predictions( pred_data = predictions[image_path] gt_keypoints = ground_truth[image_path] # Shape: [N, num_keypoints, 3] - # Create visibility mask from first GT sample - # This mask will be applied to all samples for consistency - vis_mask = gt_keypoints[0, :, 2] > 0 # Shape: [num_keypoints] + # Create visibility mask from first GT sample. This mask will be applied to all samples for consistency + vis_mask = gt_keypoints[0, :, 2] > 0 - # Process ground truth keypoints - visible_gt = [] - for gt in gt_keypoints: - visible_points = gt[vis_mask, :2] # Keep only x,y for visible joints - visible_gt.append(visible_points) - visible_gt = np.stack(visible_gt) # Shape: [N, num_visible_joints, 2] + # Process ground truth keypoints if showing GT + if show_ground_truth: + visible_gt = [] + for gt in gt_keypoints: + visible_points = gt[vis_mask, :2] # Keep only x,y for visible joints + visible_gt.append(visible_points) + visible_gt = np.stack(visible_gt) # Shape: [N, num_visible_joints, 2] + else: + visible_gt = None # Process predicted keypoints pred_keypoints = pred_data["bodyparts"] # Shape: [N, num_keypoints, 3] From e33a7453cb8b3e8c97351d5c8acf1c2871652e33 Mon Sep 17 00:00:00 2001 From: ti Date: Tue, 26 Nov 2024 21:22:11 +0000 Subject: [PATCH 55/88] black --- deeplabcut/pose_estimation_pytorch/apis/evaluate.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index 0df6c2321c..b370ac362b 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -176,6 +176,7 @@ def evaluate( return results, predictions + def visualize_predictions( predictions: dict, ground_truth: dict, @@ -244,7 +245,7 @@ def visualize_predictions( visible_gt.append(visible_points) visible_gt = np.stack(visible_gt) # Shape: [N, num_visible_joints, 2] else: - visible_gt = None + visible_gt = None # Process predicted keypoints pred_keypoints = pred_data["bodyparts"] # Shape: [N, num_keypoints, 3] From 6110d5b821f4b7393a1c54942f597ecb69e09560 Mon Sep 17 00:00:00 2001 From: ti Date: Mon, 2 Dec 2024 21:51:11 +0100 Subject: [PATCH 56/88] modify --- deeplabcut/pose_estimation_pytorch/apis/analyze_videos.py | 4 +++- deeplabcut/pose_estimation_pytorch/apis/evaluate.py | 1 + deeplabcut/pose_estimation_pytorch/runners/snapshots.py | 1 - 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/analyze_videos.py b/deeplabcut/pose_estimation_pytorch/apis/analyze_videos.py index 383be0a933..4faf4c9b7a 100644 --- a/deeplabcut/pose_estimation_pytorch/apis/analyze_videos.py +++ b/deeplabcut/pose_estimation_pytorch/apis/analyze_videos.py @@ -145,12 +145,14 @@ def video_inference( print(f"Running detector with batch size 
{detector_runner.batch_size}") bbox_predictions = detector_runner.inference(images=tqdm(video)) + print(bbox_predictions[0].keys()) video.set_context(bbox_predictions) print(f"Running pose prediction with batch size {pose_runner.batch_size}") if shelf_writer is not None: shelf_writer.open() - + + predictions = pose_runner.inference(images=tqdm(video), shelf_writer=shelf_writer) if shelf_writer is not None: shelf_writer.close() diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index b370ac362b..ef51d55b7c 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -82,6 +82,7 @@ def predict( else: ground_truth_bboxes = loader.ground_truth_bboxes(mode=mode) context = [{"bboxes": ground_truth_bboxes[image]} for image in image_paths] + print(context[0]) images_with_context = image_paths if context is not None: diff --git a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py index 285ee2adc5..ec6a956728 100755 --- a/deeplabcut/pose_estimation_pytorch/runners/snapshots.py +++ b/deeplabcut/pose_estimation_pytorch/runners/snapshots.py @@ -117,7 +117,6 @@ def update(self, epoch: int, state_dict: dict, last: bool = False) -> None: # Save the new best model save_path = self.snapshot_path(epoch, best=True) parsed_state_dict = { - k: v k: v for k, v in state_dict.items() if self.save_optimizer_state or k != "optimizer" From 80fce453b619febd6f0b35468c0f1010467c1a75 Mon Sep 17 00:00:00 2001 From: maximpavliv Date: Wed, 13 Nov 2024 18:01:48 +0100 Subject: [PATCH 57/88] save bboxes in _full.pickle --- deeplabcut/pose_estimation_pytorch/apis/analyze_videos.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/deeplabcut/pose_estimation_pytorch/apis/analyze_videos.py b/deeplabcut/pose_estimation_pytorch/apis/analyze_videos.py index 905b0448bd..6201abc2c9 100644 --- a/deeplabcut/pose_estimation_pytorch/apis/analyze_videos.py +++ b/deeplabcut/pose_estimation_pytorch/apis/analyze_videos.py @@ -689,6 +689,10 @@ def _generate_output_data( "costs": None, } + if "bboxes" in frame_predictions: + output[key]["bboxes"] = frame_predictions["bboxes"] + output[key]["bbox_scores"] = frame_predictions["bbox_scores"] + if "identity_scores" in frame_predictions: # Reshape id scores from (num_assemblies, num_bpts, num_individuals) # to the original DLC full pickle format: (num_bpts, num_assem, num_ind) From f39f03b6ad40507b7e10b4555819d651bbfc76e7 Mon Sep 17 00:00:00 2001 From: maximpavliv Date: Fri, 15 Nov 2024 10:44:16 +0100 Subject: [PATCH 58/88] Plot bounding boxes in network evaluation --- deeplabcut/compat.py | 1 + .../pose_estimation_pytorch/apis/evaluate.py | 13 ++ .../pose_estimation_pytorch/apis/utils.py | 64 ++++++++-- deeplabcut/utils/visualization.py | 120 +++++++++++++----- 4 files changed, 159 insertions(+), 39 deletions(-) diff --git a/deeplabcut/compat.py b/deeplabcut/compat.py index 448d0211c4..91ad02dd19 100644 --- a/deeplabcut/compat.py +++ b/deeplabcut/compat.py @@ -363,6 +363,7 @@ def evaluate_network( If provided it must be either ``True``, ``False``, ``"bodypart"``, or ``"individual"``. Setting to ``True`` defaults as ``"bodypart"`` for multi-animal projects. + If a detector is used, the predicted bounding boxes will also be plotted. show_errors: bool, optional, default=True Display train and test errors. 
diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index 40981865de..bdcf277818 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -30,6 +30,7 @@ get_model_snapshots, get_scorer_name, get_scorer_uid, + build_bboxes_dict_for_dataframe, ) from deeplabcut.pose_estimation_pytorch.data import DLCLoader, Loader from deeplabcut.pose_estimation_pytorch.data.dataset import PoseDatasetParameters @@ -428,6 +429,7 @@ def evaluate_snapshot( ) predictions = {} + bounding_boxes = {} scores = { "%Training dataset": loader.train_fraction, "Shuffle number": loader.shuffle, @@ -452,7 +454,12 @@ def evaluate_snapshot( parameters=parameters, image_name_to_index=image_to_dlc_df_index, ) + split_bounding_boxes = build_bboxes_dict_for_dataframe( + predictions=predictions_for_split, + image_name_to_index=image_to_dlc_df_index, + ) predictions[split] = df_split_predictions + bounding_boxes[split] = split_bounding_boxes for k, v in results.items(): scores[f"{split} {k}"] = round(v, 2) @@ -486,11 +493,15 @@ def evaluate_snapshot( plot_mode = "bodypart" df_ground_truth = ensure_multianimal_df_format(loader.df) + + bboxes_cutoff = loader.model_cfg.get("detector", {}).get("model", {}).get("box_score_thresh", 0.6) + for mode in ["train", "test"]: df_combined = predictions[mode].merge( df_ground_truth, left_index=True, right_index=True ) unique_bodyparts = loader.get_dataset_parameters().unique_bpts + bboxes_split = bounding_boxes[mode] plot_evaluation_results( df_combined=df_combined, @@ -505,6 +516,8 @@ def evaluate_snapshot( dot_size=cfg["dotsize"], alpha_value=cfg["alphavalue"], p_cutoff=cfg["pcutoff"], + bounding_boxes=bboxes_split, + bboxes_cutoff=bboxes_cutoff ) return df_predictions diff --git a/deeplabcut/pose_estimation_pytorch/apis/utils.py b/deeplabcut/pose_estimation_pytorch/apis/utils.py index e886ab452f..ff3203e2e1 100644 --- a/deeplabcut/pose_estimation_pytorch/apis/utils.py +++ b/deeplabcut/pose_estimation_pytorch/apis/utils.py @@ -324,6 +324,26 @@ def ensure_multianimal_df_format(df_predictions: pd.DataFrame) -> pd.DataFrame: return df_predictions_ma +def _image_names_to_df_index( + image_names: list[str], + image_name_to_index: Callable[[str], tuple[str, ...]] | None = None, +) -> pd.MultiIndex | list[str]: + """ + Creates index for predictions dataframe. + This method is used in build_predictions_dataframe, but also in build_bboxes_dict_for_dataframe. + It is important that these two methods return objects with the same index / keys. 
+
+    Args:
+        image_names: list of image names
+        image_name_to_index, optional: a transform to apply on each image_name
+    """
+
+    if image_name_to_index is not None:
+        return pd.MultiIndex.from_tuples([image_name_to_index(image_name) for image_name in image_names])
+    else:
+        return image_names
+
+
 def build_predictions_dataframe(
     scorer: str,
     predictions: dict[str, dict[str, np.ndarray]],
@@ -341,23 +361,18 @@ def build_predictions_dataframe(
 
     Returns:
     """
+    image_names = []
     prediction_data = []
-    index_data = []
-    for image, image_predictions in predictions.items():
+    for image_name, image_predictions in predictions.items():
         image_data = image_predictions["bodyparts"][..., :3].reshape(-1)
         if "unique_bodyparts" in image_predictions:
             image_data = np.concatenate(
                 [image_data, image_predictions["unique_bodyparts"][..., :3].reshape(-1)]
             )
-
+        image_names.append(image_name)
         prediction_data.append(image_data)
-        if image_name_to_index is not None:
-            index_data.append(image_name_to_index(image))
 
-    if len(index_data) > 0:
-        index = pd.MultiIndex.from_tuples(index_data)
-    else:
-        index = list(predictions.keys())
+    index = _image_names_to_df_index(image_names, image_name_to_index)
 
     return pd.DataFrame(
         prediction_data,
@@ -370,6 +385,37 @@ def build_predictions_dataframe(
     )
 
 
+def build_bboxes_dict_for_dataframe(
+    predictions: dict[str, dict[str, np.ndarray]],
+    image_name_to_index: Callable[[str], tuple[str, ...]] | None = None,
+) -> dict:
+    """
+    Creates a dictionary with bounding boxes from predictions.
+    The keys of the dictionary are the same as the index of the dataframe created by build_predictions_dataframe.
+    Therefore, the structures returned by build_predictions_dataframe and by build_bboxes_dict_for_dataframe
+    can be accessed with the same keys.
+
+    Args:
+        predictions: Dictionary containing the evaluation results
+        image_name_to_index, optional: a transform to apply on each image_name
+
+    Returns:
+        Dictionary with the same keys as in the dataframe returned by build_predictions_dataframe,
+        and respective bounding boxes and scores, if any.
+    """
+
+    image_names = []
+    bboxes_data = []
+    for image_name, image_predictions in predictions.items():
+        image_names.append(image_name)
+        if "bboxes" in image_predictions:
+            bboxes_data.append((image_predictions["bboxes"], image_predictions["bbox_scores"]))
+
+    index = _image_names_to_df_index(image_names, image_name_to_index)
+
+    return dict(zip(index, bboxes_data))
+
+
 def get_inference_runners(
     model_config: dict,
     snapshot_path: str | Path,
diff --git a/deeplabcut/utils/visualization.py b/deeplabcut/utils/visualization.py
index 4c20ae8e77..e7c95c9a3e 100644
--- a/deeplabcut/utils/visualization.py
+++ b/deeplabcut/utils/visualization.py
@@ -25,15 +25,27 @@
 import numpy as np
 import pandas as pd
 from matplotlib.collections import LineCollection
+from matplotlib.colors import Colormap
+import matplotlib.patches as patches
 from skimage import io, color
 from tqdm import trange
 
 from deeplabcut.utils import auxiliaryfunctions, auxfun_videos
 
 
-def get_cmap(n, name="hsv"):
-    """Returns a function that maps each index in 0, 1, ..., n-1 to a distinct
-    RGB color; the keyword argument name must be a standard mpl colormap name."""
+def get_cmap(
+    n: int,
+    name: str = "hsv"
+) -> Colormap:
+    """
+    Args:
+        n: number of distinct colors
+        name: name of matplotlib colormap
+
+    Returns:
+        A function that maps each index in 0, 1, ..., n-1 to a distinct
+        RGB color; the keyword argument name must be a standard mpl colormap name.
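+
+    Example:
+        The returned object can be sampled by integer index, e.g.:
+
+        >>> cmap = get_cmap(5, name="viridis")
+        >>> rgba = cmap(0)  # RGBA tuple for the first of the 5 colors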
+ """ return plt.cm.get_cmap(name, n) @@ -105,21 +117,61 @@ def make_labeled_image( def make_multianimal_labeled_image( - frame, - coords_truth, - coords_pred, - probs_pred, - colors, - dotsize=12, - alphavalue=0.7, - pcutoff=0.6, - labels=["+", ".", "x"], - ax=None, -): + frame: np.ndarray, + coords_truth: np.ndarray | list, + coords_pred: np.ndarray | list, + probs_pred: np.ndarray | list, + colors: Colormap, + dotsize: float | int = 12, + alphavalue: float = 0.7, + pcutoff: float = 0.6, + labels: list = ["+", ".", "x"], + ax: plt.Axes | None = None, + bounding_boxes: tuple[np.ndarray, np.ndarray] | None = None, + bounding_boxes_color='k', + bboxes_cutoff: float = 0.6, +) -> plt.Axes: + """ + Plots groundtruth labels and predictions onto the matplotlib's axes, with the specified graphical parameters. + + Args: + frame: image + coords_truth: groundtruth labels + coords_pred: predictions + probs_pred: prediction probabilities + colors: colors for poses + dotsize: size of dot + alphavalue: + pcutoff: cut-off confidence value + labels: labels to use for ground truth, reliable predictions, and not reliable predictions (confidence below cut-off value) + ax: matplotlib plot's axes object + bounding_boxes: bounding boxes (top-left corner, size) and their respective confice levels, + bounding_boxes_color: If bounding_boxes is not None, this is the color that will be used for plotting them + bboxes_cutoff: bounding boxes confidence cutoff threshold. + + Returns: + matplotlib Axes object with plotted labels and predictions. + """ + if ax is None: h, w, _ = np.shape(frame) _, ax = prepare_figure_axes(w, h) ax.imshow(frame, "gray") + + if bounding_boxes is not None: + for bbox, bbox_score in zip(bounding_boxes[0], bounding_boxes[1]): + bbox_origin = (bbox[0], bbox[1]) + (bbox_width, bbox_height) = (bbox[2], bbox[3]) + rectangle = patches.Rectangle( + bbox_origin, + bbox_width, + bbox_height, + linewidth=1, + edgecolor=bounding_boxes_color, + facecolor='none', + linestyle = '--' if bbox_score < bboxes_cutoff else '-') + ax.add_patch(rectangle) + for n, data in enumerate(zip(coords_truth, coords_pred, probs_pred)): color = colors(n) coord_gt, coord_pred, prob_pred = data @@ -394,6 +446,8 @@ def plot_evaluation_results( dot_size: int = 12, alpha_value: float = 0.7, p_cutoff: float = 0.6, + bounding_boxes: dict = {}, + bboxes_cutoff: float = 0.6, ) -> None: """ Creates labeled images using the results of inference, and saves them to an output @@ -415,6 +469,8 @@ def plot_evaluation_results( dot_size: the dot size to use for keypoints alpha_value: the alpha value to use for keypoints p_cutoff: the p-cutoff for "confident" keypoints + bounding_boxes: dictionary with df_combined rows as keys and bounding boxes (np array for coordinates and np array for confidence) + bboxes_cutoff: bounding boxes confidence cutoff threshold. 
""" for row_index, row in df_combined.iterrows(): if isinstance(row_index, str): @@ -440,6 +496,8 @@ def plot_evaluation_results( ground_truth = df_gt.to_numpy().reshape((individuals, bodyparts, 2)) predictions = df_predictions.to_numpy().reshape((individuals, bodyparts, 3)) + bboxes = bounding_boxes.get(row_index) + if plot_unique_bodyparts: row_unique = row.loc[ (slice(None), row.index.get_level_values("individuals") == "single") @@ -480,28 +538,30 @@ def plot_evaluation_results( colors = [] ax = make_multianimal_labeled_image( - frame, - ground_truth, - predictions[:, :, :2], - predictions[:, :, 2:], - colors, - dot_size, - alpha_value, - p_cutoff, + frame=frame, + coords_truth=ground_truth, + coords_pred=predictions[:, :, :2], + probs_pred=predictions[:, :, 2:], + colors=colors, + dotsize=dot_size, + alphavalue=alpha_value, + pcutoff=p_cutoff, ax=ax, + bounding_boxes=bboxes, + bboxes_cutoff=bboxes_cutoff, ) if plot_unique_bodyparts: unique_predictions = unique_predictions.swapaxes(0, 1) unique_ground_truth = unique_ground_truth.swapaxes(0, 1) ax = make_multianimal_labeled_image( - frame, - unique_ground_truth, - unique_predictions[:, :, :2], - unique_predictions[:, :, 2:], - colors, - dot_size, - alpha_value, - p_cutoff, + frame=frame, + coords_truth=unique_ground_truth, + coords_pred=unique_predictions[:, :, :2], + probs_pred=unique_predictions[:, :, 2:], + colors=colors, + dotsize=dot_size, + alphavalue=alpha_value, + pcutoff=p_cutoff, ax=ax, ) From 566aa393c11de0bca1c093908b1f2a4979db0cae Mon Sep 17 00:00:00 2001 From: maximpavliv Date: Mon, 18 Nov 2024 13:03:24 +0100 Subject: [PATCH 59/88] Plot bboxes in create_video_with_all_detections --- deeplabcut/utils/make_labeled_video.py | 34 +++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/deeplabcut/utils/make_labeled_video.py b/deeplabcut/utils/make_labeled_video.py index 828132d654..4d20301b8f 100644 --- a/deeplabcut/utils/make_labeled_video.py +++ b/deeplabcut/utils/make_labeled_video.py @@ -40,7 +40,7 @@ import pandas as pd from matplotlib.animation import FFMpegWriter from matplotlib.collections import LineCollection -from skimage.draw import disk, line_aa, set_color +from skimage.draw import disk, line_aa, set_color, rectangle_perimeter from skimage.util import img_as_ubyte from tqdm import trange @@ -1061,6 +1061,7 @@ def create_video_with_all_detections( destfolder=None, modelprefix="", confidence_to_alpha: Union[bool, Callable[[float], float]] = False, + plot_bboxes: bool = True ): """ Create a video labeled with all the detections stored in a '*_full.pickle' file. @@ -1100,6 +1101,10 @@ def create_video_with_all_detections( defined as a function f: [0, 1] -> [0, 1] such that the alpha value for a keypoint will be set as a function of its score: alpha = f(score). The default function used when True is f(x) = x. + + plot_bboxes: bool, optional (default=True) + If detections were produced using a Pytorch Top-Down model, setting this parameter to True will also plot + the bounding boxes generated by the detector. 
""" import re @@ -1171,12 +1176,39 @@ def create_video_with_all_detections( clip = vp(fname=video, sname=outputname, codec="mp4v") ny, nx = clip.height(), clip.width() + bboxes_pcutoff = metadata.get("data", {}).get("pytorch-config", {}).get("detector", {}).get("model", {}).get("box_score_thresh", 0.6) + bboxes_color = (0, 0, 0) + for n in trange(clip.nframes): frame = clip.load_frame() if frame is None: continue try: ind = frames.index(n) + + # Draw bounding boxes of required and present + if plot_bboxes and "bboxes" in data[frame_names[ind]]: + bboxes = data[frame_names[ind]]["bboxes"] + bbox_scores = data[frame_names[ind]]["bbox_scores"] + n_bboxes = bboxes.shape[0] + for i in range(n_bboxes): + bbox = bboxes[i,:] + x, y = bbox[0], bbox[1] + x += x1 + y += y1 + w, h = bbox[2], bbox[3] + confidence = bbox_scores[i] + if confidence < bboxes_pcutoff: + continue + rect_coords = rectangle_perimeter(start=(y, x), extent=(h, w)) + + set_color( + frame, + rect_coords, + bboxes_color, + ) + + # Draw detected bodyparts dets = Assembler._flatten_detections(data[frame_names[ind]]) for det in dets: if det.label not in bpts or det.confidence < pcutoff: From ec7d64303d29e79ed2fbd419922a1914cd13a61c Mon Sep 17 00:00:00 2001 From: maximpavliv Date: Tue, 19 Nov 2024 16:28:21 +0100 Subject: [PATCH 60/88] Plot bounding boxes in create_labeled_video --- deeplabcut/modelzoo/video_inference.py | 8 + .../modelzoo/inference.py | 13 +- deeplabcut/utils/auxiliaryfunctions.py | 87 +++++++-- deeplabcut/utils/make_labeled_video.py | 184 ++++++++++++++---- 4 files changed, 244 insertions(+), 48 deletions(-) diff --git a/deeplabcut/modelzoo/video_inference.py b/deeplabcut/modelzoo/video_inference.py index da5efe5c84..64c32caf4a 100644 --- a/deeplabcut/modelzoo/video_inference.py +++ b/deeplabcut/modelzoo/video_inference.py @@ -59,6 +59,7 @@ def video_inference_superanimal( customized_pose_checkpoint: Optional[str] = None, customized_detector_checkpoint: Optional[str] = None, customized_model_config: Optional[str] = None, + plot_bboxes: bool = True, ): """ This function performs inference on videos using a pretrained SuperAnimal model. @@ -150,6 +151,9 @@ def video_inference_superanimal( customized_model_config (str): Used for loading customized model config. Only supported in Pytorch + plot_bboxes (bool): + If using Top-Down approach, whether to plot the detector's bounding boxes. The default is True. + Raises: NotImplementedError: If the model is not found in the modelzoo. 
@@ -356,6 +360,8 @@ def video_inference_superanimal(
             cropping=cropping,
             dest_folder=dest_folder,
             output_suffix=output_suffix,
+            plot_bboxes=plot_bboxes,
+            bboxes_pcutoff=bbox_threshold,
         )
 
     # we prepare the pseudo dataset in the same folder of the target video
@@ -493,4 +499,6 @@ def video_inference_superanimal(
         cropping=cropping,
         dest_folder=dest_folder,
         output_suffix=output_suffix,
+        plot_bboxes=plot_bboxes,
+        bboxes_pcutoff=bbox_threshold,
     )
diff --git a/deeplabcut/pose_estimation_pytorch/modelzoo/inference.py b/deeplabcut/pose_estimation_pytorch/modelzoo/inference.py
index e4d42ecef1..8842f4ed5e 100644
--- a/deeplabcut/pose_estimation_pytorch/modelzoo/inference.py
+++ b/deeplabcut/pose_estimation_pytorch/modelzoo/inference.py
@@ -59,6 +59,8 @@ def _video_inference_superanimal(
     cropping: list[int] | None = None,
     dest_folder: Optional[str] = None,
     output_suffix: str = "",
+    plot_bboxes: bool = True,
+    bboxes_pcutoff: float = 0.9,
 ) -> dict:
     """
     Perform inference on a video using a superanimal model from the model zoo specified by `superanimal_name`.
@@ -131,8 +133,6 @@ def _video_inference_superanimal(
     output_json = output_h5.with_suffix(".json")
     if len(output_suffix) > 0:
-        # str(output_h5).replace(".h5", "_before_adapt.json")
-        # str(output_h5).replace(".h5", "_after_adapt.json")
         output_json = output_json.with_stem(output_h5.stem + output_suffix)
 
     video = VideoIterator(video_path, cropping=cropping)
@@ -143,6 +143,12 @@ def _video_inference_superanimal(
         detector_runner=detector_runner,
     )
 
+    bbox_keys_in_predictions = {"bboxes", "bbox_scores"}
+    bboxes_list = [{key: value
+                    for key, value in p.items()
+                    if key in bbox_keys_in_predictions}
+                    for i, p in enumerate(predictions)]
+
     bbox = cropping
     if cropping is None:
         vid_w, vid_h = video.dimensions
@@ -176,6 +182,9 @@ def _video_inference_superanimal(
             bbox=bbox,
             cmap=colormap,
             output_path=str(output_video),
+            plot_bboxes=plot_bboxes,
+            bboxes_list=bboxes_list,
+            bboxes_pcutoff=bboxes_pcutoff,
         )
         print(f"Video with predictions was saved as {output_path}")
 
diff --git a/deeplabcut/utils/auxiliaryfunctions.py b/deeplabcut/utils/auxiliaryfunctions.py
index 914c440ece..84da4ca76e 100644
--- a/deeplabcut/utils/auxiliaryfunctions.py
+++ b/deeplabcut/utils/auxiliaryfunctions.py
@@ -506,6 +506,54 @@ def grab_files_in_folder(folder, ext="", relative=True):
         yield file if relative else os.path.join(folder, file)
 
 
+def filter_files_by_patterns(
+        folder: str | Path,
+        start_patterns: set[str] | None = None,
+        contain_patterns: set[str] | None = None,
+        end_patterns: set[str] | None = None
+) -> List[Path]:
+    """
+    Filters files in a folder based on start, contain, and end patterns.
+
+    Args:
+        folder (str | Path): The folder to search for files.
+
+        start_patterns (set[str] | None): Patterns the filenames should start with.
+            If None or empty, this pattern is not taken into account.
+
+        contain_patterns (set[str] | None): Patterns the filenames should contain.
+            If None or empty, this pattern is not taken into account.
+
+        end_patterns (set[str] | None): Patterns the filenames should end with.
+            If None or empty, this pattern is not taken into account.
+
+    Returns:
+        List[Path]: List of files that match the criteria.
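+
+    Example (sketch): find pickled full-data files for a video analyzed with a
+    given scorer (the names are placeholders)::
+
+        filter_files_by_patterns(
+            "videos",
+            start_patterns={"videoDLC_resnet50"},
+            contain_patterns={"full"},
+            end_patterns={"pickle"},
+        )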
+ """ + folder = Path(folder) # Ensure the folder is a Path object + if not folder.is_dir(): + raise ValueError(f"{folder} is not a valid directory.") + + # Filter files based on the given patterns + matching_files = [ + file + for file in folder.iterdir() + if file.is_file() + and ( + not start_patterns + or any(file.name.startswith(start) for start in start_patterns) + ) and ( + not contain_patterns + or any(contain in file.name for contain in contain_patterns) + ) and ( + not end_patterns + or any(file.name.endswith(end) for end in end_patterns) + ) + ] + + return matching_files + + def get_video_list(filename, videopath, videtype): """Get list of videos in a path (if filetype == all), otherwise just a specific file.""" videos = list(grab_files_in_folder(videopath, videtype)) @@ -864,30 +912,47 @@ def check_if_not_evaluated(folder, DLCscorer, DLCscorerlegacy, snapshot): return True, dataname, DLCscorer +def find_video_full_data(folder, videoname, scorer): + scorer_legacy = scorer.replace("DLC", "DeepCut") + full_files = filter_files_by_patterns( + folder=folder, + start_patterns={videoname+scorer, videoname+scorer_legacy}, + contain_patterns={"full"}, + end_patterns={"pickle"}, + ) + if not full_files: + raise FileNotFoundError( + f"No full data found in {folder} " + f"for video {videoname} and scorer {scorer}." + ) + return full_files[0] + + def find_video_metadata(folder, videoname, scorer): """For backward compatibility, let us search the substring 'meta'""" scorer_legacy = scorer.replace("DLC", "DeepCut") - meta = [ - file - for file in grab_files_in_folder(folder, "pickle") - if "meta" in file - and ( - file.startswith(videoname + scorer) - or file.startswith(videoname + scorer_legacy) - ) - ] - if not len(meta): + meta_files = filter_files_by_patterns( + folder=folder, + start_patterns={videoname+scorer, videoname+scorer_legacy}, + contain_patterns={"meta"}, + end_patterns={"pickle"}, + ) + if not meta_files: raise FileNotFoundError( f"No metadata found in {folder} " f"for video {videoname} and scorer {scorer}." 
        )
-    return os.path.join(folder, meta[0])
+    return meta_files[0]
 
 
 def load_video_metadata(folder, videoname, scorer):
     return read_pickle(find_video_metadata(folder, videoname, scorer))
 
 
+def load_video_full_data(folder, videoname, scorer):
+    return read_pickle(find_video_full_data(folder, videoname, scorer))
+
+
 def find_analyzed_data(folder, videoname, scorer, filtered=False, track_method=""):
     """Find potential data files from the hints given to the function."""
     scorer_legacy = scorer.replace("DLC", "DeepCut")
diff --git a/deeplabcut/utils/make_labeled_video.py b/deeplabcut/utils/make_labeled_video.py
index 4d20301b8f..7a5ac28ebb 100644
--- a/deeplabcut/utils/make_labeled_video.py
+++ b/deeplabcut/utils/make_labeled_video.py
@@ -38,6 +38,7 @@
 import matplotlib.pyplot as plt
 import numpy as np
 import pandas as pd
+from matplotlib import patches
 from matplotlib.animation import FFMpegWriter
 from matplotlib.collections import LineCollection
 from skimage.draw import disk, line_aa, set_color, rectangle_perimeter
@@ -86,6 +87,9 @@ def CreateVideo(
     displaycropped,
     color_by,
     confidence_to_alpha=None,
+    plot_bboxes=True,
+    bboxes_list=None,
+    bboxes_pcutoff=0.6,
 ):
     """Creating individual frames with labeled body parts and making a video"""
     bpts = Dataframe.columns.get_level_values("bodyparts")
@@ -152,12 +156,36 @@ def CreateVideo(
     C = colorclass.to_rgba(np.linspace(0, 1, nindividuals))
     colors = (C[:, :3] * 255).astype(np.uint8)
 
+    bboxes_color = (0, 0, 0)
+
     with np.errstate(invalid="ignore"):
         for index in trange(min(nframes, len(Dataframe))):
             image = clip.load_frame()
             if displaycropped:
                 image = image[y1:y2, x1:x2]
 
+            # Draw bounding boxes if required and present
+            if plot_bboxes and bboxes_list:
+                bboxes = bboxes_list[index]["bboxes"] 
+                bbox_scores = bboxes_list[index]["bbox_scores"]
+                n_bboxes = bboxes.shape[0]
+                for i in range(n_bboxes):
+                    bbox = bboxes[i, :]
+                    x, y = bbox[0], bbox[1]
+                    x += x1
+                    y += y1
+                    w, h = bbox[2], bbox[3]
+                    confidence = bbox_scores[i]
+                    if confidence < bboxes_pcutoff:
+                        continue
+                    rect_coords = rectangle_perimeter(start=(y, x), extent=(h, w))
+
+                    set_color(
+                        image,
+                        rect_coords,
+                        bboxes_color,
+                    )
+
             # Draw the skeleton for specific bodyparts to be connected as
             # specified in the config file
             if draw_skeleton:
@@ -225,10 +253,11 @@ def CreateVideoSlow(
     draw_skeleton,
     displaycropped,
     color_by,
+    plot_bboxes=True,
+    bboxes_list=None,
+    bboxes_pcutoff=0.6,
 ):
     """Creating individual frames with labeled body parts and making a video"""
-    # scorer=np.unique(Dataframe.columns.get_level_values(0))[0]
-    # bodyparts2plot = list(np.unique(Dataframe.columns.get_level_values(1)))
 
     if displaycropped:
         ny, nx = y2 - y1, x2 - x1
@@ -285,6 +314,8 @@ def CreateVideoSlow(
     else:
         colors = visualization.get_cmap(nbodyparts, name=colormap)
 
+    bounding_boxes_color = "k"
+
     nframes_digits = int(np.ceil(np.log10(nframes)))
     if nframes_digits > 9:
         raise Exception(
@@ -313,6 +344,28 @@ def CreateVideoSlow(
             image = image[y1:y2, x1:x2]
         ax.imshow(image)
 
+        # Draw bounding boxes if required and present
+        if plot_bboxes and bboxes_list:
+            bboxes = bboxes_list[index]["bboxes"] 
+            bbox_scores = bboxes_list[index]["bbox_scores"]
+            n_bboxes = bboxes.shape[0]
+            for i in range(n_bboxes):
+                bbox = bboxes[i, :]
+                bbox_origin = (bbox[0], bbox[1])
+                (bbox_width, bbox_height) = (bbox[2], bbox[3])
+                bbox_confidence = bbox_scores[i]
+                if bbox_confidence < bboxes_pcutoff:
+                    continue
+                rectangle = patches.Rectangle(
+                    bbox_origin,
+                    bbox_width,
+                    bbox_height,
+                    linewidth = 1,
+                    edgecolor = bounding_boxes_color,
+                    facecolor = 'none')
+                ax.add_patch(rectangle)
+
+        # Draw skeleton
         if draw_skeleton:
             for bpt1, bpt2 in bpts2connect:
                 if np.all(df_likelihood[[bpt1, bpt2], index] > pcutoff):
@@ -323,6 +376,7 @@ def CreateVideoSlow(
                     alpha=alphavalue,
                 )
 
+        # Draw bodyparts
         for ind, num_bp, num_ind in bpts2color:
             if df_likelihood[ind, index] > pcutoff:
                 if color_by == "bodypart":
@@ -361,37 +415,39 @@ def CreateVideoSlow(
 
 
 def create_labeled_video(
-    config,
-    videos,
-    videotype="",
-    shuffle=1,
-    trainingsetindex=0,
-    filtered=False,
-    fastmode=True,
-    save_frames=False,
-    keypoints_only=False,
-    Frames2plot=None,
-    displayedbodyparts="all",
-    displayedindividuals="all",
-    codec="mp4v",
-    outputframerate=None,
-    destfolder=None,
-    draw_skeleton=False,
-    trailpoints=0,
-    displaycropped=False,
-    color_by="bodypart",
-    modelprefix="",
-    init_weights="",
-    track_method="",
-    superanimal_name="",
-    pcutoff=None,
-    skeleton=[],
-    skeleton_color="white",
-    dotsize=8,
-    colormap="rainbow",
-    alphavalue=0.5,
-    overwrite=False,
+    config: str,
+    videos: list[str],
+    videotype: str = "",
+    shuffle: int = 1,
+    trainingsetindex: int = 0,
+    filtered: bool = False,
+    fastmode: bool = True,
+    save_frames: bool = False,
+    keypoints_only: bool = False,
+    Frames2plot: list[int] | None = None,
+    displayedbodyparts: list[str] | str = "all",
+    displayedindividuals: list[str] | str = "all",
+    codec: str = "mp4v",
+    outputframerate : int | None = None,
+    destfolder: Path | str | None = None,
+    draw_skeleton: bool = False,
+    trailpoints: int = 0,
+    displaycropped: bool = False,
+    color_by: str = "bodypart",
+    modelprefix: str = "",
+    init_weights: str = "",
+    track_method: str = "",
+    superanimal_name: str = "",
+    pcutoff: float | None = None,
+    skeleton: list = [],
+    skeleton_color: str = "white",
+    dotsize: int = 8,
+    colormap: str = "rainbow",
+    alphavalue: float = 0.5,
+    overwrite: bool = False,
     confidence_to_alpha: Union[bool, Callable[[float], float]] = False,
+    plot_bboxes: bool = True,
+    bboxes_pcutoff: float | None = None,
 ):
     """Labels the bodyparts in a video.
@@ -468,7 +524,7 @@ def create_labeled_video(
         mode with saving frames.) If ``None``, which results in the original
         video rate.
 
-    destfolder: string or None, optional, default=None
+    destfolder: Path, string or None, optional, default=None
         Specifies the destination folder that was used for storing analysis data. If
         ``None``, the path of the video file is used.
 
@@ -503,9 +559,25 @@ def create_labeled_video(
         For multiple animals, must be either 'box', 'skeleton', or 'ellipse' and will
         be taken from the config.yaml file if none is given.
 
-    pcutoff: string, optional, default=None
+    superanimal_name: str, optional, default=""
+        Name of the superanimal model.
+
+    pcutoff: float, optional, default=None
        Overrides the pcutoff set in the project configuration to plot the trajectories.
 
+    skeleton: list, optional, default=[]
+        Body-part pairs to be connected when drawing the skeleton.
+
+    skeleton_color: string, optional, default="white"
+        Color of the skeleton.
+
+    dotsize: int, optional, default=8
+        Size of the label dots to use.
+
+    colormap: str, optional, default="rainbow"
+        Colormap to use for the labels.
+
+    alphavalue: float, optional, default=0.5
+        Transparency of the plotted keypoints.
+
     overwrite: bool, optional, default=False
         If ``True`` overwrites existing labeled videos.
 
@@ -515,6 +587,12 @@ def create_labeled_video(
        keypoint will be set as a function of its score: alpha = f(score). The
        default function used when True is f(x) = max(0, (x - pcutoff)/(1 - pcutoff)).
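+
+        Example (sketch): fade keypoints in linearly above a p-cutoff of 0.6
+        (``config_path`` and the video path are placeholders)::
+
+            create_labeled_video(
+                config_path,
+                ["/path/to/video.mp4"],
+                confidence_to_alpha=lambda score: max(0.0, (score - 0.6) / 0.4),
+            )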
+    plot_bboxes: bool, optional, default=True
+        If using PyTorch and in Top-Down mode, setting this to ``True`` will also plot the bounding boxes.
+
+    bboxes_pcutoff: float, optional, default=None
+        If plotting bounding boxes, this overrides the bboxes_pcutoff set in the model configuration.
+
     Returns
     -------
     results : list[bool]
@@ -567,6 +645,8 @@ def create_labeled_video(
     if config == "":
         if pcutoff is None:
             pcutoff = 0.6
+        if bboxes_pcutoff is None:
+            bboxes_pcutoff = 0.6
 
         individuals = [""]
         uniquebodyparts = []
@@ -605,6 +685,11 @@ def create_labeled_video(
             superanimal_name = model_config["train_settings"]["weight_init"][
                 "dataset"
             ]
+            if bboxes_pcutoff is None:
+                bboxes_pcutoff = model_config.get("detector", {}).get("model", {}).get("box_score_thresh", 0.6)
+        else:
+            if bboxes_pcutoff is None:
+                bboxes_pcutoff = 0.6
 
     if init_weights == "":
         DLCscorer, DLCscorerlegacy = auxiliaryfunctions.get_scorer_name(
@@ -702,6 +787,8 @@ def create_labeled_video(
             init_weights=init_weights,
             pcutoff=pcutoff,
             confidence_to_alpha=confidence_to_alpha,
+            plot_bboxes=plot_bboxes,
+            bboxes_pcutoff=bboxes_pcutoff,
         )
 
     if get_start_method() == "fork":
@@ -743,6 +830,8 @@ def proc_video(
     init_weights="",
     pcutoff: float | None = None,
     confidence_to_alpha: Optional[Callable[[float], float]] = None,
+    plot_bboxes: bool = True,
+    bboxes_pcutoff: float = 0.6,
 ):
     """Helper function for create_videos
 
@@ -755,7 +844,7 @@ def proc_video(
     result : bool
         ``True`` if a video is successfully created.
     """
-    videofolder = Path(video).parents[0]
+    videofolder = Path(video).parent
     if destfolder is None:
         destfolder = videofolder  # where your folder with videos is.
@@ -790,6 +879,9 @@ def proc_video(
                 df, filepath, _, _ = auxiliaryfunctions.load_analyzed_data(
                     destfolder, vname, DLCscorer, filtered, track_method
                 )
+                full_data = auxiliaryfunctions.load_video_full_data(
+                    destfolder, vname, DLCscorer
+                )
                 metadata = auxiliaryfunctions.load_video_metadata(
                     destfolder, vname, DLCscorer
                )
@@ -815,6 +907,15 @@ def proc_video(
                     if bp in bodyparts
                 ]
 
+                frames_dict = {
+                    int(key.replace("frame", "")): value
+                    for key, value in full_data.items()
+                    if key.startswith("frame") and key[5:].isdigit()
+                }
+                bboxes_list = None
+                if "bboxes" in frames_dict.get(min(frames_dict.keys()), {}):
+                    bboxes_list = [frames_dict[key] for key in sorted(frames_dict.keys())]
+
                 if keypoints_only:
                     # Mask rather than drop unwanted bodyparts to ensure consistent coloring
                     mask = df.columns.get_level_values("bodyparts").isin(bodyparts)
@@ -866,6 +967,9 @@ def proc_video(
                     draw_skeleton,
                     displaycropped,
                     color_by,
+                    plot_bboxes=plot_bboxes,
+                    bboxes_list=bboxes_list,
+                    bboxes_pcutoff=bboxes_pcutoff,
                 )
                 clip.close()
             else:
@@ -887,7 +991,11 @@ def proc_video(
                     fps=outputframerate,
                     display_cropped=displaycropped,
                     confidence_to_alpha=confidence_to_alpha,
+                    plot_bboxes=plot_bboxes,
+                    bboxes_list=bboxes_list,
+                    bboxes_pcutoff=bboxes_pcutoff,
                 )
+
             return True
 
         except FileNotFoundError as e:
@@ -913,6 +1021,9 @@ def _create_labeled_video(
     fps=None,
     output_path="",
     confidence_to_alpha=None,
+    plot_bboxes=True,
+    bboxes_list=None,
+    bboxes_pcutoff=0.6,
 ):
     if color_by not in ("bodypart", "individual"):
         raise ValueError("`color_by` should be either 'bodypart' or 'individual'.")
@@ -967,6 +1078,9 @@ def _create_labeled_video(
         display_cropped,
         color_by,
         confidence_to_alpha=confidence_to_alpha,
+        plot_bboxes=plot_bboxes,
+        bboxes_list=bboxes_list,
+        bboxes_pcutoff=bboxes_pcutoff,
     )
 

From e5372e6ee4fbe7f63ce0654bbb772c472ab07fd7 Mon Sep 17 00:00:00 2001
From: maximpavliv
Date: Fri, 22 Nov 2024 19:45:58 
+0100 Subject: [PATCH 61/88] black --- .../apis/analyze_videos.py | 22 ++++++------ .../pose_estimation_pytorch/apis/evaluate.py | 8 +++-- .../pose_estimation_pytorch/apis/utils.py | 8 +++-- .../modelzoo/inference.py | 8 ++--- deeplabcut/utils/auxiliaryfunctions.py | 30 +++++++++------- deeplabcut/utils/make_labeled_video.py | 35 +++++++++++++------ deeplabcut/utils/visualization.py | 12 +++---- 7 files changed, 75 insertions(+), 48 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/analyze_videos.py b/deeplabcut/pose_estimation_pytorch/apis/analyze_videos.py index 6201abc2c9..e7ad68621f 100644 --- a/deeplabcut/pose_estimation_pytorch/apis/analyze_videos.py +++ b/deeplabcut/pose_estimation_pytorch/apis/analyze_videos.py @@ -45,7 +45,10 @@ class VideoIterator(VideoReader): """A class to iterate over videos, with possible added context""" def __init__( - self, video_path: str | Path, context: list[dict[str, Any]] | None = None, cropping: list[int] | None = None + self, + video_path: str | Path, + context: list[dict[str, Any]] | None = None, + cropping: list[int] | None = None, ) -> None: super().__init__(str(video_path)) self._context = context @@ -288,7 +291,10 @@ def analyze_videos( pose_cfg = auxiliaryfunctions.read_plainconfig(pose_cfg_path) snapshot_index, detector_snapshot_index = parse_snapshot_index_for_analysis( - cfg, model_cfg, snapshot_index, detector_snapshot_index, + cfg, + model_cfg, + snapshot_index, + detector_snapshot_index, ) if cropping is None and cfg.get("cropping", False): @@ -476,14 +482,10 @@ def create_df_from_prediction( output_prefix: str | Path, save_as_csv: bool = False, ) -> pd.DataFrame: - pred_bodyparts = np.stack( - [p["bodyparts"][..., :3] for p in predictions] - ) + pred_bodyparts = np.stack([p["bodyparts"][..., :3] for p in predictions]) pred_unique_bodyparts = None if len(predictions) > 0 and "unique_bodyparts" in predictions[0]: - pred_unique_bodyparts = np.stack( - [p["unique_bodyparts"] for p in predictions] - ) + pred_unique_bodyparts = np.stack([p["unique_bodyparts"] for p in predictions]) output_h5 = Path(output_path) / f"{output_prefix}.h5" output_pkl = Path(output_path) / f"{output_prefix}_full.pickle" @@ -510,7 +512,7 @@ def create_df_from_prediction( index=range(len(pred_bodyparts)), ) if pred_unique_bodyparts is not None: - unique_columns = [dlc_scorer], ['single'], unique_bodyparts, coords + unique_columns = [dlc_scorer], ["single"], unique_bodyparts, coords df_u = pd.DataFrame( pred_unique_bodyparts.reshape((len(pred_unique_bodyparts), -1)), columns=pd.MultiIndex.from_product(unique_columns, names=cols_names), @@ -690,7 +692,7 @@ def _generate_output_data( } if "bboxes" in frame_predictions: - output[key]["bboxes"] = frame_predictions["bboxes"] + output[key]["bboxes"] = frame_predictions["bboxes"] output[key]["bbox_scores"] = frame_predictions["bbox_scores"] if "identity_scores" in frame_predictions: diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index bdcf277818..05dc97ad6e 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -494,7 +494,11 @@ def evaluate_snapshot( df_ground_truth = ensure_multianimal_df_format(loader.df) - bboxes_cutoff = loader.model_cfg.get("detector", {}).get("model", {}).get("box_score_thresh", 0.6) + bboxes_cutoff = ( + loader.model_cfg.get("detector", {}) + .get("model", {}) + .get("box_score_thresh", 0.6) + ) for mode in ["train", "test"]: df_combined = 
predictions[mode].merge( @@ -517,7 +521,7 @@ def evaluate_snapshot( alpha_value=cfg["alphavalue"], p_cutoff=cfg["pcutoff"], bounding_boxes=bboxes_split, - bboxes_cutoff=bboxes_cutoff + bboxes_cutoff=bboxes_cutoff, ) return df_predictions diff --git a/deeplabcut/pose_estimation_pytorch/apis/utils.py b/deeplabcut/pose_estimation_pytorch/apis/utils.py index ff3203e2e1..eaaec0fbc6 100644 --- a/deeplabcut/pose_estimation_pytorch/apis/utils.py +++ b/deeplabcut/pose_estimation_pytorch/apis/utils.py @@ -339,7 +339,9 @@ def _image_names_to_df_index( """ if image_name_to_index is not None: - return pd.MultiIndex.from_tuples([image_name_to_index(image_name) for image_name in image_names]) + return pd.MultiIndex.from_tuples( + [image_name_to_index(image_name) for image_name in image_names] + ) else: return image_names @@ -409,7 +411,9 @@ def build_bboxes_dict_for_dataframe( for image_name, image_predictions in predictions.items(): image_names.append(image_name) if "bboxes" in image_predictions: - bboxes_data.append((image_predictions["bboxes"], image_predictions["bbox_scores"])) + bboxes_data.append( + (image_predictions["bboxes"], image_predictions["bbox_scores"]) + ) index = _image_names_to_df_index(image_names, image_name_to_index) diff --git a/deeplabcut/pose_estimation_pytorch/modelzoo/inference.py b/deeplabcut/pose_estimation_pytorch/modelzoo/inference.py index 8842f4ed5e..fbd61a1b41 100644 --- a/deeplabcut/pose_estimation_pytorch/modelzoo/inference.py +++ b/deeplabcut/pose_estimation_pytorch/modelzoo/inference.py @@ -144,10 +144,10 @@ def _video_inference_superanimal( ) bbox_keys_in_predictions = {"bboxes", "bbox_scores"} - bboxes_list = [{key: value - for key, value in p.items() - if key in bbox_keys_in_predictions} - for i, p in enumerate(predictions)] + bboxes_list = [ + {key: value for key, value in p.items() if key in bbox_keys_in_predictions} + for i, p in enumerate(predictions) + ] bbox = cropping if cropping is None: diff --git a/deeplabcut/utils/auxiliaryfunctions.py b/deeplabcut/utils/auxiliaryfunctions.py index 84da4ca76e..817211b53d 100644 --- a/deeplabcut/utils/auxiliaryfunctions.py +++ b/deeplabcut/utils/auxiliaryfunctions.py @@ -319,7 +319,7 @@ def get_bodyparts(cfg: dict) -> typing.List[str]: return cfg["bodyparts"] -def get_unique_bodyparts(cfg : dict) -> typing.List[str]: +def get_unique_bodyparts(cfg: dict) -> typing.List[str]: """ Args: cfg: a project configuration file @@ -507,10 +507,10 @@ def grab_files_in_folder(folder, ext="", relative=True): def filter_files_by_patterns( - folder: str | Path, - start_patterns: set[str] | None = None, - contain_patterns: set[str] | None = None, - end_patterns: set[str] | None = None + folder: str | Path, + start_patterns: set[str] | None = None, + contain_patterns: set[str] | None = None, + end_patterns: set[str] | None = None, ) -> List[Path]: """ Filters files in a folder based on start, contain, and end patterns. 
@@ -542,13 +542,12 @@ def filter_files_by_patterns( and ( not start_patterns or any(file.name.startswith(start) for start in start_patterns) - ) and ( + ) + and ( not contain_patterns or any(contain in file.name for contain in contain_patterns) - ) and ( - not end_patterns - or any(file.name.endswith(end) for end in end_patterns) ) + and (not end_patterns or any(file.name.endswith(end) for end in end_patterns)) ] return matching_files @@ -659,6 +658,7 @@ def get_evaluation_folder( """ if engine is None: from deeplabcut.generate_training_dataset.metadata import get_shuffle_engine + engine = get_shuffle_engine( cfg=cfg, trainingsetindex=cfg["TrainingFraction"].index(trainFraction), @@ -760,6 +760,7 @@ def get_scorer_name( """ if engine is None: from deeplabcut.generate_training_dataset.metadata import get_shuffle_engine + engine = get_shuffle_engine( cfg=cfg, trainingsetindex=cfg["TrainingFraction"].index(trainFraction), @@ -769,6 +770,7 @@ def get_scorer_name( if engine == Engine.PYTORCH: from deeplabcut.pose_estimation_pytorch.apis.utils import get_scorer_name + snapshot_index = None if isinstance(trainingsiterations, int): snapshot_index = trainingsiterations @@ -801,7 +803,11 @@ def get_scorer_name( dlc_cfg = read_plainconfig( os.path.join( cfg["project_path"], - str(get_model_folder(trainFraction, shuffle, cfg, engine=engine, modelprefix=modelprefix)), + str( + get_model_folder( + trainFraction, shuffle, cfg, engine=engine, modelprefix=modelprefix + ) + ), "train", engine.pose_cfg_name, ) @@ -916,7 +922,7 @@ def find_video_full_data(folder, videoname, scorer): scorer_legacy = scorer.replace("DLC", "DeepCut") full_files = filter_files_by_patterns( folder=folder, - start_patterns={videoname+scorer, videoname+scorer_legacy}, + start_patterns={videoname + scorer, videoname + scorer_legacy}, contain_patterns={"full"}, end_patterns={"pickle"}, ) @@ -933,7 +939,7 @@ def find_video_metadata(folder, videoname, scorer): scorer_legacy = scorer.replace("DLC", "DeepCut") meta_files = filter_files_by_patterns( folder=folder, - start_patterns={videoname+scorer, videoname+scorer_legacy}, + start_patterns={videoname + scorer, videoname + scorer_legacy}, contain_patterns={"meta"}, end_patterns={"pickle"}, ) diff --git a/deeplabcut/utils/make_labeled_video.py b/deeplabcut/utils/make_labeled_video.py index 7a5ac28ebb..9a209e37e6 100644 --- a/deeplabcut/utils/make_labeled_video.py +++ b/deeplabcut/utils/make_labeled_video.py @@ -166,7 +166,7 @@ def CreateVideo( # Draw bounding boxes if required and present if plot_bboxes and bboxes_list: - bboxes = bboxes_list[index]["bboxes"] + bboxes = bboxes_list[index]["bboxes"] bbox_scores = bboxes_list[index]["bbox_scores"] n_bboxes = bboxes.shape[0] for i in range(n_bboxes): @@ -346,7 +346,7 @@ def CreateVideoSlow( # Draw bounding boxes of required and present if plot_bboxes and bboxes_list: - bboxes = bboxes_list[index]["bboxes"] + bboxes = bboxes_list[index]["bboxes"] bbox_scores = bboxes_list[index]["bbox_scores"] n_bboxes = bboxes.shape[0] for i in range(n_bboxes): @@ -360,9 +360,10 @@ def CreateVideoSlow( bbox_origin, bbox_width, bbox_height, - linewidth = 1, - edgecolor = bounding_boxes_color, - facecolor = 'none') + linewidth=1, + edgecolor=bounding_boxes_color, + facecolor="none", + ) ax.add_patch(rectangle) # Draw skeleton @@ -428,7 +429,7 @@ def create_labeled_video( displayedbodyparts: list[str] | str = "all", displayedindividuals: list[str] | str = "all", codec: str = "mp4v", - outputframerate : int | None = None, + outputframerate: int | None = None, 
destfolder: Path | str | None = None, draw_skeleton: bool = False, trailpoints: int = 0, @@ -686,7 +687,11 @@ def create_labeled_video( "dataset" ] if bboxes_pcutoff is None: - bboxes_pcutoff = model_config.get("detector", {}).get("model", {}).get("box_score_thresh", 0.6) + bboxes_pcutoff = ( + model_config.get("detector", {}) + .get("model", {}) + .get("box_score_thresh", 0.6) + ) else: if bboxes_pcutoff is None: bboxes_pcutoff = 0.6 @@ -1175,7 +1180,7 @@ def create_video_with_all_detections( destfolder=None, modelprefix="", confidence_to_alpha: Union[bool, Callable[[float], float]] = False, - plot_bboxes: bool = True + plot_bboxes: bool = True, ): """ Create a video labeled with all the detections stored in a '*_full.pickle' file. @@ -1290,7 +1295,13 @@ def create_video_with_all_detections( clip = vp(fname=video, sname=outputname, codec="mp4v") ny, nx = clip.height(), clip.width() - bboxes_pcutoff = metadata.get("data", {}).get("pytorch-config", {}).get("detector", {}).get("model", {}).get("box_score_thresh", 0.6) + bboxes_pcutoff = ( + metadata.get("data", {}) + .get("pytorch-config", {}) + .get("detector", {}) + .get("model", {}) + .get("box_score_thresh", 0.6) + ) bboxes_color = (0, 0, 0) for n in trange(clip.nframes): @@ -1306,7 +1317,7 @@ def create_video_with_all_detections( bbox_scores = data[frame_names[ind]]["bbox_scores"] n_bboxes = bboxes.shape[0] for i in range(n_bboxes): - bbox = bboxes[i,:] + bbox = bboxes[i, :] x, y = bbox[0], bbox[1] x += x1 y += y1 @@ -1314,7 +1325,9 @@ def create_video_with_all_detections( confidence = bbox_scores[i] if confidence < bboxes_pcutoff: continue - rect_coords = rectangle_perimeter(start=(y, x), extent=(h, w)) + rect_coords = rectangle_perimeter( + start=(y, x), extent=(h, w) + ) set_color( frame, diff --git a/deeplabcut/utils/visualization.py b/deeplabcut/utils/visualization.py index e7c95c9a3e..b56dc033f8 100644 --- a/deeplabcut/utils/visualization.py +++ b/deeplabcut/utils/visualization.py @@ -33,10 +33,7 @@ from deeplabcut.utils import auxiliaryfunctions, auxfun_videos -def get_cmap( - n: int, - name: str = "hsv" -) -> Colormap: +def get_cmap(n: int, name: str = "hsv") -> Colormap: """ Args: n: number of distinct colors @@ -128,7 +125,7 @@ def make_multianimal_labeled_image( labels: list = ["+", ".", "x"], ax: plt.Axes | None = None, bounding_boxes: tuple[np.ndarray, np.ndarray] | None = None, - bounding_boxes_color='k', + bounding_boxes_color="k", bboxes_cutoff: float = 0.6, ) -> plt.Axes: """ @@ -168,8 +165,9 @@ def make_multianimal_labeled_image( bbox_height, linewidth=1, edgecolor=bounding_boxes_color, - facecolor='none', - linestyle = '--' if bbox_score < bboxes_cutoff else '-') + facecolor="none", + linestyle="--" if bbox_score < bboxes_cutoff else "-", + ) ax.add_patch(rectangle) for n, data in enumerate(zip(coords_truth, coords_pred, probs_pred)): From 4017639bf40d178d6d140860739a560dc7ce9047 Mon Sep 17 00:00:00 2001 From: maximpavliv Date: Thu, 5 Dec 2024 15:52:24 +0100 Subject: [PATCH 62/88] rename method --- deeplabcut/pose_estimation_pytorch/modelzoo/inference.py | 4 ++-- deeplabcut/utils/make_labeled_video.py | 7 +++++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/modelzoo/inference.py b/deeplabcut/pose_estimation_pytorch/modelzoo/inference.py index fbd61a1b41..361e1d5a6e 100644 --- a/deeplabcut/pose_estimation_pytorch/modelzoo/inference.py +++ b/deeplabcut/pose_estimation_pytorch/modelzoo/inference.py @@ -26,7 +26,7 @@ raise_warning_if_called_directly, ) from 
deeplabcut.pose_estimation_pytorch.task import Task -from deeplabcut.utils.make_labeled_video import _create_labeled_video +from deeplabcut.utils.make_labeled_video import create_video class NumpyEncoder(json.JSONEncoder): @@ -174,7 +174,7 @@ def _video_inference_superanimal( superanimal_colormaps = get_superanimal_colormaps() colormap = superanimal_colormaps[superanimal_name] - _create_labeled_video( + create_video( video_path, output_h5, pcutoff=pcutoff, diff --git a/deeplabcut/utils/make_labeled_video.py b/deeplabcut/utils/make_labeled_video.py index 9a209e37e6..8fde4e193d 100644 --- a/deeplabcut/utils/make_labeled_video.py +++ b/deeplabcut/utils/make_labeled_video.py @@ -978,7 +978,7 @@ def proc_video( ) clip.close() else: - _create_labeled_video( + create_video( video, filepath, keypoints2show=labeled_bpts, @@ -1008,7 +1008,7 @@ def proc_video( return False -def _create_labeled_video( +def create_video( video, h5file, keypoints2show="all", @@ -1088,6 +1088,9 @@ def _create_labeled_video( bboxes_pcutoff=bboxes_pcutoff, ) +# for backwards compatibility +_create_labeled_video = create_video + def create_video_with_keypoints_only( df, From 4e9f48a1b445656ba0350f06ef51ee7cb716f291 Mon Sep 17 00:00:00 2001 From: maximpavliv Date: Thu, 5 Dec 2024 15:53:22 +0100 Subject: [PATCH 63/88] add param description --- deeplabcut/utils/visualization.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deeplabcut/utils/visualization.py b/deeplabcut/utils/visualization.py index b56dc033f8..299e10ffa0 100644 --- a/deeplabcut/utils/visualization.py +++ b/deeplabcut/utils/visualization.py @@ -138,7 +138,7 @@ def make_multianimal_labeled_image( probs_pred: prediction probabilities colors: colors for poses dotsize: size of dot - alphavalue: + alphavalue: transparency for the keypoints pcutoff: cut-off confidence value labels: labels to use for ground truth, reliable predictions, and not reliable predictions (confidence below cut-off value) ax: matplotlib plot's axes object From 3eacc3c337529ed2c291985f79f573a87392c2f5 Mon Sep 17 00:00:00 2001 From: maximpavliv Date: Thu, 5 Dec 2024 15:53:39 +0100 Subject: [PATCH 64/88] fix param default value --- deeplabcut/utils/visualization.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/deeplabcut/utils/visualization.py b/deeplabcut/utils/visualization.py index 299e10ffa0..11b123c623 100644 --- a/deeplabcut/utils/visualization.py +++ b/deeplabcut/utils/visualization.py @@ -444,7 +444,7 @@ def plot_evaluation_results( dot_size: int = 12, alpha_value: float = 0.7, p_cutoff: float = 0.6, - bounding_boxes: dict = {}, + bounding_boxes: dict | None = None, bboxes_cutoff: float = 0.6, ) -> None: """ @@ -467,9 +467,14 @@ def plot_evaluation_results( dot_size: the dot size to use for keypoints alpha_value: the alpha value to use for keypoints p_cutoff: the p-cutoff for "confident" keypoints - bounding_boxes: dictionary with df_combined rows as keys and bounding boxes (np array for coordinates and np array for confidence) + bounding_boxes: dictionary with df_combined rows as keys and bounding boxes + (np array for coordinates and np array for confidence). + None corresponds to no bounding boxes. bboxes_cutoff: bounding boxes confidence cutoff threshold. 
""" + if bounding_boxes is None: + bounding_boxes = {} + for row_index, row in df_combined.iterrows(): if isinstance(row_index, str): image_rel_path = Path(row_index) From 4dfdc10b30195a30f541de3a768ac40d31b8758a Mon Sep 17 00:00:00 2001 From: maximpavliv Date: Mon, 9 Dec 2024 15:28:54 +0100 Subject: [PATCH 65/88] plot bounding boxes in visualize_predictions --- .../pose_estimation_pytorch/apis/evaluate.py | 18 ++++++++++++++++++ deeplabcut/utils/visualization.py | 2 +- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index 05dc97ad6e..a931fda676 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -183,6 +183,7 @@ def visualize_predictions( num_samples: int | None = None, random_select: bool = False, show_ground_truth: bool = True, + plot_bboxes: bool = True, ) -> None: """Visualize model predictions alongside ground truth keypoints. @@ -251,6 +252,13 @@ def visualize_predictions( visible_pred.append(visible_points) visible_pred = np.stack(visible_pred) # Shape: [N, num_visible_joints, 3] + if plot_bboxes: + bboxes = predictions[image_path].get("bboxes", None) + bbox_scores = predictions[image_path].get("bbox_scores", None) + bounding_boxes = (bboxes, bbox_scores) if bbox_scores is not None and bbox_scores is not None else None + else: + bounding_boxes = None + # Generate and save visualization try: plot_gt_and_predictions( @@ -258,6 +266,7 @@ def visualize_predictions( output_dir=output_dir, gt_bodyparts=visible_gt, pred_bodyparts=visible_pred, + bounding_boxes=bounding_boxes, ) print(f"Successfully plotted predictions for {image_path}") except Exception as e: @@ -276,6 +285,9 @@ def plot_gt_and_predictions( dot_size: int = 12, alpha_value: float = 0.7, p_cutoff: float = 0.6, + bounding_boxes: tuple[np.ndarray, np.ndarray] | None = None, + bounding_boxes_color="k", + bboxes_pcutoff: float = 0.6, ): """Plot ground truth and predictions on an image. @@ -291,6 +303,9 @@ def plot_gt_and_predictions( dot_size: Size of the plotted points alpha_value: Transparency of the points p_cutoff: Confidence threshold for showing predictions + bounding_boxes: bounding boxes (top-left corner, size) and their respective confidence levels, + bounding_boxes_color: If bounding_boxes is not None, this is the color that will be used for plotting them + bboxes_cutoff: bounding boxes confidence cutoff threshold. 
""" # Ensure output directory exists output_dir = Path(output_dir) @@ -336,6 +351,9 @@ def plot_gt_and_predictions( alpha_value, p_cutoff, ax=ax, + bounding_boxes=bounding_boxes, + bounding_boxes_color=bounding_boxes_color, + bboxes_cutoff=bboxes_pcutoff, ) # Plot unique bodyparts if present diff --git a/deeplabcut/utils/visualization.py b/deeplabcut/utils/visualization.py index 11b123c623..386e6e42ca 100644 --- a/deeplabcut/utils/visualization.py +++ b/deeplabcut/utils/visualization.py @@ -142,7 +142,7 @@ def make_multianimal_labeled_image( pcutoff: cut-off confidence value labels: labels to use for ground truth, reliable predictions, and not reliable predictions (confidence below cut-off value) ax: matplotlib plot's axes object - bounding_boxes: bounding boxes (top-left corner, size) and their respective confice levels, + bounding_boxes: bounding boxes (top-left corner, size) and their respective confidence levels, bounding_boxes_color: If bounding_boxes is not None, this is the color that will be used for plotting them bboxes_cutoff: bounding boxes confidence cutoff threshold. From 320a03f0798ccb1a3984be5f4662ef82d99a1c16 Mon Sep 17 00:00:00 2001 From: maximpavliv Date: Mon, 9 Dec 2024 15:31:41 +0100 Subject: [PATCH 66/88] black --- deeplabcut/pose_estimation_pytorch/apis/evaluate.py | 6 +++++- deeplabcut/utils/make_labeled_video.py | 1 + 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index a931fda676..4af0e95b8d 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -255,7 +255,11 @@ def visualize_predictions( if plot_bboxes: bboxes = predictions[image_path].get("bboxes", None) bbox_scores = predictions[image_path].get("bbox_scores", None) - bounding_boxes = (bboxes, bbox_scores) if bbox_scores is not None and bbox_scores is not None else None + bounding_boxes = ( + (bboxes, bbox_scores) + if bbox_scores is not None and bbox_scores is not None + else None + ) else: bounding_boxes = None diff --git a/deeplabcut/utils/make_labeled_video.py b/deeplabcut/utils/make_labeled_video.py index 8fde4e193d..616dd8a131 100644 --- a/deeplabcut/utils/make_labeled_video.py +++ b/deeplabcut/utils/make_labeled_video.py @@ -1088,6 +1088,7 @@ def create_video( bboxes_pcutoff=bboxes_pcutoff, ) + # for backwards compatibility _create_labeled_video = create_video From 3e1251b712fe4754e89c687ad7e34278347af2eb Mon Sep 17 00:00:00 2001 From: ti Date: Wed, 15 Jan 2025 15:25:15 +0100 Subject: [PATCH 67/88] add adaptive dot_size to visualize_predictions(*); dot_size is calculated from image_size --- .../pose_estimation_pytorch/apis/evaluate.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index 4af0e95b8d..f2a047551f 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -89,7 +89,7 @@ def predict( f"Missing context for some images: {len(context)} != {len(image_paths)}" ) images_with_context = list(zip(image_paths, context)) - + predictions = pose_runner.inference(images=tqdm(images_with_context)) return { image_path: image_predictions @@ -227,6 +227,15 @@ def visualize_predictions( # Process each selected image for image_path in image_paths: + # Read image to get dimensions + frame = 
auxfun_videos.imread(str(image_path), mode="skimage") + h, w = frame.shape[:2] + + # Calculate adaptive dot size based on image dimensions + # This creates dots that scale with image size while staying reasonable + dot_size = int(min(w, h) * 0.015) # 1.5% of smallest dimension + dot_size = max(6, min(dot_size, 25)) # Keep size between 6 and 25 pixels + # Get prediction and ground truth data pred_data = predictions[image_path] gt_keypoints = ground_truth[image_path] # Shape: [N, num_keypoints, 3] @@ -271,12 +280,12 @@ def visualize_predictions( gt_bodyparts=visible_gt, pred_bodyparts=visible_pred, bounding_boxes=bounding_boxes, + dot_size=dot_size, # Pass the adaptive dot size ) print(f"Successfully plotted predictions for {image_path}") except Exception as e: print(f"Error plotting predictions for {image_path}: {str(e)}") - - + def plot_gt_and_predictions( image_path: str | Path, output_dir: str | Path, From 241ea2998da296d09928945d0d0bfd77a220c2d7 Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 6 Mar 2025 14:19:46 +0100 Subject: [PATCH 68/88] add pfm format plotting function: including skeleton, skeleton_name, keypoint_mask --- .../pose_estimation_pytorch/apis/evaluate.py | 513 +++++++++++++++++- 1 file changed, 509 insertions(+), 4 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py index f2a047551f..a7a6c600f4 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluate.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluate.py @@ -46,7 +46,9 @@ plot_evaluation_results, save_labeled_frame, ) - +from typing import Dict, List, Tuple, Optional, Union +import logging +import matplotlib.patches as patches def predict( pose_task: Task, @@ -89,8 +91,8 @@ def predict( f"Missing context for some images: {len(context)} != {len(image_paths)}" ) images_with_context = list(zip(image_paths, context)) - predictions = pose_runner.inference(images=tqdm(images_with_context)) + return { image_path: image_predictions for image_path, image_predictions in zip(image_paths, predictions) @@ -184,6 +186,7 @@ def visualize_predictions( random_select: bool = False, show_ground_truth: bool = True, plot_bboxes: bool = True, + skeleton: list | None = None, ) -> None: """Visualize model predictions alongside ground truth keypoints. @@ -210,6 +213,11 @@ def visualize_predictions( show_ground_truth: If True, displays ground truth poses alongside predictions. If False, only shows predictions but uses GT visibility mask + + plot_bboxes: Whether to plot bounding boxes if available + + skeleton: List of joint pairs defining the skeleton connections. + Each pair should be a tuple of indices corresponding to the joints to connect. """ # Setup output directory output_dir = Path(output_dir or "predictions_visualizations") @@ -280,7 +288,8 @@ def visualize_predictions( gt_bodyparts=visible_gt, pred_bodyparts=visible_pred, bounding_boxes=bounding_boxes, - dot_size=dot_size, # Pass the adaptive dot size + dot_size=dot_size, + skeleton=skeleton, ) print(f"Successfully plotted predictions for {image_path}") except Exception as e: @@ -301,6 +310,7 @@ def plot_gt_and_predictions( bounding_boxes: tuple[np.ndarray, np.ndarray] | None = None, bounding_boxes_color="k", bboxes_pcutoff: float = 0.6, + skeleton: list | None = None, ): """Plot ground truth and predictions on an image. 
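# A sketch of the new ``skeleton`` argument's format: a list of joint-index
# pairs, where each index refers to a position in the bodypart list
# (illustrative values):
#
#     skeleton = [(0, 1), (1, 2), (2, 3)]
#     visualize_predictions(predictions, ground_truth, skeleton=skeleton)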
@@ -319,6 +329,8 @@ def plot_gt_and_predictions(
     bounding_boxes: bounding boxes (top-left corner, size) and their respective confidence levels.
     bounding_boxes_color: If bounding_boxes is not None, this is the color that will be used for plotting them.
     bboxes_pcutoff: bounding boxes confidence cutoff threshold.
+    skeleton: List of joint pairs defining the skeleton connections.
+        Each pair should be a tuple of indices corresponding to the joints to connect.
     """
     # Ensure output directory exists
     output_dir = Path(output_dir)
@@ -352,7 +364,7 @@ def plot_gt_and_predictions(
         ground_truth = gt_bodyparts
     else:
         raise ValueError(f"Invalid mode: {mode}")
-    
+
     # Plot regular bodyparts
     ax = make_multianimal_labeled_image(
         frame,
@@ -367,6 +379,7 @@ def plot_gt_and_predictions(
         bounding_boxes=bounding_boxes,
         bounding_boxes_color=bounding_boxes_color,
         bboxes_cutoff=bboxes_pcutoff,
+        skeleton=skeleton,
     )
 
     # Plot unique bodyparts if present
@@ -401,6 +414,498 @@ def plot_gt_and_predictions(
     plt.close()
 
 
+def visualize_predictions_PFM(
+    predictions: Dict[str, Dict],
+    ground_truth: Dict[str, np.ndarray],
+    output_dir: Optional[Union[str, Path]] = None,
+    num_samples: Optional[int] = None,
+    random_select: bool = False,
+    plot_bboxes: bool = True,
+    skeleton: Optional[List[Tuple[int, int]]] = None,
+    keypoint_vis_mask: Optional[List[int]] = None,
+    keypoint_names: Optional[List[str]] = None,
+    confidence_threshold: float = 0.6,
+) -> None:
+    """Visualize model predictions alongside ground truth keypoints with additional PFM-specific configurations."""
+    # Setup output directory and logging
+    output_dir = Path(output_dir or "predictions_visualizations")
+    output_dir.mkdir(exist_ok=True, parents=True)
+
+    # Configure logging with a unique handler
+    log_file = output_dir / "visualization.log"
+    handler = logging.FileHandler(log_file)
+    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
+    logger = logging.getLogger("PFM_visualization")
+    logger.setLevel(logging.INFO)
+    logger.addHandler(handler)
+    logger.info(f"Starting visualization process. Output directory: {output_dir}")
+
+    # Select images to process
+    image_paths = list(predictions.keys())
+    if num_samples and num_samples < len(image_paths):
+        if random_select:
+            image_paths = np.random.choice(
+                image_paths, num_samples, replace=False
+            ).tolist()
+        else:
+            image_paths = image_paths[:num_samples]
+
+    # Process each selected image
+    for image_path in image_paths:
+        # Get prediction and ground truth data
+        pred_data = predictions[image_path]
+        gt_keypoints = ground_truth[image_path]  # Shape: [N, num_keypoints, 3]
+
+        # Process predicted keypoints
+        pred_keypoints = pred_data["bodyparts"]
+
+        if plot_bboxes:
+            bboxes = predictions[image_path].get("bboxes", None)
+            bbox_scores = predictions[image_path].get("bbox_scores", None)
+            # Boxes without scores are ground-truth boxes; give them a score of 1
+            if bboxes is not None and bbox_scores is None:
+                bbox_scores = np.ones(len(bboxes))
+            bounding_boxes = (bboxes, bbox_scores) if bboxes is not None else None
+        else:
+            bounding_boxes = None
+
+        # Generate visualization
+        plot_gt_and_predictions_PFM(
+            image_path=image_path,
+            output_dir=output_dir,
+            gt_bodyparts=gt_keypoints,
+            pred_bodyparts=pred_keypoints,
+            bounding_boxes=bounding_boxes,
+            skeleton=skeleton,
+            keypoint_names=keypoint_names,
+            p_cutoff=confidence_threshold,
+            keypoint_vis_mask=keypoint_vis_mask,  # Pass the mask to the plotting function
+        )
+        logger.info(f"Successfully visualized predictions for {image_path}")
+
+    # Clean up logging handler
+    logger.removeHandler(handler)
+    handler.close()
+
+
+def plot_gt_and_predictions_PFM(
+    image_path: Union[str, Path],
+    output_dir: Union[str, Path],
+    gt_bodyparts: Optional[np.ndarray] = None,
+    pred_bodyparts: Optional[np.ndarray] = None,
+    mode: str = "bodypart",
+    colormap: str = "rainbow",
+    dot_size: int = 12,
+    alpha_value: float = 0.8,
+    p_cutoff: float = 0.6,
+    bounding_boxes: tuple[np.ndarray, np.ndarray] | None = None,
+    bounding_boxes_color="k",
+    bboxes_pcutoff: float = 0.6,
+    skeleton: Optional[List[Tuple[int, int]]] = None,
+    keypoint_names: Optional[List[str]] = None,
+    keypoint_vis_mask: Optional[List[int]] = None,
+    labels: List[str] = ["+", ".", "x"],
+) -> None:
+    """Plot ground truth and predictions on an image.
+
+    Args:
+        image_path: Path to the image file
+        output_dir: Directory to save the visualization
+        gt_bodyparts: Ground truth keypoints array [N, num_keypoints, 3] (x, y, vis_label)
+        pred_bodyparts: Predicted keypoints array [N, num_keypoints, 3] (x, y, confidence)
+        bounding_boxes: Tuple of (boxes, scores) for bounding box visualization
+        dot_size: Size of the keypoint markers
+        alpha_value: Transparency for points and lines
+        p_cutoff: Confidence threshold for predictions
+        mode: How to color the points ("bodypart" or "individual")
+        colormap: Matplotlib colormap name
+        bounding_boxes_color: Color for bounding boxes
+        bboxes_pcutoff: Confidence cutoff used to filter individuals by their bounding box score
+        skeleton: List of joint pairs for skeleton visualization
+        keypoint_names: List of keypoint names for labeling
+        keypoint_vis_mask: Per-keypoint 0/1 mask selecting which keypoints to show (default: all visible)
+        labels: Marker styles for [ground truth, reliable predictions, unreliable predictions]
+    """
+    # Set default keypoint visibility mask if not provided
+    if pred_bodyparts is not None and keypoint_vis_mask is None:
+        keypoint_vis_mask = [1] * pred_bodyparts.shape[1]  # All keypoints visible by default
+
+    # Read image and calculate dot size
+    frame = auxfun_videos.imread(str(image_path), mode="skimage")
+    h, w = frame.shape[:2]
+    # Calculate adaptive dot size based on image dimensions
+    # Use a logarithmic scale to handle very large or small images better
+    diagonal = np.sqrt(w * w + h * h)  # Image diagonal length
+    base_size = np.log10(diagonal) * 3  # Logarithmic scaling
+    # Fine-tune the dot size
+    if diagonal > 1200:  # High resolution
+        dot_size = base_size * 2.0
+    elif diagonal < 800:  # Low resolution
+        dot_size = base_size * 1.0
+    else:  # Medium resolution
+        dot_size = base_size
+
+    # Ensure dot size stays within reasonable bounds
+    dot_size = int(max(4, min(dot_size, 15))) * 0.8  # Tighter bounds for dots
+
+    # Keep only individuals whose bounding-box score exceeds the cutoff
+    if bounding_boxes is not None:
+        valid_individuals = []
+        for idx, bbox_score in enumerate(bounding_boxes[1]):
+            if bbox_score > bboxes_pcutoff:
+                valid_individuals.append(idx)
+
+        if valid_individuals:
+            if gt_bodyparts is not None:
+                gt_bodyparts = gt_bodyparts[valid_individuals]
+            if pred_bodyparts is not None:
+                pred_bodyparts = pred_bodyparts[valid_individuals]
+            bounding_boxes = (
+                bounding_boxes[0][valid_individuals],
+                bounding_boxes[1][valid_individuals],
+            )
+
+    num_pred, num_keypoints = pred_bodyparts.shape[:2]
+
+    # Create figure with optimal settings
+    fig, ax = create_minimal_figure()
+    fig.set_size_inches(w / 100, h / 100)
+    ax.set_xlim(0, w)
+    ax.set_ylim(0, h)
+    ax.invert_yaxis()
+    ax.imshow(frame, "gray")
+
+    # Set up colors based on mode
+    if mode == "bodypart":
+        num_colors = num_keypoints
+        colors = get_cmap(num_colors, name=colormap)
+    elif mode == "individual":
+        colors = get_cmap(num_pred + 1, name=colormap)
+    else:
+        raise ValueError(f"Invalid mode: {mode}")
+
+    # Draw bounding boxes if provided
+    if bounding_boxes is not None:
+        for bbox, bbox_score in zip(bounding_boxes[0], bounding_boxes[1]):
+            bbox_origin = (bbox[0], bbox[1])
+            (bbox_width, bbox_height) = (bbox[2], bbox[3])
+            rect = patches.Rectangle(
+                bbox_origin,
+                bbox_width,
+                bbox_height,
+                linewidth=2,
+                edgecolor=bounding_boxes_color,
+                facecolor="none",
+                linestyle="--" if bbox_score < bboxes_pcutoff else "-",
+            )
+            ax.add_patch(rect)
+
+    # Track existing text positions to avoid overlap
+    existing_text_positions = []
+    scale_factor = min(w, h) / 1000  # Normalize scale factor based on image size
+
+    plot_individual = False  # Set to True to also save one figure per animal
+    if plot_individual:
+        # Save individual plots for each animal
+        for idx_individual in range(num_pred):
+            # Create a new figure for each individual
+            fig_ind, ax_ind = create_minimal_figure()
+            fig_ind.set_size_inches(w / 100, h / 100)
+            ax_ind.set_xlim(0, w)
+            ax_ind.set_ylim(0, h)
+            ax_ind.invert_yaxis()
+            ax_ind.imshow(frame, "gray")
+
+            # Draw bounding box for this individual if available
+            if bounding_boxes is not None:
+                bbox = bounding_boxes[0][idx_individual]
+                bbox_score = bounding_boxes[1][idx_individual]
+                bbox_origin = (bbox[0], bbox[1])
+                (bbox_width, bbox_height) = (bbox[2], bbox[3])
+                rect = patches.Rectangle(
+                    bbox_origin,
+                    bbox_width,
+                    bbox_height,
+                    linewidth=2,
+                    edgecolor=bounding_boxes_color,
+                    facecolor="none",
+                    linestyle="--" if bbox_score < bboxes_pcutoff else "-",
+                )
+                ax_ind.add_patch(rect)
+
+            # Reset text positions for each individual
+            existing_text_positions = []
+
+            # Plot keypoints for this individual
+            for idx_keypoint in range(num_keypoints):
+                if keypoint_vis_mask[idx_keypoint]:
+                    keypoint_confidence = pred_bodyparts[idx_individual, idx_keypoint, 2]
+                    if keypoint_confidence > p_cutoff:
+                        x_kp = pred_bodyparts[idx_individual, idx_keypoint, 0]
+                        y_kp = pred_bodyparts[idx_individual, idx_keypoint, 1]
+
+                        ax_ind.plot(
+                            x_kp,
+                            y_kp,
+                            labels[1] if keypoint_confidence > p_cutoff else labels[2],
+                            color=colors(idx_keypoint),
+                            alpha=alpha_value,
+                            markersize=dot_size,
+                        )
+
+                        if keypoint_names is not None:
+                            # Calculate and adjust text position
+                            x_text = x_kp - (10 * scale_factor)
+                            y_text = y_kp - (15 * scale_factor)
+                            x_text = min(max(0, x_text), w - 100)
+                            y_text = min(max(0, y_text), h - 10)
+
+                            while any(abs(x_text - ex) < 50 * scale_factor and abs(y_text - ey) < 20 * scale_factor
+                                      for ex, ey in existing_text_positions):
+                                y_text += 20 * scale_factor
+                                if y_text > h - 10:
+                                    y_text = y_kp
+                                    x_text += 50 * scale_factor
+
+                            existing_text_positions.append((x_text, y_text))
+
+                            ax_ind.text(
+                                x_text,
+                                y_text,
+                                keypoint_names[idx_keypoint],
+                                color=colors(idx_keypoint),
+                                alpha=alpha_value,
+                                fontsize=dot_size * 0.8,
+                            )
+
+                    # Plot ground truth for this individual
+                    if gt_bodyparts is not None:
+                        if gt_bodyparts[idx_individual, idx_keypoint, 2] != -1:
+                            ax_ind.plot(
+                                gt_bodyparts[idx_individual, idx_keypoint, 0],
+                                gt_bodyparts[idx_individual, idx_keypoint, 1],
+                                labels[0],
+                                color=colors(idx_keypoint),
+                                alpha=alpha_value,
+                                markersize=dot_size,
+                            )
+
+            # Save individual plot
+            if num_pred > 1:
+                # Add index for multi-animal images
+                output_path = Path(output_dir) / f"{Path(image_path).stem}_animal_{idx_individual}_predictions.png"
+            else:
+                # No index needed for single animal
+                output_path = Path(output_dir) / f"{Path(image_path).stem}_predictions.png"
+
+            plt.savefig(
+                output_path,
+                bbox_inches="tight",
+                pad_inches=0,
+                transparent=False,
+            )
+            plt.close(fig_ind)
+
+    # Combined plot with all individuals
+    for idx_individual in range(num_pred):
+        for idx_keypoint in range(num_keypoints):
+            if pred_bodyparts is not None and keypoint_vis_mask[idx_keypoint]:
+                # Pick the marker by confidence (only confident keypoints are drawn below)
+                keypoint_confidence = pred_bodyparts[idx_individual, idx_keypoint, 2]
+                if keypoint_confidence > p_cutoff:
+                    pred_label = labels[1]
+                else:
+                    pred_label = labels[2]
+                if keypoint_confidence > p_cutoff:
+                    x_kp = pred_bodyparts[idx_individual, idx_keypoint, 0]
+                    y_kp = pred_bodyparts[idx_individual, idx_keypoint, 1]
+
+                    ax.plot(
+                        x_kp,
+                        y_kp,
+                        pred_label,
+                        color=colors(idx_keypoint),
+                        alpha=alpha_value,
+                        markersize=dot_size,
+                    )
+
+                    if keypoint_names is not None:
+                        # Calculate initial text position
+                        x_text = x_kp - (10 * scale_factor)
+                        y_text = y_kp - (15 * scale_factor)
+
+                        # Ensure text stays within image bounds
+                        x_text = min(max(0, x_text), w - 100)
+                        y_text = min(max(0, y_text), h - 10)
+
+                        # Avoid overlapping with existing text
+                        while any(abs(x_text - ex) < 50 * scale_factor and abs(y_text - ey) < 20 * scale_factor
+                                  for ex, ey in existing_text_positions):
+                            y_text += 20 * scale_factor
+                            if y_text > h - 10:  # If we run out of vertical space
+                                y_text = pred_bodyparts[idx_individual, idx_keypoint, 1]  # Reset to original y
+                                x_text += 50 * scale_factor  # Move text horizontally instead
+
+                        # Record this position
+                        existing_text_positions.append((x_text, y_text))
+
+                        ax.text(
+                            x_text,
+                            y_text,
+                            keypoint_names[idx_keypoint],
+                            color=colors(idx_keypoint),
+                            alpha=alpha_value,
+                            fontsize=dot_size * 0.5,
+                        )
+
+                # Plot ground truth
+                if gt_bodyparts is not None:
+                    if gt_bodyparts[idx_individual, idx_keypoint, 2] != -1:
+                        ax.plot(
+                            gt_bodyparts[idx_individual, idx_keypoint, 0],
+                            gt_bodyparts[idx_individual, idx_keypoint, 1],
+                            labels[0],
+                            color=colors(idx_keypoint),
+                            alpha=alpha_value,
+                            markersize=dot_size * 0.5,
+                        )
+                if skeleton is not None:
+                    # Draw all valid connections
+                    connection_pairs = []
+                    for [idx1, idx2] in skeleton:
+                        # Only add the connection if both keypoints are visible and have confidence above threshold
+                        if (pred_bodyparts[idx_individual, idx1, 2] > p_cutoff and
+                            pred_bodyparts[idx_individual, idx2, 2] > p_cutoff):
+                            connection_pairs.append({
+                                'start': (pred_bodyparts[idx_individual, idx1, 0],
+                                          pred_bodyparts[idx_individual, idx1, 1]),
+                                'end': (pred_bodyparts[idx_individual, idx2, 0],
+ pred_bodyparts[idx_individual, idx2, 1]) + }) + + # if center_hip (26) is below the p_cutoff, and root_tail (33) is above the p_cutoff, + # then we can use root_tail to replace center_hip (just for connection!), otherwise we use center_hip + # if idx1 == 26 and pred_bodyparts[idx_individual, 26, 2] < p_cutoff and pred_bodyparts[idx_individual, 33, 2] > p_cutoff: + # # Replace center_hip with root_tail for this connection + # if pred_bodyparts[idx_individual, idx2, 2] > p_cutoff: + # connection_pairs.append({ + # 'start': (pred_bodyparts[idx_individual, 33, 0], + # pred_bodyparts[idx_individual, 33, 1]), + # 'end': (pred_bodyparts[idx_individual, idx2, 0], + # pred_bodyparts[idx_individual, idx2, 1]) + # }) + # elif idx2 == 26 and pred_bodyparts[idx_individual, 26, 2] < p_cutoff and pred_bodyparts[idx_individual, 33, 2] > p_cutoff: + # # Handle case where center_hip is the end point + # if pred_bodyparts[idx_individual, idx1, 2] > p_cutoff: + # connection_pairs.append({ + # 'start': (pred_bodyparts[idx_individual, idx1, 0], + # pred_bodyparts[idx_individual, idx1, 1]), + # 'end': (pred_bodyparts[idx_individual, 33, 0], + # pred_bodyparts[idx_individual, 33, 1]) + # }) + + # if left hip (idx: 24) is below the p_cutoff and left knee (idx: 27) is above the p_cutoff, + # if center hip (idx: 26) is above the p_cutoff, then connect left knee to center hip, + # if center hip (idx: 26) is below the p_cutoff and root_tail (idx: 33) is above the p_cutoff, then we connect left knee to root_tail + if pred_bodyparts[idx_individual, 24, 2] < p_cutoff and pred_bodyparts[idx_individual, 26, 2] > p_cutoff and pred_bodyparts[idx_individual, 27, 2] > p_cutoff: + connection_pairs.append({ + 'start': (pred_bodyparts[idx_individual, 27, 0], + pred_bodyparts[idx_individual, 27, 1]), + 'end': (pred_bodyparts[idx_individual, 26, 0], + pred_bodyparts[idx_individual, 26, 1]) + }) + + # if right hip (idx: 25) is below the p_cutoff, and center hip (idx: 26) and right knee (idx: 28) are above the p_cutoff, + # then we can draw a line from the right knee (idx: 28) to the center hip (idx: 26) + if pred_bodyparts[idx_individual, 25, 2] < p_cutoff and pred_bodyparts[idx_individual, 26, 2] > p_cutoff and pred_bodyparts[idx_individual, 28, 2] > p_cutoff: + connection_pairs.append({ + 'start': (pred_bodyparts[idx_individual, 28, 0], + pred_bodyparts[idx_individual, 28, 1]), + 'end': (pred_bodyparts[idx_individual, 26, 0], + pred_bodyparts[idx_individual, 26, 1]) + }) + + for connection in connection_pairs: + ax.plot( + [connection['start'][0], connection['end'][0]], + [connection['start'][1], connection['end'][1]], + 'g', # black solid line + alpha=alpha_value * 0.8, # slightly more transparent than points + linewidth=dot_size * 0.1 # scale line width with dot size + ) + + # Save the figure + output_path = Path(output_dir) / f"{Path(image_path).stem}_predictions.png" + # save_labeled_frame(fig, str(image_path), str(output_dir), belongs_to_train=False) + plt.savefig( + output_path, + dpi=200, + bbox_inches='tight', + pad_inches=0, + transparent=False + ) + erase_artists(ax) + plt.close() + + def evaluate_snapshot( cfg: dict, loader: DLCLoader, From 3dda8c04911a4258853de82a9a40cc6d51d15c22 Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 6 Mar 2025 15:56:49 +0100 Subject: [PATCH 69/88] add pfm plotting: skeleton, skeleton_name, masking --- .../apis/evaluation.py | 493 ++++++++++++++++++ 1 file changed, 493 insertions(+) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py 
b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py
index d3b15d1822..7010f664ae 100755
--- a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py
+++ b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py
@@ -456,6 +456,499 @@ def plot_gt_and_predictions(
     plt.close()
+
+
+def visualize_predictions_PFM(
+    predictions: Dict[str, Dict],
+    ground_truth: Dict[str, np.ndarray],
+    output_dir: Optional[Union[str, Path]] = None,
+    num_samples: Optional[int] = None,
+    random_select: bool = False,
+    plot_bboxes: bool = True,
+    skeleton: Optional[List[Tuple[int, int]]] = None,
+    keypoint_vis_mask: Optional[List[int]] = None,
+    keypoint_names: Optional[List[str]] = None,
+    confidence_threshold: float = 0.6
+) -> None:
+    """Visualize model predictions alongside ground truth keypoints with additional PFM-specific configurations."""
+    # Set up the output directory and logging
+    output_dir = Path(output_dir or "predictions_visualizations")
+    output_dir.mkdir(exist_ok=True, parents=True)
+
+    # Configure logging with a unique handler
+    log_file = output_dir / "visualization.log"
+    handler = logging.FileHandler(log_file)
+    handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
+    logger = logging.getLogger('PFM_visualization')
+    logger.setLevel(logging.INFO)
+    logger.addHandler(handler)
+    logger.info(f"Starting visualization process. Output directory: {output_dir}")
+
+    # Select the images to visualize
+    image_paths = list(predictions.keys())
+    if num_samples and num_samples < len(image_paths):
+        if random_select:
+            image_paths = np.random.choice(
+                image_paths, num_samples, replace=False
+            ).tolist()
+        else:
+            image_paths = image_paths[:num_samples]
+
+    # Process each selected image
+    for image_path in image_paths:
+        # Get prediction and ground truth data
+        pred_data = predictions[image_path]
+        gt_keypoints = ground_truth[image_path]  # Shape: [N, num_keypoints, 3]
+
+        # Process predicted keypoints
+        pred_keypoints = pred_data["bodyparts"]
+
+        if plot_bboxes:
+            bboxes = predictions[image_path].get("bboxes", None)
+            bbox_scores = predictions[image_path].get("bbox_scores", None)
+            # missing scores mean the bboxes are ground truth, so set their scores to 1
+            if bboxes is not None and bbox_scores is None:
+                bbox_scores = np.ones(len(bboxes))
+            # print("bboxes:", bboxes)
+            # print("bbox_scores:", bbox_scores)
+            bounding_boxes = (
+                (bboxes, bbox_scores)
+                if bboxes is not None and bbox_scores is not None
+                else None
+            )
+        else:
+            bounding_boxes = None
+
+        # print("bounding_boxes:", bounding_boxes)
+
+        # Generate visualization
+        plot_gt_and_predictions_PFM(
+            image_path=image_path,
+            output_dir=output_dir,
+            gt_bodyparts=gt_keypoints,
+            pred_bodyparts=pred_keypoints,
+            bounding_boxes=bounding_boxes,
+            skeleton=skeleton,
+            keypoint_names=keypoint_names,
+            p_cutoff=confidence_threshold,
+            keypoint_vis_mask=keypoint_vis_mask,  # Pass the mask to the plotting function
+        )
+        logger.info(f"Successfully visualized predictions for {image_path}")
+
+    # Clean up the logging handler
+    logger.removeHandler(handler)
+    handler.close()
+
+def plot_gt_and_predictions_PFM(
+    image_path: Union[str, Path],
+    output_dir: Union[str, Path],
+    gt_bodyparts: Optional[np.ndarray] = None,
+    pred_bodyparts: Optional[np.ndarray] = None,
+    mode: str = "bodypart",
+    colormap: str = "rainbow",
+    dot_size: int = 12,
+    alpha_value: float = 0.8,
+    p_cutoff: float = 0.6,
+    bounding_boxes: tuple[np.ndarray, np.ndarray] | None = None,
+    bounding_boxes_color="k",
+    bboxes_pcutoff: float = 0.6,
+    skeleton: Optional[List[Tuple[int, int]]] = None,
+    
keypoint_names: Optional[List[str]] = None, + keypoint_vis_mask: Optional[List[int]] = None, + labels: List[str] = ["+", ".", "x"], +) -> None: + """Plot ground truth and predictions on an image. + + Args: + image_path: Path to the image file + output_dir: Directory to save the visualization + gt_bodyparts: Ground truth keypoints array [N, num_keypoints, 3] (x, y, vis_label) + pred_bodyparts: Predicted keypoints array [N, num_keypoints, 3] (x, y, confidence) + bounding_boxes: Tuple of (boxes, scores) for bounding box visualization + dot_size: Size of the keypoint markers + alpha_value: Transparency for points and lines + p_cutoff: Confidence threshold for predictions + mode: How to color the points ("bodypart" or "individual") + colormap: Matplotlib colormap name + bbox_color: Color for bounding boxes + skeleton: List of joint pairs for skeleton visualization + keypoint_names: List of keypoint names for labeling + keypoint_vis_mask: List of keypoint indices to show (default: all keypoints visible) + labels: Marker styles for [ground truth, reliable predictions, unreliable predictions] + """ + # Set default keypoint visibility mask if not provided + if pred_bodyparts is not None and keypoint_vis_mask is None: + keypoint_vis_mask = [1] * pred_bodyparts.shape[1] # All keypoints visible by default + + # Read image and calculate dot size + frame = auxfun_videos.imread(str(image_path), mode="skimage") + h, w = frame.shape[:2] + # Calculate adaptive dot size based on image dimensions + # Use a logarithmic scale to handle very large or small images better + diagonal = np.sqrt(w * w + h * h) # Image diagonal length + base_size = np.log10(diagonal) * 3 # Logarithmic scaling + # print("diagonal:", diagonal) + # Fine-tune the dot size + if diagonal > 1200: # High resolution + dot_size = base_size * 2.0 + elif diagonal < 800: # Low resolution + dot_size = base_size * 1.0 + else: # Medium resolution + dot_size = base_size + + # Ensure dot size stays within reasonable bounds + dot_size = int(max(4, min(dot_size, 15)))*0.8 # Tighter bounds for dots + + # filter out the individuals that without GT keypoints + if bounding_boxes is not None: + # filter out the individuals that without GT keypoints + valid_individuals = [] + for idx, bbox_score in enumerate(bounding_boxes[1]): + if bbox_score > bboxes_pcutoff: + valid_individuals.append(idx) + + # if gt_bodyparts is None: + # tmp_valid_bodyparts = pred_bodyparts + # else: + # tmp_valid_bodyparts = gt_bodyparts + + # if tmp_valid_bodyparts is not None: + # valid_individuals = [] + # for idx in range(tmp_valid_bodyparts.shape[0]): + # # Check if this individual has any valid keypoints + # # A keypoint is valid if its visibility (3rd value) is not -1 + # has_valid_keypoints = False + + # for kp_idx in range(tmp_valid_bodyparts.shape[1]): + # kp = tmp_valid_bodyparts[idx, kp_idx] + # # Check if keypoint is visible + # if kp[2] != -1: + # has_valid_keypoints = True + # break # We found at least one valid keypoint, no need to check more + + # # Include individual if they have at least one valid keypoint + # if has_valid_keypoints: + # valid_individuals.append(idx) + + # print(f"Found {len(valid_individuals)} valid individuals out of {gt_bodyparts.shape[0]}") + # Filter both ground truth and predictions + + # print(f"valid_individuals: {valid_individuals}") + if valid_individuals: + if gt_bodyparts is not None: + gt_bodyparts = gt_bodyparts[valid_individuals] + if pred_bodyparts is not None: + pred_bodyparts = pred_bodyparts[valid_individuals] + if bounding_boxes is 
not None: + bounding_boxes = ( + bounding_boxes[0][valid_individuals], + bounding_boxes[1][valid_individuals] + ) + + num_pred, num_keypoints = pred_bodyparts.shape[:2] + + # print("After filtering:") + # print("num_pred, num_keypoints:", num_pred, num_keypoints) + # if gt_bodyparts is not None: + # print("gt_bodyparts shape:", gt_bodyparts.shape) + + # Create figure with optimal settings + fig, ax = create_minimal_figure() + fig.set_size_inches(w/100, h/100) + ax.set_xlim(0, w) + ax.set_ylim(0, h) + ax.invert_yaxis() + ax.imshow(frame, "gray") + + # Set up colors based on mode + if mode == "bodypart": + num_colors = num_keypoints + # if pred_unique_bodyparts is not None: + # num_colors += pred_unique_bodyparts.shape[1] + colors = get_cmap(num_colors, name=colormap) + # print("colors:", colors) + # predictions = pred_bodyparts.swapaxes(0, 1) + # ground_truth = gt_bodyparts.swapaxes(0, 1) + elif mode == "individual": + colors = get_cmap(num_pred + 1, name=colormap) + # predictions = pred_bodyparts + # ground_truth = gt_bodyparts + else: + raise ValueError(f"Invalid mode: {mode}") + + # print("bounding_boxes:", bounding_boxes) + + # Draw bounding boxes if provided + if bounding_boxes is not None: + # print(f"bounding_boxes: {bounding_boxes}") + for bbox, bbox_score in zip(bounding_boxes[0], bounding_boxes[1]): + bbox_origin = (bbox[0], bbox[1]) + (bbox_width, bbox_height) = (bbox[2], bbox[3]) + rect = patches.Rectangle( + bbox_origin, + bbox_width, + bbox_height, + linewidth=2, + edgecolor=bounding_boxes_color, + facecolor='none', + linestyle="--" if bbox_score < bboxes_pcutoff else "-" + ) + ax.add_patch(rect) + + # Track existing text positions to avoid overlap + existing_text_positions = [] + scale_factor = min(w, h) / 1000 # Normalize scale factor based on image size + + + plot_individual = False + if plot_individual: + # Save individual plots for each animal + for idx_individual in range(num_pred): + # print("plot individual:", idx_individual) + # Create a new figure for each individual + fig_ind, ax_ind = create_minimal_figure() + fig_ind.set_size_inches(w/100, h/100) + ax_ind.set_xlim(0, w) + ax_ind.set_ylim(0, h) + ax_ind.invert_yaxis() + ax_ind.imshow(frame, "gray") + + # Draw bounding box for this individual if available + if bounding_boxes is not None: + bbox = bounding_boxes[0][idx_individual] + bbox_score = bounding_boxes[1][idx_individual] + bbox_origin = (bbox[0], bbox[1]) + (bbox_width, bbox_height) = (bbox[2], bbox[3]) + rect = patches.Rectangle( + bbox_origin, + bbox_width, + bbox_height, + linewidth=2, + edgecolor=bounding_boxes_color, + facecolor='none', + linestyle="--" if bbox_score < bboxes_pcutoff else "-" + ) + ax_ind.add_patch(rect) + + # Reset text positions for each individual + existing_text_positions = [] + + # Plot keypoints for this individual + for idx_keypoint in range(num_keypoints): + if keypoint_vis_mask[idx_keypoint]: + + keypoint_confidence = pred_bodyparts[idx_individual, idx_keypoint, 2] + # print("keypoint_confidence_individual:", keypoint_confidence) + if keypoint_confidence > p_cutoff: + x_kp = pred_bodyparts[idx_individual, idx_keypoint, 0] + y_kp = pred_bodyparts[idx_individual, idx_keypoint, 1] + + ax_ind.plot( + x_kp, + y_kp, + labels[1] if keypoint_confidence > p_cutoff else labels[2], + color=colors(idx_keypoint), + alpha=alpha_value, + markersize=dot_size + ) + + if keypoint_names is not None: + # Calculate and adjust text position + x_text = x_kp - (10 * scale_factor) + y_text = y_kp - (15 * scale_factor) + x_text = min(max(0, x_text), w - 
100) + y_text = min(max(0, y_text), h - 10) + + while any(abs(x_text - ex) < 50 * scale_factor and abs(y_text - ey) < 20 * scale_factor + for ex, ey in existing_text_positions): + y_text += 20 * scale_factor + if y_text > h - 10: + y_text = y_kp + x_text += 50 * scale_factor + + existing_text_positions.append((x_text, y_text)) + + ax_ind.text( + x_text, + y_text, + keypoint_names[idx_keypoint], + color=colors(idx_keypoint), + alpha=alpha_value, + fontsize=dot_size * 0.8 + ) + + # Plot ground truth for this individual + if gt_bodyparts is not None: + if gt_bodyparts[idx_individual, idx_keypoint, 2] != -1: + ax_ind.plot( + gt_bodyparts[idx_individual, idx_keypoint, 0], + gt_bodyparts[idx_individual, idx_keypoint, 1], + labels[0], + color=colors(idx_keypoint), + alpha=alpha_value, + markersize=dot_size + ) + + # Save individual plot + if num_pred > 1: + # Add index for multi-animal images + output_path = Path(output_dir) / f"{Path(image_path).stem}_animal_{idx_individual}_predictions.png" + else: + # No index needed for single animal + output_path = Path(output_dir) / f"{Path(image_path).stem}_predictions.png" + + plt.savefig( + output_path, + bbox_inches='tight', + pad_inches=0, + transparent=False + ) + plt.close(fig_ind) + + # Original combined plot + for idx_individual in range(num_pred): + for idx_keypoint in range(num_keypoints): + if pred_bodyparts is not None and keypoint_vis_mask[idx_keypoint]: + # if the keypoint is allowed to be shown and the prediction is reliable + keypoint_confidence = pred_bodyparts[idx_individual, idx_keypoint, 2] + if keypoint_confidence > p_cutoff: + pred_label = labels[1] + else: + pred_label = labels[2] + if keypoint_confidence > p_cutoff: + x_kp = pred_bodyparts[idx_individual, idx_keypoint, 0] + y_kp = pred_bodyparts[idx_individual, idx_keypoint, 1] + + ax.plot( + x_kp, + y_kp, + pred_label, + color=colors(idx_keypoint), + alpha=alpha_value, + markersize=dot_size + ) + + if keypoint_names is not None: + # Calculate initial text position + x_text = x_kp - (10 * scale_factor) + y_text = y_kp - (15 * scale_factor) + + # Ensure text stays within image bounds + x_text = min(max(0, x_text), w - 100) + y_text = min(max(0, y_text), h - 10) + + # Avoid overlapping with existing text + while any(abs(x_text - ex) < 50 * scale_factor and abs(y_text - ey) < 20 * scale_factor + for ex, ey in existing_text_positions): + y_text += 20 * scale_factor + if y_text > h - 10: # If we run out of vertical space + y_text = pred_bodyparts[idx_individual, idx_keypoint, 1] # Reset to original y + x_text += 50 * scale_factor # Move text horizontally instead + + # Record this position + existing_text_positions.append((x_text, y_text)) + + ax.text( + x_text, + y_text, + keypoint_names[idx_keypoint], + color=colors(idx_keypoint), + alpha=alpha_value, + fontsize=dot_size * 0.5 + ) + + # plot ground truth + if gt_bodyparts is not None: + if gt_bodyparts[idx_individual, idx_keypoint, 2] != -1: + ax.plot( + gt_bodyparts[idx_individual, idx_keypoint, 0], + gt_bodyparts[idx_individual, idx_keypoint, 1], + labels[0], + color=colors(idx_keypoint), + alpha=alpha_value, + markersize=dot_size*0.5 + ) + if skeleton is not None: + # Draw all valid connections + # plot the skeleton is the skeleton is not None + connection_pairs = [] + for [idx1, idx2] in skeleton: + # idx1 = idx1 - 1 + # idx2 = idx2 - 1 + # Only add the connection if both keypoints are visible and have confidence above threshold + if (pred_bodyparts[idx_individual, idx1, 2] > p_cutoff and + pred_bodyparts[idx_individual, idx2, 2] > 
p_cutoff): + connection_pairs.append({ + 'start': (pred_bodyparts[idx_individual, idx1, 0], + pred_bodyparts[idx_individual, idx1, 1]), + 'end': (pred_bodyparts[idx_individual, idx2, 0], + pred_bodyparts[idx_individual, idx2, 1]) + }) + + # if center_hip (26) is below the p_cutoff, and root_tail (33) is above the p_cutoff, + # then we can use root_tail to replace center_hip (just for connection!), otherwise we use center_hip + # if idx1 == 26 and pred_bodyparts[idx_individual, 26, 2] < p_cutoff and pred_bodyparts[idx_individual, 33, 2] > p_cutoff: + # # Replace center_hip with root_tail for this connection + # if pred_bodyparts[idx_individual, idx2, 2] > p_cutoff: + # connection_pairs.append({ + # 'start': (pred_bodyparts[idx_individual, 33, 0], + # pred_bodyparts[idx_individual, 33, 1]), + # 'end': (pred_bodyparts[idx_individual, idx2, 0], + # pred_bodyparts[idx_individual, idx2, 1]) + # }) + # elif idx2 == 26 and pred_bodyparts[idx_individual, 26, 2] < p_cutoff and pred_bodyparts[idx_individual, 33, 2] > p_cutoff: + # # Handle case where center_hip is the end point + # if pred_bodyparts[idx_individual, idx1, 2] > p_cutoff: + # connection_pairs.append({ + # 'start': (pred_bodyparts[idx_individual, idx1, 0], + # pred_bodyparts[idx_individual, idx1, 1]), + # 'end': (pred_bodyparts[idx_individual, 33, 0], + # pred_bodyparts[idx_individual, 33, 1]) + # }) + + # if left hip (idx: 24) is below the p_cutoff and left knee (idx: 27) is above the p_cutoff, + # if center hip (idx: 26) is above the p_cutoff, then connect left knee to center hip, + # if center hip (idx: 26) is below the p_cutoff and root_tail (idx: 33) is above the p_cutoff, then we connect left knee to root_tail + if pred_bodyparts[idx_individual, 24, 2] < p_cutoff and pred_bodyparts[idx_individual, 26, 2] > p_cutoff and pred_bodyparts[idx_individual, 27, 2] > p_cutoff: + connection_pairs.append({ + 'start': (pred_bodyparts[idx_individual, 27, 0], + pred_bodyparts[idx_individual, 27, 1]), + 'end': (pred_bodyparts[idx_individual, 26, 0], + pred_bodyparts[idx_individual, 26, 1]) + }) + + # if right hip (idx: 25) is below the p_cutoff, and center hip (idx: 26) and right knee (idx: 28) are above the p_cutoff, + # then we can draw a line from the right knee (idx: 28) to the center hip (idx: 26) + if pred_bodyparts[idx_individual, 25, 2] < p_cutoff and pred_bodyparts[idx_individual, 26, 2] > p_cutoff and pred_bodyparts[idx_individual, 28, 2] > p_cutoff: + connection_pairs.append({ + 'start': (pred_bodyparts[idx_individual, 28, 0], + pred_bodyparts[idx_individual, 28, 1]), + 'end': (pred_bodyparts[idx_individual, 26, 0], + pred_bodyparts[idx_individual, 26, 1]) + }) + + for connection in connection_pairs: + ax.plot( + [connection['start'][0], connection['end'][0]], + [connection['start'][1], connection['end'][1]], + 'g', # black solid line + alpha=alpha_value * 0.8, # slightly more transparent than points + linewidth=dot_size * 0.1 # scale line width with dot size + ) + + # Save the figure + output_path = Path(output_dir) / f"{Path(image_path).stem}_predictions.png" + # save_labeled_frame(fig, str(image_path), str(output_dir), belongs_to_train=False) + plt.savefig( + output_path, + dpi=200, + bbox_inches='tight', + pad_inches=0, + transparent=False + ) + erase_artists(ax) + plt.close() + + def evaluate_snapshot( cfg: dict, loader: DLCLoader, From 1c6d89854a53c6bd35344e4e2e4a828670762497 Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 6 Mar 2025 15:58:46 +0100 Subject: [PATCH 70/88] add libraries for evaluation and visualization in PFM 
predictions --- deeplabcut/pose_estimation_pytorch/apis/evaluation.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py index 7010f664ae..3a176c244f 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py @@ -47,7 +47,9 @@ plot_evaluation_results, save_labeled_frame, ) - +import matplotlib.pyplot as plt +from typing import Optional, Union, List, Tuple, Dict +import logging def predict( pose_runner: InferenceRunner, @@ -456,7 +458,6 @@ def plot_gt_and_predictions( plt.close() - def visualize_predictions_PFM( predictions: Dict[str, Dict], ground_truth: Dict[str, np.ndarray], From 66f881d9749f8eba4d701893305ec0255db03e2e Mon Sep 17 00:00:00 2001 From: ti Date: Fri, 7 Mar 2025 17:49:36 +0100 Subject: [PATCH 71/88] add dynamic skeleton function to make adjustment based on keypoint confidence scores; but it's better to realise this using tree structure --- .../apis/evaluation.py | 104 +++++++++++++++--- 1 file changed, 86 insertions(+), 18 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py index 3a176c244f..6f2f92092d 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py @@ -50,6 +50,7 @@ import matplotlib.pyplot as plt from typing import Optional, Union, List, Tuple, Dict import logging +import matplotlib.patches as patches def predict( pose_runner: InferenceRunner, @@ -538,7 +539,71 @@ def visualize_predictions_PFM( # Clean up logging handler logger.removeHandler(handler) handler.close() + +def get_dynamic_skeleton(skeleton, keypoints, p_cutoff=0.6): + """ + Modify skeleton connections based on keypoint confidence scores. + + If certain keypoints have low confidence (below threshold), alternative + skeleton connections will be used instead of the original ones. 
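The idea described here (when a joint's confidence falls below the threshold, bypass it by linking its reliable neighbours directly) can be sketched independently of the hard-coded rule table that follows. The joint indices and scores below are toy values, not the PFM keypoint layout:

```python
# A sketch only: bypass low-confidence joints by linking their confident
# neighbours directly; edges, indices, and scores here are illustrative.
def rewire_low_confidence(edges, conf, p_cutoff=0.6):
    keep, weak_neighbours = [], {}
    for a, b in edges:
        if conf[a] >= p_cutoff and conf[b] >= p_cutoff:
            keep.append((a, b))                       # both ends reliable
        elif conf[a] < p_cutoff <= conf[b]:
            weak_neighbours.setdefault(a, []).append(b)
        elif conf[b] < p_cutoff <= conf[a]:
            weak_neighbours.setdefault(b, []).append(a)
    # connect the confident neighbours of each unreliable joint pairwise
    for nbrs in weak_neighbours.values():
        for i in range(len(nbrs)):
            for j in range(i + 1, len(nbrs)):
                keep.append((nbrs[i], nbrs[j]))
    return keep

print(rewire_low_confidence([(0, 1), (1, 2)], [0.9, 0.3, 0.8]))  # [(0, 2)]
```

The patch itself spells out the same fallbacks per body part, which is why the commit message already notes that a tree structure would generalize it.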
+ Args: + skeleton (list): List of tuples/lists representing skeleton connections as (start_idx, end_idx) + keypoints (numpy.ndarray): Array of shape (..., 3) where the last dimension contains + [x, y, confidence] for each keypoint + p_cutoff (float): Confidence threshold (0.0-1.0) + + Returns: + list: Modified skeleton connections based on confidence scores + """ + dynamic_skeleton = skeleton.copy() + confidences = keypoints[..., 2] # Get confidence scores + + # Dictionary to store special connection rules + # dict_name_to_idx = {name: idx for idx, name in enumerate(keypoint_name_simplified)} + dict_name_to_idx = {"L_Shoulder" : 12, "R_Shoulder" : 13, "L_Elbow": 18, "R_Elbow": 19, "neck": 11, + "L_Wrist": 20, "R_Wrist": 21, "L_Hand": 22, "R_Hand": 23, "L_Knee": 27, "R_Knee": 28, "L_hip": 24, "R_hip": 25, "C_hip": 26} + + # Template for special connections with rules for alternative connections + special_connections = { + # Format: (point_to_check, [(original_connections), (alternative_connection)]) + "L_Shoulder": [(("neck", "L_Shoulder"), ("L_Shoulder", "L_Elbow") ), ("neck", "L_Elbow")], # L_S: ori_connection: {L_S to L_elbow, L_S to neck}; alt_connection: L_Elbow to neck if L_S is below threshold + "R_Shoulder": [(("neck", "R_Shoulder"), ("R_Shoulder", "R_Elbow") ), ("neck", "R_Elbow")], # R_S: ori_connection: {R_S to R_elbow, R_S to neck}; alt_connection: R_Elbow to neck if R_S is below threshold + "L_Wrist": [(("L_Hand", "L_Wrist"), ("L_Wrist", "L_Elbow")), ("L_Hand", "L_Elbow")], # L_W: ori_connection: {L_H to L_W, L_W to L_Elbow}; alt_connection: L_H to L_Elbow if L_W is below threshold + "R_Wrist": [(("R_Hand", "R_Wrist"), ("R_Wrist", "R_Elbow")), ("R_Hand", "R_Elbow")], # R_W: ori_connection: {R_H to R_W, R_W to R_Elbow}; alt_connection: R_H to R_Elbow if R_W is below threshold + "L_hip": [(( "L_Knee", "L_hip"), ("L_hip", "C_hip")), ("L_Knee", "C_hip")], + "R_hip": [(( "R_Knee", "R_hip"), ("R_hip", "C_hip")), ("R_Knee", "C_hip")], + } + # Process each keypoint in special connections + for keypoint_name, (original_connections, alternative_connection) in special_connections.items(): + # Get the index of the keypoint + keypoint_idx = dict_name_to_idx[keypoint_name] + + # Check if keypoint confidence is below threshold + if confidences[keypoint_idx] < p_cutoff: + # Convert named connections to index-based connections + original_connections_idx = [] + for conn1, conn2 in original_connections: + # Add both connections to the list + original_connections_idx.append([dict_name_to_idx[conn1], dict_name_to_idx[conn2]]) + # Also consider reverse connection + original_connections_idx.append([dict_name_to_idx[conn2], dict_name_to_idx[conn1]]) + + # Convert alternative connection to index-based + # todo: alternative_connection also could contain multiple connections + alt_conn_idx = [dict_name_to_idx[alternative_connection[0]], dict_name_to_idx[alternative_connection[1]]] + + # Remove original connections from dynamic skeleton + for conn in original_connections_idx: + if conn in dynamic_skeleton: + dynamic_skeleton.remove(conn) + + # Add alternative connection if it's not already in the skeleton + if alt_conn_idx not in dynamic_skeleton and [alt_conn_idx[1], alt_conn_idx[0]] not in dynamic_skeleton: + dynamic_skeleton.append(alt_conn_idx) + + return dynamic_skeleton + def plot_gt_and_predictions_PFM( image_path: Union[str, Path], output_dir: Union[str, Path], @@ -599,9 +664,8 @@ def plot_gt_and_predictions_PFM( # Ensure dot size stays within reasonable bounds dot_size = int(max(4, min(dot_size, 
15)))*0.8 # Tighter bounds for dots - # filter out the individuals that without GT keypoints + # filter out the non exist individuals if bounding_boxes is not None: - # filter out the individuals that without GT keypoints valid_individuals = [] for idx, bbox_score in enumerate(bounding_boxes[1]): if bbox_score > bboxes_pcutoff: @@ -633,7 +697,6 @@ def plot_gt_and_predictions_PFM( # print(f"Found {len(valid_individuals)} valid individuals out of {gt_bodyparts.shape[0]}") # Filter both ground truth and predictions - # print(f"valid_individuals: {valid_individuals}") if valid_individuals: if gt_bodyparts is not None: gt_bodyparts = gt_bodyparts[valid_individuals] @@ -872,7 +935,12 @@ def plot_gt_and_predictions_PFM( # Draw all valid connections # plot the skeleton is the skeleton is not None connection_pairs = [] - for [idx1, idx2] in skeleton: + dynamic_skeleton = skeleton.copy() + + dynamic_skeleton = get_dynamic_skeleton(dynamic_skeleton, pred_bodyparts[idx_individual], p_cutoff) + + + for [idx1, idx2] in dynamic_skeleton: # idx1 = idx1 - 1 # idx2 = idx2 - 1 # Only add the connection if both keypoints are visible and have confidence above threshold @@ -909,23 +977,23 @@ def plot_gt_and_predictions_PFM( # if left hip (idx: 24) is below the p_cutoff and left knee (idx: 27) is above the p_cutoff, # if center hip (idx: 26) is above the p_cutoff, then connect left knee to center hip, # if center hip (idx: 26) is below the p_cutoff and root_tail (idx: 33) is above the p_cutoff, then we connect left knee to root_tail - if pred_bodyparts[idx_individual, 24, 2] < p_cutoff and pred_bodyparts[idx_individual, 26, 2] > p_cutoff and pred_bodyparts[idx_individual, 27, 2] > p_cutoff: - connection_pairs.append({ - 'start': (pred_bodyparts[idx_individual, 27, 0], - pred_bodyparts[idx_individual, 27, 1]), - 'end': (pred_bodyparts[idx_individual, 26, 0], - pred_bodyparts[idx_individual, 26, 1]) - }) + # if pred_bodyparts[idx_individual, 24, 2] < p_cutoff and pred_bodyparts[idx_individual, 26, 2] > p_cutoff and pred_bodyparts[idx_individual, 27, 2] > p_cutoff: + # connection_pairs.append({ + # 'start': (pred_bodyparts[idx_individual, 27, 0], + # pred_bodyparts[idx_individual, 27, 1]), + # 'end': (pred_bodyparts[idx_individual, 26, 0], + # pred_bodyparts[idx_individual, 26, 1]) + # }) # if right hip (idx: 25) is below the p_cutoff, and center hip (idx: 26) and right knee (idx: 28) are above the p_cutoff, # then we can draw a line from the right knee (idx: 28) to the center hip (idx: 26) - if pred_bodyparts[idx_individual, 25, 2] < p_cutoff and pred_bodyparts[idx_individual, 26, 2] > p_cutoff and pred_bodyparts[idx_individual, 28, 2] > p_cutoff: - connection_pairs.append({ - 'start': (pred_bodyparts[idx_individual, 28, 0], - pred_bodyparts[idx_individual, 28, 1]), - 'end': (pred_bodyparts[idx_individual, 26, 0], - pred_bodyparts[idx_individual, 26, 1]) - }) + # if pred_bodyparts[idx_individual, 25, 2] < p_cutoff and pred_bodyparts[idx_individual, 26, 2] > p_cutoff and pred_bodyparts[idx_individual, 28, 2] > p_cutoff: + # connection_pairs.append({ + # 'start': (pred_bodyparts[idx_individual, 28, 0], + # pred_bodyparts[idx_individual, 28, 1]), + # 'end': (pred_bodyparts[idx_individual, 26, 0], + # pred_bodyparts[idx_individual, 26, 1]) + # }) for connection in connection_pairs: ax.plot( From 05c12c66d8fe8787f69b45634bcf00d3e42ae542 Mon Sep 17 00:00:00 2001 From: ti Date: Fri, 7 Mar 2025 21:01:54 +0100 Subject: [PATCH 72/88] add DynamicSkeleton class for improved dynamic skeleton generation based on keypoint 
confidence --- .../apis/evaluation.py | 173 +++++++++++++----- 1 file changed, 126 insertions(+), 47 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py index 6f2f92092d..3c36cfd75c 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py @@ -564,6 +564,7 @@ def get_dynamic_skeleton(skeleton, keypoints, p_cutoff=0.6): dict_name_to_idx = {"L_Shoulder" : 12, "R_Shoulder" : 13, "L_Elbow": 18, "R_Elbow": 19, "neck": 11, "L_Wrist": 20, "R_Wrist": 21, "L_Hand": 22, "R_Hand": 23, "L_Knee": 27, "R_Knee": 28, "L_hip": 24, "R_hip": 25, "C_hip": 26} + # Template for special connections with rules for alternative connections special_connections = { # Format: (point_to_check, [(original_connections), (alternative_connection)]) @@ -600,10 +601,129 @@ def get_dynamic_skeleton(skeleton, keypoints, p_cutoff=0.6): # Add alternative connection if it's not already in the skeleton if alt_conn_idx not in dynamic_skeleton and [alt_conn_idx[1], alt_conn_idx[0]] not in dynamic_skeleton: - dynamic_skeleton.append(alt_conn_idx) - + dynamic_skeleton.append(alt_conn_idx) return dynamic_skeleton + +class DynamicSkeleton: + def __init__(self, pred_bodyparts, p_cutoff=0.6): + self.keypoints = [ + "forehead", + "head", + "L_E", + "R_E", + "nose", + "L_ear", + "R_ear", + "mouth_front_top", + "mouth_front_bottom", + "mouth_B_L", + "mouth_B_R", + "neck", + "L_Shoulder", + "R_Shoulder", + "upper_B", + "torso_M_B", + "body_C", + "lower_B", + "L_Elbow", + "R_Elbow", + "L_Wrist", + "R_Wrist", + "L_Hand", + "R_Hand", + "L_hip", + "R_hip", + "C_hip", + "L_Knee", + "R_Knee", + "L_Ankle", + "R_Ankle", + "L_foot", + "R_foot", + "root_tail", + "M_tail", + "M_end_tail", + "end_tail" + ] + self.parent_mapping = { + 'head': "neck", + 'neck': None, # root + 'L_Shoulder': 'neck', # Left Shoulder + 'R_Shoulder': 'neck', # Right Shoulder + 'L_Elbow': 'L_Shoulder', + 'R_Elbow': 'R_Shoulder', + 'L_Wrist': 'L_Elbow', + 'R_Wrist': 'R_Elbow', + 'L_Hand': 'L_Wrist', + 'R_Hand': 'R_Wrist', + 'C_hip': None, # Hip connected to lower body + 'L_hip': 'C_hip', + 'R_hip': 'C_hip', + 'L_Knee': 'L_hip', + 'R_Knee': 'R_hip', + 'L_Ankle': 'L_Knee', + 'R_Ankle': 'R_Knee', + 'L_foot': 'L_Ankle', + 'R_foot': 'R_Ankle', + 'root_tail': 'C_hip', + 'M_tail': 'root_tail', + 'M_end_tail': 'M_tail', + 'end_tail': 'M_end_tail', + } + + confidence_dict = {} + for idx, keypoint in enumerate(self.keypoints): + confidence_dict[keypoint] = pred_bodyparts[idx, 2] + self.confidence_dict = confidence_dict + self.p_cutoff = p_cutoff + self.dynamic_skeleton = [] + + # if C_hip is None, then we use root_tail to replace C_hip, and remove {'root_tail': 'C_hip'} + if self.confidence_dict.get('C_hip') < self.p_cutoff: + self.parent_mapping['L_hip'] = 'root_tail' + self.parent_mapping['R_hip'] = 'root_tail' + self.parent_mapping['mid_tail'] = 'root_tail' + self.parent_mapping['root_tail'] = None + self.dynamic_skeleton.append(('root_tail', 'C_hip')) + + def change_name_to_idx_dynamic_skeleton(self, dynamic_skeleton): + # change the dynamic skeleton index to the new index; + dynamic_skeleton = [] + for idx, (from_node, end_node) in enumerate(self.dynamic_skeleton): + # print((self.keypoints.index(from_node), self.keypoints.index(end_node))) + dynamic_skeleton.append((self.keypoints.index(from_node), self.keypoints.index(end_node))) + return dynamic_skeleton + + + def find_nearest_ancester(self, node): + current_node = 
self.parent_mapping.get(node)
+        while current_node is not None:
+            current_node_conf = self.confidence_dict[current_node]
+            if current_node_conf > self.p_cutoff:
+                return current_node
+            else:
+                current_node = self.parent_mapping.get(current_node)
+        return None
+
+    def get_dynamic_skeleton(self):
+        # only consider the keypoints that are in the parent_mapping
+        for keypoint in self.parent_mapping.keys():
+            keypoint_conf = self.confidence_dict[keypoint]
+            if keypoint_conf > self.p_cutoff:
+                ancester = self.find_nearest_ancester(keypoint)
+                if ancester is not None:
+                    self.dynamic_skeleton.append((ancester, keypoint))
+
+        # add connection between C_hip and neck
+        if self.confidence_dict.get('C_hip') > self.p_cutoff and self.confidence_dict.get('neck') > self.p_cutoff:
+            self.dynamic_skeleton.append(('C_hip', 'neck'))
+        # if conf[C_hip] is below the cutoff, fall back to connecting root_tail to neck
+        elif self.confidence_dict.get('C_hip') < self.p_cutoff and self.confidence_dict.get('root_tail') > self.p_cutoff:
+            self.dynamic_skeleton.append(('root_tail', 'neck'))
+
+        return self.change_name_to_idx_dynamic_skeleton(self.dynamic_skeleton)
+
 def plot_gt_and_predictions_PFM(
     image_path: Union[str, Path],
     output_dir: Union[str, Path],
@@ -935,10 +1055,11 @@ def plot_gt_and_predictions_PFM(
             # Draw all valid connections
             # plot the skeleton is the skeleton is not None
             connection_pairs = []
-            dynamic_skeleton = skeleton.copy()
-
-            dynamic_skeleton = get_dynamic_skeleton(dynamic_skeleton, pred_bodyparts[idx_individual], p_cutoff)
+
+            # dynamic_skeleton = skeleton.copy()
+            # dynamic_skeleton = get_dynamic_skeleton(dynamic_skeleton, pred_bodyparts[idx_individual], p_cutoff)
+            dynamic_skeleton = DynamicSkeleton(pred_bodyparts[idx_individual], p_cutoff).get_dynamic_skeleton()
 
             for [idx1, idx2] in dynamic_skeleton:
                 # idx1 = idx1 - 1
                 # idx2 = idx2 - 1
                 # Only add the connection if both keypoints are visible and have confidence above threshold
                 if (pred_bodyparts[idx_individual, idx1, 2] > p_cutoff and
                     pred_bodyparts[idx_individual, idx2, 2] > p_cutoff):
                     connection_pairs.append({
                         'start': (pred_bodyparts[idx_individual, idx1, 0],
                                   pred_bodyparts[idx_individual, idx1, 1]),
                         'end': (pred_bodyparts[idx_individual, idx2, 0],
                                 pred_bodyparts[idx_individual, idx2, 1])
                     })
 
-                # if center_hip (26) is below the p_cutoff, and root_tail (33) is above the p_cutoff,
-                # then we can use root_tail to replace center_hip (just for connection!), otherwise we use center_hip
-                # if idx1 == 26 and pred_bodyparts[idx_individual, 26, 2] < p_cutoff and pred_bodyparts[idx_individual, 33, 2] > p_cutoff:
-                #     # Replace center_hip with root_tail for this connection
-                #     if pred_bodyparts[idx_individual, idx2, 2] > p_cutoff:
-                #         connection_pairs.append({
-                #             'start': (pred_bodyparts[idx_individual, 33, 0],
-                #                       pred_bodyparts[idx_individual, 33, 1]),
-                #             'end': (pred_bodyparts[idx_individual, idx2, 0],
-                #                     pred_bodyparts[idx_individual, idx2, 1])
-                #         })
-                # elif idx2 == 26 and pred_bodyparts[idx_individual, 26, 2] < p_cutoff and pred_bodyparts[idx_individual, 33, 2] > p_cutoff:
-                #     # Handle case where center_hip is the end point
-                #     if pred_bodyparts[idx_individual, idx1, 2] > p_cutoff:
-                #         connection_pairs.append({
-                #             'start': (pred_bodyparts[idx_individual, idx1, 0],
-                #                       pred_bodyparts[idx_individual, idx1, 1]),
-                #             'end': (pred_bodyparts[idx_individual, 33, 0],
-                #                     pred_bodyparts[idx_individual, 33, 1])
-                #         })
-
-                # if left hip (idx: 24) is below the p_cutoff and left knee (idx: 27) is above the p_cutoff,
-                # if center hip (idx: 26) is above the p_cutoff, then connect left knee to center hip,
-                # if center hip (idx: 26) is below the p_cutoff and root_tail (idx: 33) is above the p_cutoff, then we connect left knee to root_tail
-                # if pred_bodyparts[idx_individual, 24, 2] < p_cutoff and pred_bodyparts[idx_individual, 26, 2] > p_cutoff and pred_bodyparts[idx_individual, 27, 2] > p_cutoff:
-                #     connection_pairs.append({
-                #         'start': (pred_bodyparts[idx_individual, 27, 0],
-                #
pred_bodyparts[idx_individual, 27, 1]), - # 'end': (pred_bodyparts[idx_individual, 26, 0], - # pred_bodyparts[idx_individual, 26, 1]) - # }) - - # if right hip (idx: 25) is below the p_cutoff, and center hip (idx: 26) and right knee (idx: 28) are above the p_cutoff, - # then we can draw a line from the right knee (idx: 28) to the center hip (idx: 26) - # if pred_bodyparts[idx_individual, 25, 2] < p_cutoff and pred_bodyparts[idx_individual, 26, 2] > p_cutoff and pred_bodyparts[idx_individual, 28, 2] > p_cutoff: - # connection_pairs.append({ - # 'start': (pred_bodyparts[idx_individual, 28, 0], - # pred_bodyparts[idx_individual, 28, 1]), - # 'end': (pred_bodyparts[idx_individual, 26, 0], - # pred_bodyparts[idx_individual, 26, 1]) - # }) - for connection in connection_pairs: ax.plot( [connection['start'][0], connection['end'][0]], From 331e3a472eadd83459c9a031aa05e978a7d16b25 Mon Sep 17 00:00:00 2001 From: ti Date: Sun, 9 Mar 2025 12:42:53 +0100 Subject: [PATCH 73/88] DynamicSkeleton class: update keypoint names and enhance parent mapping for eye and ear parts --- .../pose_estimation_pytorch/apis/evaluation.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py index 3c36cfd75c..69984425e6 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py @@ -610,11 +610,11 @@ def __init__(self, pred_bodyparts, p_cutoff=0.6): self.keypoints = [ "forehead", "head", - "L_E", - "R_E", + "L_Eye", + "R_Eye", "nose", - "L_ear", - "R_ear", + "L_Ear", + "R_Ear", "mouth_front_top", "mouth_front_bottom", "mouth_B_L", @@ -647,6 +647,7 @@ def __init__(self, pred_bodyparts, p_cutoff=0.6): "end_tail" ] self.parent_mapping = { + # body part 'head': "neck", 'neck': None, # root 'L_Shoulder': 'neck', # Left Shoulder @@ -670,6 +671,12 @@ def __init__(self, pred_bodyparts, p_cutoff=0.6): 'M_tail': 'root_tail', 'M_end_tail': 'M_tail', 'end_tail': 'M_end_tail', + # mouse part + 'L_Ear': 'L_Eye', + 'R_Ear': 'R_Eye', + 'L_Eye' : 'nose', + 'R_Eye' : 'nose', + 'nose' : None, } confidence_dict = {} @@ -1056,7 +1063,7 @@ def plot_gt_and_predictions_PFM( # plot the skeleton is the skeleton is not None connection_pairs = [] - # dynamic_skeleton = skeleton.copy() + dynamic_skeleton = skeleton.copy() # dynamic_skeleton = get_dynamic_skeleton(dynamic_skeleton, pred_bodyparts[idx_individual], p_cutoff) dynamic_skeleton = DynamicSkeleton(pred_bodyparts[idx_individual], p_cutoff).get_dynamic_skeleton() From 01bbb21d199da3ca8ee591a7ae5e0fbbfde6a847 Mon Sep 17 00:00:00 2001 From: ti Date: Sun, 9 Mar 2025 13:07:01 +0100 Subject: [PATCH 74/88] plot_gt_and_predictions_PFM: improve text position tracking to avoid overlap; --- .../apis/evaluation.py | 24 ++++++++++--------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py index 69984425e6..1d5a658922 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py @@ -885,8 +885,6 @@ def plot_gt_and_predictions_PFM( ) ax.add_patch(rect) - # Track existing text positions to avoid overlap - existing_text_positions = [] scale_factor = min(w, h) / 1000 # Normalize scale factor based on image size @@ -949,9 +947,9 @@ def plot_gt_and_predictions_PFM( x_text = min(max(0, x_text), w - 100) y_text = min(max(0, 
y_text), h - 10) - while any(abs(x_text - ex) < 50 * scale_factor and abs(y_text - ey) < 20 * scale_factor - for ex, ey in existing_text_positions): - y_text += 20 * scale_factor + while any(abs(x_text - existing_x) < 50 * scale_factor and abs(y_text - existing_y) < 30 * scale_factor + for existing_x, existing_y in existing_text_positions): + y_text += 5 * scale_factor if y_text > h - 10: y_text = y_kp x_text += 50 * scale_factor @@ -996,6 +994,9 @@ def plot_gt_and_predictions_PFM( plt.close(fig_ind) # Original combined plot + # Track existing text positions to avoid overlap + existing_text_positions = [] + for idx_individual in range(num_pred): for idx_keypoint in range(num_keypoints): if pred_bodyparts is not None and keypoint_vis_mask[idx_keypoint]: @@ -1028,12 +1029,13 @@ def plot_gt_and_predictions_PFM( y_text = min(max(0, y_text), h - 10) # Avoid overlapping with existing text - while any(abs(x_text - ex) < 50 * scale_factor and abs(y_text - ey) < 20 * scale_factor - for ex, ey in existing_text_positions): - y_text += 20 * scale_factor - if y_text > h - 10: # If we run out of vertical space - y_text = pred_bodyparts[idx_individual, idx_keypoint, 1] # Reset to original y - x_text += 50 * scale_factor # Move text horizontally instead + while any(abs(x_text - existing_x) <= 15 * scale_factor and abs(y_text - existing_y) <= 15 * scale_factor + for existing_x, existing_y in existing_text_positions): + y_text += 7.5 * scale_factor + x_text += 4 * scale_factor + # if y_text > h - 10: # If we run out of vertical space + # y_text = pred_bodyparts[idx_individual, idx_keypoint, 1] # Reset to original y + # x_text += 50 * scale_factor # Move text horizontally instead # Record this position existing_text_positions.append((x_text, y_text)) From 1763f1a85a015a24cd31bc28a4a0783299ec74c7 Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 3 Apr 2025 16:10:25 +0200 Subject: [PATCH 75/88] update parent mapping for tail in DynamicSkeleton and refine dot size calculation in PFM plotting --- .../pose_estimation_pytorch/apis/evaluation.py | 8 +++++--- .../pose_estimation_pytorch/runners/train.py | 14 ++++++++++---- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py index 1d5a658922..317fbd0545 100755 --- a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py +++ b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py @@ -690,7 +690,7 @@ def __init__(self, pred_bodyparts, p_cutoff=0.6): if self.confidence_dict.get('C_hip') < self.p_cutoff: self.parent_mapping['L_hip'] = 'root_tail' self.parent_mapping['R_hip'] = 'root_tail' - self.parent_mapping['mid_tail'] = 'root_tail' + self.parent_mapping['M_tail'] = 'root_tail' self.parent_mapping['root_tail'] = None self.dynamic_skeleton.append(('root_tail', 'C_hip')) @@ -789,7 +789,7 @@ def plot_gt_and_predictions_PFM( dot_size = base_size # Ensure dot size stays within reasonable bounds - dot_size = int(max(4, min(dot_size, 15)))*0.8 # Tighter bounds for dots + dot_size = int(max(4, min(dot_size, 15)))*0.8 # *5 for oap # # Tighter bounds for dots # filter out the non exist individuals if bounding_boxes is not None: @@ -1028,6 +1028,8 @@ def plot_gt_and_predictions_PFM( x_text = min(max(0, x_text), w - 100) y_text = min(max(0, y_text), h - 10) + # ToDo + # dynamic text position; # Avoid overlapping with existing text while any(abs(x_text - existing_x) <= 15 * scale_factor and abs(y_text - existing_y) <= 15 * scale_factor for existing_x, existing_y in 
existing_text_positions): @@ -1065,7 +1067,7 @@ def plot_gt_and_predictions_PFM( # plot the skeleton is the skeleton is not None connection_pairs = [] - dynamic_skeleton = skeleton.copy() + # dynamic_skeleton = skeleton.copy() # dynamic_skeleton = get_dynamic_skeleton(dynamic_skeleton, pred_bodyparts[idx_individual], p_cutoff) dynamic_skeleton = DynamicSkeleton(pred_bodyparts[idx_individual], p_cutoff).get_dynamic_skeleton() diff --git a/deeplabcut/pose_estimation_pytorch/runners/train.py b/deeplabcut/pose_estimation_pytorch/runners/train.py index d9788c2922..9e8a3b6fea 100644 --- a/deeplabcut/pose_estimation_pytorch/runners/train.py +++ b/deeplabcut/pose_estimation_pytorch/runners/train.py @@ -241,7 +241,7 @@ def fit( line_length = max([len(name) for name in epoch_metrics.keys()]) + 2 for name, score in epoch_metrics.items(): logging.info(f" {(name + ':').ljust(line_length)}{score:6.2f}") - + def _epoch( self, loader: torch.utils.data.DataLoader, @@ -273,6 +273,12 @@ def _epoch( epoch_loss = [] loss_metrics = defaultdict(list) for i, batch in enumerate(loader): + # batch: dict_keys(['image', 'image_id', 'path', 'original_size', 'offsets', 'scales', 'annotations']) + # print("batch:", batch.keys()) + # we can get the dataset name from the path; + # path ['xxx/v8_coco/images/mbw_0_cam1_0149.jpg', 'xx/v8_coco/images/mbw_0_cam2_0060.jpg'] + # print("path", batch["path"]) + losses_dict = self.step(batch, mode) if "total_loss" in losses_dict: epoch_loss.append(losses_dict["total_loss"]) @@ -433,15 +439,15 @@ def step( if mode == "train": self.optimizer.zero_grad() - inputs = batch["image"] + inputs = batch["image"] # [B,3, 256, 256] inputs = inputs.to(self.device).float() - outputs = self.model(inputs) + outputs = self.model(inputs) # {'bodypart':{'heatmap':[B,37,64,74]}, 'locref': {B, 74, 64,64} } if self._data_parallel: underlying_model = self.model.module else: underlying_model = self.model - + # the same structure with outputs target = underlying_model.get_target(outputs, batch["annotations"]) losses_dict = underlying_model.get_loss(outputs, target) if mode == "train": From 4c463600054644d9928008aa935e80a954fa2f69 Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 3 Apr 2025 16:11:00 +0200 Subject: [PATCH 76/88] add json_tools module; first function: converting and formatting JSON files with proper indentation --- common_tools/json_tools.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 common_tools/json_tools.py diff --git a/common_tools/json_tools.py b/common_tools/json_tools.py new file mode 100644 index 0000000000..e338298c8a --- /dev/null +++ b/common_tools/json_tools.py @@ -0,0 +1,29 @@ +import json +import os +from pathlib import Path + +def convert_json_indent_folder_level(source_folder_path, target_folder_path): + """ + Read JSON files and save them with proper indentation. 
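The collision loop tuned in patch 74 above nudges each keypoint label diagonally until it clears every label placed before it. Condensed to its core, with the patched step sizes (7.5 and 4 units) and the 15-unit collision box taken from the diff, the greedy strategy looks like this sketch:

```python
# Condensed sketch of the greedy label-placement loop tuned in patch 74;
# step sizes and the collision box mirror the new values in the diff.
def place_label(x, y, placed, scale, step_y=7.5, step_x=4.0, box=15.0):
    """Shift (x, y) until it is more than `box * scale` away from every
    previously placed label, then record and return the position."""
    while any(abs(x - px) <= box * scale and abs(y - py) <= box * scale
              for px, py in placed):
        y += step_y * scale
        x += step_x * scale
    placed.append((x, y))
    return x, y

placed = []
print(place_label(10.0, 10.0, placed, scale=1.0))  # (10.0, 10.0)
print(place_label(12.0, 11.0, placed, scale=1.0))  # nudged clear of the first label
```

Because each new label only checks already-placed ones, the loop always terminates, at the cost of labels drifting down-right in crowded regions.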
+ + Args: + source_folder_path (str): Path to the folder containing original JSON files + target_folder_path (str): Path to save the formatted JSON files + """ + # Create target folder if it doesn't exist + Path(target_folder_path).mkdir(parents=True, exist_ok=True) + + # Get all JSON files from source folder + json_files = [f for f in os.listdir(source_folder_path) if f.endswith('.json')] + print(f"Processing {len(json_files)} JSON files...") + + for json_file in json_files: + source_path = os.path.join(source_folder_path, json_file) + target_path = os.path.join(target_folder_path, json_file) + + # Read source JSON file and save with proper indentation + with open(source_path, 'r') as f: + data = json.load(f) + + with open(target_path, 'w') as f: + json.dump(data, f, indent=4) \ No newline at end of file From 583d5d95283aabe78cde718a166fde2d12db6820 Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 1 May 2025 17:35:55 +0200 Subject: [PATCH 77/88] add download_from_huggingface_hub function --- deeplabcut/common_tools/download.py | 58 +++++++++++++++++++ .../common_tools}/json_tools.py | 0 2 files changed, 58 insertions(+) create mode 100644 deeplabcut/common_tools/download.py rename {common_tools => deeplabcut/common_tools}/json_tools.py (100%) diff --git a/deeplabcut/common_tools/download.py b/deeplabcut/common_tools/download.py new file mode 100644 index 0000000000..68223bf24e --- /dev/null +++ b/deeplabcut/common_tools/download.py @@ -0,0 +1,58 @@ +from huggingface_hub import hf_hub_download +import os + +def download_from_huggingface_hub(target_folder_path, repo_id, filename, subfolder=None): + """ + Download a file from the Hugging Face Hub to a specified local directory. + + Parameters: + target_folder_path (str): Local directory path where file will be saved + repo_id (str): Hugging Face repository ID (e.g., 'noahcao/sapiens-pose-coco') + filename (str): Name of the file to download + subfolder (str, optional): Path to subfolder within the repository where the file is located + + Returns: + str: Full path to the downloaded file + + Examples: + >>> # Download a model file from noahcao/sapiens-pose-coco + >>> download_from_huggingface_hub( + ... target_folder_path="./models/sapiens", + ... repo_id="noahcao/sapiens-pose-coco", + ... filename="sapiens_2b_coco_best_coco_AP_822_torchscript.pt2", + ... subfolder="sapiens_lite_host/torchscript/pose/checkpoints/sapiens_2b" + ... ) + + >>> # Download a file without specifying a subfolder + >>> download_from_huggingface_hub( + ... target_folder_path="./data", + ... repo_id="noahcao/sapiens-pose-coco", + ... filename="COCO_val2017_detections_AP_H_70_person.json", + ... subfolder="sapiens_host/pose/person_detection_results" + ... 
) + """ + # Create the target directory if it does not exist + os.makedirs(target_folder_path, exist_ok=True) + + # Download the file from Hugging Face Hub + return hf_hub_download( + repo_id=repo_id, + filename=filename, + subfolder=subfolder, + local_dir=target_folder_path + ) + +# Example usage: +if __name__ == "__main__": + # Example to download the model from noahcao/sapiens-pose-coco repository + target_dir = "xxxx/sapiens/sapiens_lite_host/torchscript/pose/checkpoints/sapiens_2b" + + downloaded_file = download_from_huggingface_hub( + target_folder_path=target_dir, + repo_id="noahcao/sapiens-pose-coco", + filename="sapiens_2b_coco_best_coco_AP_822_torchscript.pt2", + subfolder="sapiens_lite_host/torchscript/pose/checkpoints/sapiens_2b" + ) + + print(f"File downloaded to: {downloaded_file}") + \ No newline at end of file diff --git a/common_tools/json_tools.py b/deeplabcut/common_tools/json_tools.py similarity index 100% rename from common_tools/json_tools.py rename to deeplabcut/common_tools/json_tools.py From c5ddbfebbd4f599bb9d282ef9c179477925849b4 Mon Sep 17 00:00:00 2001 From: ti Date: Sun, 11 May 2025 23:15:49 +0200 Subject: [PATCH 78/88] debug --- deeplabcut/pose_estimation_pytorch/data/image.py | 8 ++++++++ deeplabcut/pose_estimation_pytorch/data/utils.py | 12 +++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/deeplabcut/pose_estimation_pytorch/data/image.py b/deeplabcut/pose_estimation_pytorch/data/image.py index 115b76beb2..3dfa7b03a4 100644 --- a/deeplabcut/pose_estimation_pytorch/data/image.py +++ b/deeplabcut/pose_estimation_pytorch/data/image.py @@ -194,6 +194,14 @@ def top_down_crop( image_h, image_w, c = image.shape out_w, out_h = output_size x, y, w, h = bbox + + # Safety check for zero dimensions + # if w <= 0: + # print(f"ERROR: Zero width in bbox {bbox}, using default width=1") + # w = 1 + # if h <= 0: + # print(f"ERROR: Zero height in bbox {bbox}, using default height=1") + # h = 1 cx = x + w / 2 cy = y + h / 2 diff --git a/deeplabcut/pose_estimation_pytorch/data/utils.py b/deeplabcut/pose_estimation_pytorch/data/utils.py index 68101913ca..5808c79fad 100644 --- a/deeplabcut/pose_estimation_pytorch/data/utils.py +++ b/deeplabcut/pose_estimation_pytorch/data/utils.py @@ -52,6 +52,11 @@ def bbox_from_keypoints( # we do not estimate bbox on keypoints that have 0 or -1 flag keypoints = np.copy(keypoints) + + # debug + if np.all(keypoints[..., -1] <= 0): + print(f"All keypoints: {keypoints}") + keypoints[keypoints[..., -1] <= 0] = np.nan if len(keypoints.shape) == 2: @@ -72,9 +77,14 @@ def bbox_from_keypoints( ) bboxes[..., 2] = bboxes[..., 2] - bboxes[..., 0] # to width bboxes[..., 3] = bboxes[..., 3] - bboxes[..., 1] # to height + + # debug + if np.array_equal(bboxes[0], np.array([0, 0, 0, 0])): + print(f"All bboxes: {bboxes}") + if squeeze: return bboxes[0] - + return bboxes From 89d33a93df08d2b9b3020056a66207d89badb8b8 Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 19 Jun 2025 10:37:20 +0200 Subject: [PATCH 79/88] =?UTF-8?q?SuperAnimal=20VideoAdaptation:=20Fix=20Ch?= =?UTF-8?q?eckpoint=20Inconsistency=20Error=20=09=E2=80=A2=09Added=20a=20g?= =?UTF-8?q?et=5Fcheckpoint=5Fepoch=20function=20to=20retrieve=20the=20curr?= =?UTF-8?q?ent=20epoch=20from=20a=20PyTorch=20checkpoint.=20=09=E2=80=A2?= =?UTF-8?q?=09Updated=20checkpoint=20handling=20logic=20to=20calculate=20t?= =?UTF-8?q?he=20adapted=20checkpoint=20path.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- deeplabcut/modelzoo/video_inference.py | 50 
+++++++++++++++++++++++--- 1 file changed, 45 insertions(+), 5 deletions(-) diff --git a/deeplabcut/modelzoo/video_inference.py b/deeplabcut/modelzoo/video_inference.py index f28d4846ca..2c54520f69 100644 --- a/deeplabcut/modelzoo/video_inference.py +++ b/deeplabcut/modelzoo/video_inference.py @@ -12,6 +12,7 @@ import json import os +import torch from pathlib import Path from typing import Optional, Union @@ -33,6 +34,18 @@ video_to_frames, ) +def get_checkpoint_epoch(checkpoint_path): + """ + Load a PyTorch checkpoint and return the current epoch number. + + Args: + checkpoint_path (str): Path to the checkpoint file + + Returns: + int: Current epoch number, or None if not found + """ + checkpoint = torch.load(checkpoint_path) + return checkpoint['metadata']['epoch'] def video_inference_superanimal( videos: Union[str, list], @@ -418,7 +431,6 @@ def video_inference_superanimal( model_snapshot_prefix = f"snapshot-{model_name}" detector_snapshot_prefix = f"snapshot-{detector_name}" - config["runner"]["snapshot_prefix"] = model_snapshot_prefix config["detector"]["runner"]["snapshot_prefix"] = detector_snapshot_prefix @@ -427,14 +439,26 @@ def video_inference_superanimal( with open(model_config_path, "w") as f: yaml = YAML() yaml.dump(config, f) - + + # get the current epoch of the detector and pose model + current_pose_epoch = get_checkpoint_epoch(pose_model_path) + current_detector_epoch = get_checkpoint_epoch(detector_path) + adapted_detector_checkpoint = ( - model_folder / f"{detector_snapshot_prefix}-{detector_epochs:03}.pt" + model_folder / f"{detector_snapshot_prefix}-{current_detector_epoch + detector_epochs:03}.pt" ) adapted_pose_checkpoint = ( - model_folder / f"{model_snapshot_prefix}-{pose_epochs:03}.pt" + model_folder / f"{model_snapshot_prefix}-{current_pose_epoch + pose_epochs:03}.pt" ) - + if not Path(adapted_detector_checkpoint).exists(): + adapted_detector_checkpoint = ( + model_folder / f"{detector_snapshot_prefix}-best-{current_detector_epoch + detector_epochs:03}.pt" + ) + if not Path(adapted_pose_checkpoint).exists(): + adapted_pose_checkpoint = ( + model_folder / f"{model_snapshot_prefix}-best-{current_pose_epoch + pose_epochs:03}.pt" + ) + if ( adapted_detector_checkpoint.exists() and adapted_pose_checkpoint.exists() @@ -484,6 +508,22 @@ def video_inference_superanimal( detector_batch_size=video_adapt_batch_size, ) + # recheck the checkpoint paths after adaptation training + adapted_detector_checkpoint = ( + model_folder / f"{detector_snapshot_prefix}-{current_detector_epoch + detector_epochs:03}.pt" + ) + adapted_pose_checkpoint = ( + model_folder / f"{model_snapshot_prefix}-{current_pose_epoch + pose_epochs:03}.pt" + ) + if not Path(adapted_detector_checkpoint).exists(): + adapted_detector_checkpoint = ( + model_folder / f"{detector_snapshot_prefix}-best-{current_detector_epoch + detector_epochs:03}.pt" + ) + if not Path(adapted_pose_checkpoint).exists(): + adapted_pose_checkpoint = ( + model_folder / f"{model_snapshot_prefix}-best-{current_pose_epoch + pose_epochs:03}.pt" + ) + # Set the customized checkpoint paths and output_suffix = "_after_adapt" detector_path = adapted_detector_checkpoint From c64cced6668ae08a37e76e457a5e9c322871ab42 Mon Sep 17 00:00:00 2001 From: ti Date: Thu, 19 Jun 2025 10:50:02 +0200 Subject: [PATCH 80/88] Enhance get_checkpoint_epoch function to handle missing metadata gracefully. Returns 0 if 'epoch' is not found in the checkpoint, improving robustness. 
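
For illustration, a minimal sketch of the intended fallback behaviour,
assuming a DeepLabCut snapshot is a dict that may store the epoch under
metadata['epoch'] (the snapshot filename below is hypothetical):

    import torch

    def get_checkpoint_epoch(checkpoint_path):
        # Only the metadata is needed here, so load onto the CPU.
        checkpoint = torch.load(checkpoint_path, map_location="cpu")
        # Missing metadata or epoch falls back to 0, so the adapted
        # snapshot name current_epoch + extra_epochs stays well-defined.
        return checkpoint.get("metadata", {}).get("epoch", 0)

    # e.g. resuming from epoch 1 with pose_epochs=4 looks for
    # snapshot-<model_name>-005.pt, or snapshot-<model_name>-best-005.pt
    # when the plain snapshot does not exist:
    current_epoch = get_checkpoint_epoch("snapshot-example-001.pt")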
---
 deeplabcut/modelzoo/video_inference.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/deeplabcut/modelzoo/video_inference.py b/deeplabcut/modelzoo/video_inference.py
index 2c54520f69..91068e7e9b 100644
--- a/deeplabcut/modelzoo/video_inference.py
+++ b/deeplabcut/modelzoo/video_inference.py
@@ -45,7 +45,10 @@ def get_checkpoint_epoch(checkpoint_path):
         int: Current epoch number, or None if not found
     """
     checkpoint = torch.load(checkpoint_path)
-    return checkpoint['metadata']['epoch']
+    if 'metadata' in checkpoint and 'epoch' in checkpoint['metadata']:
+        return checkpoint['metadata']['epoch']
+    else:
+        return 0
 
 def video_inference_superanimal(
     videos: Union[str, list],

From 634e969a78311a71cf5bd97be7df41daa7f4cf26 Mon Sep 17 00:00:00 2001
From: ti
Date: Sun, 27 Jul 2025 17:34:08 +0200
Subject: [PATCH 81/88] Add import for matplotlib.patches in evaluation.py

---
 deeplabcut/pose_estimation_pytorch/apis/evaluation.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py
index a1bf4530c3..4ddda1b770 100755
--- a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py
+++ b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py
@@ -49,6 +49,7 @@
     save_labeled_frame,
 )
 import matplotlib.pyplot as plt
+import matplotlib.patches as patches
 from typing import Optional, Union, List, Tuple, Dict
 import logging
 

From bff93afe66354bf81c42fc5f88737cb725ad86a3 Mon Sep 17 00:00:00 2001
From: Ti Wang <81274389+xiu-cs@users.noreply.github.com>
Date: Mon, 4 Aug 2025 15:15:36 +0200
Subject: [PATCH 82/88] Update deeplabcut/pose_estimation_pytorch/apis/evaluation.py

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 deeplabcut/pose_estimation_pytorch/apis/evaluation.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py
index 3a91195bb7..80f80fe328 100755
--- a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py
+++ b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py
@@ -52,7 +52,6 @@
 import matplotlib.patches as patches
 from typing import Optional, Union, List, Tuple, Dict
 import logging
-import matplotlib.patches as patches
 
 def predict(
     pose_runner: InferenceRunner,

From 090a0aa1ddffa55e063c34740ec8874269c8e118 Mon Sep 17 00:00:00 2001
From: ti
Date: Mon, 4 Aug 2025 15:41:59 +0200
Subject: [PATCH 83/88] Fix typo in find_nearest_ancestor method name in
 evaluation.py

---
 deeplabcut/pose_estimation_pytorch/apis/evaluation.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py
index 629289342f..826757fe63 100755
--- a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py
+++ b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py
@@ -709,7 +709,7 @@ def change_name_to_idx_dynamic_skeleton(self, dynamic_skeleton):
 
         return dynamic_skeleton
 
-    def find_nearest_ancester(self, node):
+    def find_nearest_ancestor(self, node):
         current_node = self.parent_mapping.get(node)
         while current_node is not None:
             current_node_conf = self.confidence_dict[current_node]
@@ -724,9 +724,9 @@ def get_dynamic_skeleton(self):
         for keypoint in self.parent_mapping.keys():
             keypoint_conf = self.confidence_dict[keypoint]
             if keypoint_conf > self.p_cutoff:
-                ancester = self.find_nearest_ancester(keypoint)
-                if ancester is not None:
-                    self.dynamic_skeleton.append((ancester, keypoint))
+                ancestor = self.find_nearest_ancestor(keypoint)
+                if ancestor is not None:
+                    self.dynamic_skeleton.append((ancestor, keypoint))
 
         # add connection between C_hip and neck
         if self.confidence_dict.get('C_hip') > self.p_cutoff and self.confidence_dict.get('neck') > self.p_cutoff:

From 3dfa7aa65a49425058103abf785adeca68393858 Mon Sep 17 00:00:00 2001
From: Ti Wang <81274389+xiu-cs@users.noreply.github.com>
Date: Mon, 4 Aug 2025 15:44:29 +0200
Subject: [PATCH 84/88] Update deeplabcut/pose_estimation_pytorch/apis/evaluation.py

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 deeplabcut/pose_estimation_pytorch/apis/evaluation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py
index 826757fe63..ec4f0015c7 100755
--- a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py
+++ b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py
@@ -520,7 +520,7 @@ def visualize_predictions_PFM(
             # print("bbox_scores:", bbox_scores)
             bounding_boxes = (
                 (bboxes, bbox_scores)
-                if bbox_scores is not None and bbox_scores is not None
+                if bbox_scores is not None and bboxes is not None
                 else None
             )
         else:

From e6cdf493308b69712c89a4b9596c97a9a20f39de Mon Sep 17 00:00:00 2001
From: ti
Date: Mon, 4 Aug 2025 15:55:11 +0200
Subject: [PATCH 85/88] Refactor change_name_to_idx_dynamic_skeleton method in
 evaluation.py

Updated the method to convert dynamic skeleton keypoint names to numerical
indices directly from the class attribute, improving clarity and
functionality. Added detailed docstring to explain the method's purpose and
usage.
---
 .../apis/evaluation.py | 40 +++++++++++++++----
 1 file changed, 33 insertions(+), 7 deletions(-)

diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py
index ec4f0015c7..9099642e25 100755
--- a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py
+++ b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py
@@ -700,13 +700,39 @@ def __init__(self, pred_bodyparts, p_cutoff=0.6):
             self.parent_mapping['root_tail'] = None
             self.dynamic_skeleton.append(('root_tail', 'C_hip'))
 
-    def change_name_to_idx_dynamic_skeleton(self, dynamic_skeleton):
-        # change the dynamic skeleton index to the new index;
-        dynamic_skeleton = []
+    def change_name_to_idx_dynamic_skeleton(self):
+        """
+        Convert dynamic skeleton from keypoint names to numerical indices.
+
+        This function takes the dynamic skeleton stored in self.dynamic_skeleton,
+        which contains tuples of keypoint names (e.g., ('head', 'neck')),
+        and converts them to tuples of numerical indices based on the position
+        of each keypoint name in self.keypoints list.
+
+        Returns:
+            list: A list of tuples where each tuple contains (from_idx, to_idx)
+                representing the indices of connected keypoints in the skeleton.
+
+        Example:
+            If self.keypoints = ['head', 'neck', 'shoulder'] and
+            self.dynamic_skeleton = [('head', 'neck'), ('neck', 'shoulder')]
+            Returns: [(0, 1), (1, 2)]
+        """
+        # Initialize empty list to store converted skeleton indices
+        result = []
+
+        # Iterate through each connection in the dynamic skeleton
         for idx, (from_node, end_node) in enumerate(self.dynamic_skeleton):
-            # print((self.keypoints.index(from_node), self.keypoints.index(end_node)))
-            dynamic_skeleton.append((self.keypoints.index(from_node), self.keypoints.index(end_node)))
-        return dynamic_skeleton
+            # Convert keypoint names to their corresponding indices in self.keypoints
+            # from_node: the starting keypoint of the connection
+            # end_node: the ending keypoint of the connection
+            from_idx = self.keypoints.index(from_node)
+            end_idx = self.keypoints.index(end_node)
+
+            # Add the converted indices as a tuple to the result list
+            result.append((from_idx, end_idx))
+
+        return result
 
 
     def find_nearest_ancestor(self, node):
@@ -735,7 +761,7 @@ def get_dynamic_skeleton(self):
         elif self.confidence_dict.get('C_hip') < self.p_cutoff and self.confidence_dict.get('neck') > self.p_cutoff and self.confidence_dict.get('root_tail') > self.p_cutoff:
             self.dynamic_skeleton.append(('root_tail', 'neck'))
 
-        return self.change_name_to_idx_dynamic_skeleton(self.dynamic_skeleton)
+        return self.change_name_to_idx_dynamic_skeleton()

From 9b06f41419c33736e3a133c63d6b33c710c89257 Mon Sep 17 00:00:00 2001
From: Ti Wang <81274389+xiu-cs@users.noreply.github.com>
Date: Mon, 4 Aug 2025 15:58:32 +0200
Subject: [PATCH 86/88] Update .circleci/data

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 .circleci/data | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.circleci/data b/.circleci/data
index 76bfa6a319..b8e5c68cee 120000
--- a/.circleci/data
+++ b/.circleci/data
@@ -1 +1 @@
-/home/ti_wang/data
\ No newline at end of file
+${DATA_DIR:-./data}
\ No newline at end of file

From 8fcc167a6fae25d97fc5c93a5f335a2253a15ffb Mon Sep 17 00:00:00 2001
From: Ti Wang <81274389+xiu-cs@users.noreply.github.com>
Date: Mon, 4 Aug 2025 15:58:58 +0200
Subject: [PATCH 87/88] Update deeplabcut/pose_estimation_pytorch/runners/train.py

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 deeplabcut/pose_estimation_pytorch/runners/train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deeplabcut/pose_estimation_pytorch/runners/train.py b/deeplabcut/pose_estimation_pytorch/runners/train.py
index 0d45443e36..80249a65e7 100644
--- a/deeplabcut/pose_estimation_pytorch/runners/train.py
+++ b/deeplabcut/pose_estimation_pytorch/runners/train.py
@@ -439,7 +439,7 @@ def step(
         if mode == "train":
             self.optimizer.zero_grad()
 
-        inputs = batch["image"]  # [B,3, 256, 256]
+        inputs = batch["image"]  # [batch_size, channels, height, width]
         inputs = inputs.to(self.device).float()
         if 'cond_keypoints' in batch['context']:
             cond_kpts = batch['context']['cond_keypoints']

From 29393a6edcb4c26a24951fbc0551e9cb1fb25014 Mon Sep 17 00:00:00 2001
From: ti
Date: Mon, 4 Aug 2025 15:59:23 +0200
Subject: [PATCH 88/88] Refactor change_name_to_idx_dynamic_skeleton method in
 evaluation.py

Simplified the method's docstring to enhance clarity while maintaining its
functionality. Removed unnecessary comments to streamline the code.
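
For reference, a minimal sketch of the conversion the method performs; the
keypoint names come from the docstring example removed below:

    # Names -> index pairs, using each name's position in self.keypoints.
    keypoints = ['head', 'neck', 'shoulder']
    dynamic_skeleton = [('head', 'neck'), ('neck', 'shoulder')]
    result = [(keypoints.index(a), keypoints.index(b)) for a, b in dynamic_skeleton]
    assert result == [(0, 1), (1, 2)]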
---
 .../apis/evaluation.py | 24 +------------------
 1 file changed, 1 insertion(+), 23 deletions(-)

diff --git a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py
index 9099642e25..a02410f7e2 100755
--- a/deeplabcut/pose_estimation_pytorch/apis/evaluation.py
+++ b/deeplabcut/pose_estimation_pytorch/apis/evaluation.py
@@ -701,37 +701,15 @@ def __init__(self, pred_bodyparts, p_cutoff=0.6):
             self.dynamic_skeleton.append(('root_tail', 'C_hip'))
 
     def change_name_to_idx_dynamic_skeleton(self):
-        """
-        Convert dynamic skeleton from keypoint names to numerical indices.
-
-        This function takes the dynamic skeleton stored in self.dynamic_skeleton,
-        which contains tuples of keypoint names (e.g., ('head', 'neck')),
-        and converts them to tuples of numerical indices based on the position
-        of each keypoint name in self.keypoints list.
-
-        Returns:
-            list: A list of tuples where each tuple contains (from_idx, to_idx)
-                representing the indices of connected keypoints in the skeleton.
-
-        Example:
-            If self.keypoints = ['head', 'neck', 'shoulder'] and
-            self.dynamic_skeleton = [('head', 'neck'), ('neck', 'shoulder')]
-            Returns: [(0, 1), (1, 2)]
-        """
-        # Initialize empty list to store converted skeleton indices
+        """Convert skeleton from keypoint names to indices."""
         result = []
-
-        # Iterate through each connection in the dynamic skeleton
         for idx, (from_node, end_node) in enumerate(self.dynamic_skeleton):
             # Convert keypoint names to their corresponding indices in self.keypoints
             # from_node: the starting keypoint of the connection
             # end_node: the ending keypoint of the connection
             from_idx = self.keypoints.index(from_node)
             end_idx = self.keypoints.index(end_node)
-
-            # Add the converted indices as a tuple to the result list
             result.append((from_idx, end_idx))
-
         return result