From 40f0cdecbc7d229b37f4f42e5bed6dab6b5003ee Mon Sep 17 00:00:00 2001 From: Geonhyeong Kim Date: Wed, 29 Oct 2025 21:19:27 +0900 Subject: [PATCH 01/20] feat: analyze_imagenet_hpo --- experiments/scripts/analyze_imagenet_hpo.py | 340 ++++++++++++++++++++ experiments/utils/helpers.py | 99 ++++++ 2 files changed, 439 insertions(+) create mode 100644 experiments/scripts/analyze_imagenet_hpo.py diff --git a/experiments/scripts/analyze_imagenet_hpo.py b/experiments/scripts/analyze_imagenet_hpo.py new file mode 100644 index 0000000..ae927cd --- /dev/null +++ b/experiments/scripts/analyze_imagenet_hpo.py @@ -0,0 +1,340 @@ +from collections import defaultdict +import argparse +import os + +from torchvision.transforms import InterpolationMode, Resize +import captum +import torch +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.gridspec as gridspec + +from pnpxai.core.modality.modality import ImageModality +from pnpxai.explainers import LRPUniformEpsilon, IntegratedGradients, KernelShap +from pnpxai.explainers.utils.feature_masks import FeatureMaskFunction +from pnpxai.explainers.utils.function_selectors import FunctionSelector +from pnpxai.explainers.utils.postprocess import PostProcessor +from pnpxai.evaluator.metrics import MoRF, LeRF, AbPC +from pnpxai.evaluator.optimizer import Objective, optimize + +from experiments.utils.helpers import ( + set_seed, + get_torchvision_model, + get_imagenet_sample_from_hf, + denormalize_image, + save_pickle_data, + load_pickle_data, +) + + +# plot settings +plt.rcParams['font.family'] = 'Times New Roman' + + +# configs +TARGET_EXPLAINERS = { + 'lrpe': { + 'pnpxai': LRPUniformEpsilon, + 'captum': captum.attr.LRP, + 'dname': r'LRP-Uniform$\varepsilon$', + }, + 'ig': { + 'pnpxai': IntegratedGradients, + 'captum': captum.attr.IntegratedGradients, + 'dname': 'Integrated Gradients' + }, + 'ks': { + 'pnpxai': KernelShap, + 'captum': captum.attr.KernelShap, + 'dname': 'KernelSHAP', + } +} + +TARGET_METRICS = { + 'morf': { + 'cls': MoRF, + 'dname': r'MoRF$\downarrow$', + }, + 'lerf': { + 'cls': LeRF, + 'dname': r'LeRF$\uparrow$', + }, + 'abpc': { + 'cls': AbPC, + 'dname': r'AbPC$\uparrow$', + }, +} + + +# a custom feature mask function +class Checkerboard(FeatureMaskFunction): + def __init__( + self, + size=[20, 20], + ): + assert len(size) == 2 + self.size = size + self._n_checkers = size[0] * size[1] + + def __call__(self, inputs: torch.Tensor): + assert inputs.dim() == 4 + + bsz, c, h, w = inputs.size() + # print(input_size) + + resize = Resize([h, w], interpolation=InterpolationMode.NEAREST) + + patch_masks = [] + for i in range(self._n_checkers): + mask = np.zeros(self._n_checkers) + mask[i] = i + mask = resize( + torch.Tensor(mask).reshape(- + 1,self.size[0], self.size[1])).unsqueeze(1) + patch_masks.append(mask.numpy()) + return torch.from_numpy(sum(patch_masks)).squeeze(1).repeat( + bsz, 1, 1).long().to(inputs.device) + + +def analyze(args, fp_data): + """ + Performs HPO and saves raw data for a single data instance. 
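For intuition, the Checkerboard feature mask defined above is just a grid of patch indices upsampled to the input resolution with nearest-neighbor interpolation, so every pixel carries the id of the patch it belongs to. A minimal equivalent sketch (the 4x4 grid and 224x224 target size are chosen here purely for illustration):

    import torch
    from torchvision.transforms import InterpolationMode, Resize

    grid_h, grid_w = 4, 4                                    # 16 patches for the sketch
    ids = torch.arange(grid_h * grid_w).reshape(1, grid_h, grid_w).float()
    mask = Resize([224, 224], interpolation=InterpolationMode.NEAREST)(ids).long()
    # mask[0] is a 224x224 map whose values in {0, ..., 15} label each pixel's patch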
+ """ + + # Setup + set_seed(args.seed) + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + + model, transform = get_torchvision_model('resnet18') + model = model.to(device) + model.eval() + + img, label = get_imagenet_sample_from_hf( + transform, + hf_repo_id="geonhyeongkim/imagenet-samples-for-pnpxai-experiments", + indices=[args.data_id], + ) + imgs = img.unsqueeze(0).to(device) + targets = torch.tensor(args.data_id).unsqueeze(0).to(device) + + # Get explanations and evaluations + fm_selector = FunctionSelector( + {'checkerboard': Checkerboard}) + modality = ImageModality( # set data modality + channel_dim=1, + feature_mask_fn_selector=fm_selector, + ) + + plot_data = { # container for collecting plot data + 'img': denormalize_image(img, mean=transform.mean, std=transform.std), + 'label': label, + 'heatmaps': defaultdict(dict), + 'values': defaultdict(lambda: defaultdict(dict)), + } + for explainer_key in TARGET_EXPLAINERS: + # --- pnpxai ----------------------------------------------------------- + + # Create explainer + pnp_explainer = TARGET_EXPLAINERS[explainer_key]['pnpxai'](model) + metric = TARGET_METRICS['abpc']['cls']( + model=model, explainer=pnp_explainer, + ) # metric to be used as objective: AbPC + + # Get default postprocessor to initialize optimization: (SumPos, MinMax) + default_pp = modality.get_default_postprocessors()[0] + + # Optimize explainer + obj = Objective( + explainer=pnp_explainer, + postprocessor=default_pp, + metric=metric, + modality=modality, + inputs=imgs, + targets=targets, + ) + study = optimize( + obj, + direction='maximize', + n_trials=args.n_trials, + sampler='tpe', + seed=args.seed, + ) + opt_explainer, opt_pp = study.best_trial.user_attrs.values() + + # Get explanation of the optimized explainer + opt_attrs = opt_explainer.attribute(imgs, targets) + opt_attrs_pp = opt_pp(opt_attrs) + plot_data['heatmaps']['pnpxai'][explainer_key] = ( + opt_attrs_pp.squeeze().detach().cpu().numpy()) + + + # --- captum ----------------------------------------------------------- + + # Create explainer + captum_kwargs = {} + if explainer_key == 'ks': + # Use same feature mask with pnpxai + captum_kwargs['feature_mask'] = Checkerboard()(imgs) + captum_explainer = TARGET_EXPLAINERS[explainer_key]['captum'](model) + + # Get explanation of captum explainer + captum_attrs = captum_explainer.attribute( + inputs=imgs, target=targets, **captum_kwargs) + captum_attrs_pp = default_pp(captum_attrs) + plot_data['heatmaps']['captum'][explainer_key] = ( + captum_attrs_pp.squeeze().detach().cpu().numpy()) + + + # --- evaluation ------------------------------------------------------- + + for metric_key in TARGET_METRICS: + # Create metric and evaluate + pnp_value = TARGET_METRICS[metric_key]['cls']( + model=model, explainer=pnp_explainer).evaluate( + inputs=imgs, targets=targets, attributions=opt_attrs_pp).item() + plot_data['values']['pnpxai'][explainer_key][metric_key] = pnp_value + captum_value = TARGET_METRICS[metric_key]['cls']( + model=model, explainer=captum_explainer).evaluate( + inputs=imgs, targets=targets, attributions=captum_attrs_pp).item() + plot_data['values']['captum'][explainer_key][metric_key] = captum_value + + # Save the data + os.makedirs(os.path.dirname(fp_data), exist_ok=True) + save_pickle_data(data=plot_data, filepath=fp_data) + + +DEFAULT_SELECTED_SAMPLE_INDICES = [ + 75, 358, 367, 852, +] + + +def visualize(args, fp_data, fp_fig): + """ + Loads saved data for a single instance and generates visualization. 
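Once analyze() has written its pickle, the saved payload can be inspected directly; a small sketch, assuming the analyze step has already been run for data_id 75 with the default save_dir (in the later refactor commit of this series the import moves to experiments.utils):

    from experiments.utils.helpers import load_pickle_data

    plot_data = load_pickle_data('results/analyze_imagenet_hpo/raw/75.pkl')
    print(plot_data['label'])                               # class name for the sample
    print(plot_data['values']['pnpxai']['lrpe']['abpc'])    # AbPC of the optimized LRP-UniformEps
    print(plot_data['heatmaps']['captum']['ig'].shape)      # captum Integrated Gradients heatmap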
+ """ + # Load the data + plot_data = load_pickle_data(fp_data) + + # Set layout + edge_size = 2.5 + fig = plt.figure(figsize=(edge_size*3, edge_size*3)) + outer = gridspec.GridSpec( + 2, 1, + height_ratios=[1.0, 2.4], + ) + row1 = gridspec.GridSpecFromSubplotSpec( + 1, 3, + subplot_spec=outer[0], + ) + ax_img = fig.add_subplot(row1[0, 0]) + ax_bar = fig.add_subplot(row1[0, 1:3]) + row23 = gridspec.GridSpecFromSubplotSpec( + 2, 3, + subplot_spec=outer[1], + hspace=0.04, + wspace=0.04 + ) + axes_heatmaps = [[ + fig.add_subplot(row23[i, j]) for j in range(3)] for i in range(2)] + + # Plot sample img + ax_img.imshow(plot_data['img']) + ax_img.set_xticks([]); ax_img.set_yticks([]) + ax_img.set_aspect('equal') + ax_img.set_title(plot_data['label'].replace('_', ' ').title(), fontsize=15) + + # Plot differences in evaluations between pnpxai and captum + bar_data = defaultdict(list) + for explainer_key in TARGET_EXPLAINERS: + for metric_key in TARGET_METRICS: + diff = ( + plot_data['values']['pnpxai'][explainer_key][metric_key] + - plot_data['values']['captum'][explainer_key][metric_key] + ) + bar_data[explainer_key].append(diff) + + x = np.arange(len(TARGET_METRICS)) + width = 0.25 + for i, (explainer_key, evals) in enumerate(bar_data.items()): + ax_bar.bar( + x + i*width, evals, width, + label=TARGET_EXPLAINERS[explainer_key]['dname']) + + ax_bar.set_ylim(-.2, .8) + ax_bar.set_title('PnPXAI - Captum', fontsize=15) + ax_bar.set_xticks( + x + width, + [TARGET_METRICS[nm]['dname'] for nm in TARGET_METRICS], + fontsize=10, + ) + ax_bar.grid(axis='y') + ax_bar.margins(x=0.01) + + # Plot heatmaps + for c, explainer_key in enumerate(TARGET_EXPLAINERS): + alpha = 1. + if explainer_key == 'ks': + axes_heatmaps[0][c].imshow(plot_data['img']) + axes_heatmaps[1][c].imshow(plot_data['img']) + alpha *= .75 + + axes_heatmaps[0][c].imshow( + plot_data['heatmaps']['captum'][explainer_key], + cmap='Reds', alpha=alpha, + ) + axes_heatmaps[1][c].imshow( + plot_data['heatmaps']['pnpxai'][explainer_key], + cmap='Reds', alpha=alpha, + ) + + axes_heatmaps[0][c].set_title( + TARGET_EXPLAINERS[explainer_key]['dname'], fontsize=15) + if c == 0: + axes_heatmaps[0][c].set_ylabel('Captum', fontsize=15) + axes_heatmaps[1][c].set_ylabel( + 'PnPXAI (Ours)', fontsize=15, fontweight='bold') + + axes_heatmaps[0][c].set_xticks([]); axes_heatmaps[0][c].set_yticks([]) + axes_heatmaps[1][c].set_xticks([]); axes_heatmaps[1][c].set_yticks([]) + axes_heatmaps[0][c].set_aspect('equal') + axes_heatmaps[1][c].set_aspect('equal') + + # Save figure + fig.legend( + loc='upper center', ncols=3, + bbox_to_anchor=(.5, 1.0), frameon=False, fontsize=12) + os.makedirs(os.path.dirname(fp_fig), exist_ok=True) + fig.savefig(fp_fig, bbox_inches='tight', pad_inches=0.02, dpi=300) + print(f"Visualization saved for Data ID {args.data_id}: {fp_fig}") + + +def main(): + """Main execution function""" + # Arguments + parser = argparse.ArgumentParser() + parser.add_argument('--data_id', type=int, required=True, help='The specific ID of the data instance to process.') + parser.add_argument('--save_dir', type=str, default='results/analyze_imagenet_hpo/') + parser.add_argument('--seed', type=int, default=42) + parser.add_argument('--n_trials', default=100, type=int) + parser.add_argument('--analyze', action='store_true') + parser.add_argument('--visualize', action='store_true') + args = parser.parse_args() + + # Set result filepaths + os.makedirs(args.save_dir, exist_ok=True) + fp_data = os.path.join(args.save_dir, 'raw', f'{args.data_id}.pkl') + fp_fig = 
os.path.join(args.save_dir, 'figures', f'{args.data_id}.pdf') + + # Run experiment + if args.analyze: + analyze(args, fp_data) + if args.visualize: + if not os.path.exists(fp_data): + raise Exception( + f'{fp_data} not found. Try again with the following flag: --analyze') + visualize(args, fp_data, fp_fig) + + +if __name__ == '__main__': + main() + diff --git a/experiments/utils/helpers.py b/experiments/utils/helpers.py index 6c93aa0..89ae53c 100644 --- a/experiments/utils/helpers.py +++ b/experiments/utils/helpers.py @@ -335,6 +335,105 @@ def get_livertumor_dataset_from_hf( return dataset +def get_imagenet_sample_from_hf( + transform, + hf_repo_id: str = "geonhyeongkim/imagenet-samples-for-pnpxai-experiments", + indices: Optional[List[int]] = None, + data_root: str = "./data", + cache_dir: Optional[str] = None, +): + """ + Downloads only necessary files (metadata + images for requested indices) + from Hugging Face Hub using hf_hub_download and creates a PyTorch Dataset. + + Args: + transform: Torchvision transforms to apply to the image-like data. + hf_repo_id (str): Repository ID of the dataset on Hugging Face Hub. + indices (Optional[List[int]]): Absolute indices to select/download. + data_root (str): The root directory within the project to store datasets. + cache_dir (Optional[str]): Path to HF cache (used for intermediate downloads). + + Returns: + A PyTorch Dataset containing only the data for the requested indices. + """ + if indices is None: + print("Warning: No indices provided. Attempting to load metadata only, but image loading might fail later or be inefficient.") + + dataset_local_dir = os.path.join(data_root, hf_repo_id.replace("/", "_")) + os.makedirs(dataset_local_dir, exist_ok=True) + + print(f"Downloading metadata for '{hf_repo_id}' from Hugging Face Hub...") + try: + # 1. Download metadata.jsonl only + metadata_local_path = hf_hub_download( + repo_id=hf_repo_id, + filename="imagenet_class_index.json", + repo_type="dataset", + local_dir=dataset_local_dir, + local_dir_use_symlinks=True, + cache_dir=cache_dir, + ) + base_download_dir = dataset_local_dir + print(f"Metadata available at: {metadata_local_path}") + print(f"Base download/cache directory: {base_download_dir}") + + except Exception as e: + print(f"Failed to download imagenet_class_index.json from Hugging Face Hub: {e}") + raise e + + # 2. Read metadata and filter for requested indices + filtered_metadata = {} + required_image_paths = set() # Use set to avoid duplicate downloads + all_metadata = {} + try: + with open(metadata_local_path, 'r') as f: + all_metadata = json.load(f) + + if indices is not None: + num_total = len(all_metadata) + for idx in indices: + if 0 <= idx < num_total: + metadata = all_metadata[str(idx)] + filtered_metadata[idx] = metadata + required_image_paths.add(f'samples/{"_".join(metadata)}.JPEG') + else: + print(f"Warning: Requested index {idx} is out of range (0-{num_total-1}). Skipping.") + else: + print("Warning: Loading without specific indices. Using all metadata entries.") + filtered_metadata = {int(k): all_metadata[k] for k in all_metadata} # Less efficient if not all images are needed later + + except Exception as e: + print(f"Error reading or processing metadata file {metadata_local_path}: {e}") + raise e + + if not filtered_metadata: + raise ValueError("No valid metadata found for the requested indices.") + + # 3. 
Download only the required images + print(f"Downloading {len(required_image_paths)} required image files (if not cached)...") + for img_rel_path in tqdm(list(required_image_paths), desc="Downloading images"): + try: + # hf_hub_download will download to the cache or find existing file + hf_hub_download( + repo_id=hf_repo_id, + filename=img_rel_path, + repo_type="dataset", + local_dir=dataset_local_dir, + local_dir_use_symlinks=True, + cache_dir=cache_dir, + ) + except Exception as e: + print(f"Warning: Failed to download image file {img_rel_path}: {e}") + + print("Required image files downloaded/cached.") + + # 4. Create and return the Dataset using filtered metadata and base download dir + fp_img = os.path.join(base_download_dir, list(required_image_paths)[0]) + img = transform(Image.open(fp_img).convert('RGB')) + label = all_metadata[str(indices[0])][-1] + return img, label + + # models def get_torchvision_model(model_name): weights = torchvision.models.get_model_weights(model_name).DEFAULT From 2e3d449284f1f685817907f61a94027142978b24 Mon Sep 17 00:00:00 2001 From: Geonhyeong Kim Date: Fri, 31 Oct 2025 00:08:53 +0900 Subject: [PATCH 02/20] increase n_samples for ks --- experiments/scripts/analyze_imagenet_hpo.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/experiments/scripts/analyze_imagenet_hpo.py b/experiments/scripts/analyze_imagenet_hpo.py index ae927cd..0fc9f97 100644 --- a/experiments/scripts/analyze_imagenet_hpo.py +++ b/experiments/scripts/analyze_imagenet_hpo.py @@ -162,6 +162,8 @@ def analyze(args, fp_data): opt_explainer, opt_pp = study.best_trial.user_attrs.values() # Get explanation of the optimized explainer + if explainer_key == 'ks': + setattr(opt_explainer, 'n_samples', 300) opt_attrs = opt_explainer.attribute(imgs, targets) opt_attrs_pp = opt_pp(opt_attrs) plot_data['heatmaps']['pnpxai'][explainer_key] = ( @@ -175,6 +177,7 @@ def analyze(args, fp_data): if explainer_key == 'ks': # Use same feature mask with pnpxai captum_kwargs['feature_mask'] = Checkerboard()(imgs) + captum_kwargs['n_samples'] = 300 captum_explainer = TARGET_EXPLAINERS[explainer_key]['captum'](model) # Get explanation of captum explainer From f20b9dd85741579c3756a3ade5c65dc39b6b0984 Mon Sep 17 00:00:00 2001 From: seongun-kim Date: Fri, 31 Oct 2025 17:04:53 +0900 Subject: [PATCH 03/20] refactor: Split monolithic helpers.py into modular files --- experiments/scripts/analyze_livertumor_hpo.py | 2 +- experiments/utils/__init__.py | 56 +++ experiments/utils/{helpers.py => datasets.py} | 277 +----------- experiments/utils/models.py | 117 +++++ experiments/utils/utils.py | 427 ++++++++++++++++++ 5 files changed, 603 insertions(+), 276 deletions(-) create mode 100644 experiments/utils/__init__.py rename experiments/utils/{helpers.py => datasets.py} (61%) create mode 100644 experiments/utils/models.py create mode 100644 experiments/utils/utils.py diff --git a/experiments/scripts/analyze_livertumor_hpo.py b/experiments/scripts/analyze_livertumor_hpo.py index 2b39313..4f366d3 100644 --- a/experiments/scripts/analyze_livertumor_hpo.py +++ b/experiments/scripts/analyze_livertumor_hpo.py @@ -22,7 +22,7 @@ from pnpxai import AutoExplanationForImageClassification from experiments.metrics import RelevanceAccuracy -from experiments.utils.helpers import ( +from experiments.utils import ( set_seed, get_livertumor_dataset, get_livertumor_dataset_from_hf, get_livertumor_model, get_livertumor_model_from_hf, diff --git a/experiments/utils/__init__.py b/experiments/utils/__init__.py new file mode 100644 index 
0000000..a71183f --- /dev/null +++ b/experiments/utils/__init__.py @@ -0,0 +1,56 @@ +from .utils import ( + set_seed, + set_params, + patch_lrp_explainer, + save_params_csv, + save_pickle_data, + load_pickle_data, + img_to_np, + denormalize_image, + load_model_and_dataloader_for_tutorial +) + +from .datasets import ( + ImageNetDataset, + get_imagenet_dataset, + ImageNetValDataset, + get_imagenet_val_dataset, + IMDBDataset, + get_imdb_dataset, + VQADataset, + get_vqa_dataset, + get_livertumor_dataset, + get_livertumor_dataset_from_hf +) + +from .models import ( + get_torchvision_model, + get_livertumor_model, + get_livertumor_model_from_hf, + Bert, + get_bert_model, + get_bert_tokenizer, + bert_collate_fn, + Vilt, + get_vilt_model, + get_vilt_processor, + vilt_collate_fn, +) + +__all__ = [ + # utils + 'set_seed', 'set_params', 'patch_lrp_explainer', 'save_params_csv', 'save_pickle_data', 'load_pickle_data', + 'img_to_np', 'denormalize_image', 'load_model_and_dataloader_for_tutorial', + + # datasets + 'ImageNetDataset', 'get_imagenet_dataset', + 'ImageNetValDataset', 'get_imagenet_val_dataset', + 'IMDBDataset', 'get_imdb_dataset', + 'VQADataset', 'get_vqa_dataset', + 'get_livertumor_dataset', 'get_livertumor_dataset_from_hf', + + # models + 'get_torchvision_model', 'get_livertumor_model', 'get_livertumor_model_from_hf', + 'Bert', 'get_bert_model', 'bert_collate_fn', 'get_bert_tokenizer', + 'Vilt', 'get_vilt_model', 'get_vilt_processor', 'vilt_collate_fn', +] \ No newline at end of file diff --git a/experiments/utils/helpers.py b/experiments/utils/datasets.py similarity index 61% rename from experiments/utils/helpers.py rename to experiments/utils/datasets.py index 89ae53c..6a9ffdd 100644 --- a/experiments/utils/helpers.py +++ b/experiments/utils/datasets.py @@ -1,35 +1,20 @@ from typing import Optional, List import os import json -import pickle -import dill import requests -import functools from tqdm import tqdm -from collections import OrderedDict from io import BytesIO +from PIL import Image from pathlib import Path from urllib3 import disable_warnings from urllib3.exceptions import InsecureRequestWarning -import torch -import torchvision -from torch import Tensor from torch.utils.data import Dataset, Subset, DataLoader from torchvision import transforms -from transformers import BertTokenizer, BertForSequenceClassification -from transformers import ViltForQuestionAnswering, ViltProcessor -# from datasets import load_dataset from huggingface_hub import hf_hub_download -from experiments.models import ResNet50LiverTumor from experiments.datasets import LiverTumorDataset, LiverTumorDatasetHf -from PIL import Image - -import pdb - -# datasets class ImageNetDataset(Dataset): def __init__(self, root_dir, transform=None): @@ -116,64 +101,6 @@ def get_imagenet_val_dataset(transform, root_dir): return dataset -def save_params_csv(params, filepath='params.csv'): - """Saves the best parameters to a CSV file after optimization. - - Args: - params (dict): A dictionary containing the parameter names and values. - filepath (str, optional): The path to the CSV file. Defaults to 'params.csv'. 
- """ - try: - with open(filepath, 'w', newline='') as csvfile: - writer = csv.writer(csvfile) - # Write header row - writer.writerow(['parameter', 'value']) - # Write parameter name and value rows - for key, value in params.items(): - writer.writerow([key, value]) - print(f"Best parameters saved to: {filepath}") - except Exception as e: - print(f"Error saving best parameters to CSV: {e}") - - -def save_pickle_data(data, filepath='data.pkl'): - """Saves the data to a pickle file. - - Args: - data: A data variable to pickle. - filepath (str, optional): The path to the pickle file. Defaults to 'data.pkl'. - """ - try: - with open(filepath, 'wb') as f: - dill.dump(data, f) - print(f"Data saved to: {filepath}") - except Exception as e: - print(f"Error saving data to pickle file: {e}") - - -def load_pickle_data(filepath='data.pkl'): - """Loads the data from a pickle file. - - Args: - filepath (str, optional): The path to the pickle file. Defaults to 'data.pkl'. - - Returns: - Any: The loaded data, or None if the file is not found or an error occurs. - """ - try: - if os.path.exists(filepath): - with open(filepath, 'rb') as f: - data = dill.load(f) - print(f"data loaded from: {filepath}") - return data - else: - print(f"Warning: Pickle file not found at {filepath}") - return None - except Exception as e: - print(f"Error loading data from pickle file: {e}") - return None - - class IMDBDataset(Dataset): def __init__(self, split='test'): super().__init__() @@ -431,204 +358,4 @@ def get_imagenet_sample_from_hf( fp_img = os.path.join(base_download_dir, list(required_image_paths)[0]) img = transform(Image.open(fp_img).convert('RGB')) label = all_metadata[str(indices[0])][-1] - return img, label - - -# models -def get_torchvision_model(model_name): - weights = torchvision.models.get_model_weights(model_name).DEFAULT - model = torchvision.models.get_model(model_name, weights=weights).eval() - transform = weights.transforms() - return model, transform - - -def get_livertumor_model(model_path): - model = ResNet50LiverTumor(in_channels=1, num_classes=2) - checkpoint = torch.load(model_path) - - state_dict = {k.replace('model.', '', 1).replace('module.', '', 1): v for k, v in checkpoint.items()} - model.load_state_dict(state_dict, strict=True) - model.eval() - - transform = transforms.Compose([ - transforms.ToTensor(), - transforms.Resize((224, 224), antialias=False), - ]) - - return model, transform - - -def get_livertumor_model_from_hf(repo_id="seongun/resnet50-livertumor"): - model = ResNet50LiverTumor.from_pretrained(repo_id) - model.eval() - - transform = transforms.Compose([ - transforms.ToTensor(), - transforms.Resize((224, 224), antialias=False), - ]) - - return model, transform - - -class Bert(BertForSequenceClassification): - def forward(self, input_ids, token_type_ids, attention_mask): - return super().forward( - input_ids=input_ids, - token_type_ids=token_type_ids, - attention_mask=attention_mask - ).logits - - -def get_bert_model(model_name, num_labels): - return Bert.from_pretrained(model_name, num_labels=num_labels) - - -class Vilt(ViltForQuestionAnswering): - def forward( - self, - pixel_values, - input_ids, - token_type_ids, - attention_mask, - pixel_mask, - ): - return super().forward( - input_ids=input_ids, - token_type_ids=token_type_ids, - attention_mask=attention_mask, - pixel_values=pixel_values, - pixel_mask=pixel_mask, - ).logits - - -def get_vilt_model(model_name): - return Vilt.from_pretrained(model_name) - - -# utils -def img_to_np(img): return img.permute(1, 2, 
0).detach().numpy() - - -def denormalize_image(inputs, mean, std): - return img_to_np( - inputs - * Tensor(std)[:, None, None] - + Tensor(mean)[:, None, None] - ) - - -def bert_collate_fn(batch, tokenizer=None): - inputs = tokenizer( - [d[0] for d in batch], - padding=True, - truncation=True, - return_tensors='pt', - ) - labels = torch.tensor([d[1] for d in batch]) - return tuple(inputs.values()), labels - - -def get_bert_tokenizer(model_name): - return BertTokenizer.from_pretrained(model_name) - - -def get_vilt_processor(model_name): - return ViltProcessor.from_pretrained(model_name) - - -def vilt_collate_fn(batch, processor=None, label2id=None): - imgs = [d[0] for d in batch] - qsts = [d[1] for d in batch] - inputs = processor( - images=imgs, - text=qsts, - padding=True, - truncation=True, - return_tensors='pt', - ) - labels = torch.tensor([label2id[d[2]] for d in batch]) - return ( - inputs['pixel_values'], - inputs['input_ids'], - inputs['token_type_ids'], - inputs['attention_mask'], - inputs['pixel_mask'], - labels, - ) - - -def load_model_and_dataloader_for_tutorial(modality, device): - if modality == 'image': - model, transform = get_torchvision_model('resnet18') - model = model.to(device) - model.eval() - dataset = get_imagenet_dataset(transform) - loader = DataLoader(dataset, batch_size=8, shuffle=False) - return model, loader, transform - elif modality == 'text': - model = get_bert_model( - 'fabriceyhc/bert-base-uncased-imdb', num_labels=2) - model = model.to(device) - model.eval() - dataset = get_imdb_dataset(split='test') - tokenizer = get_bert_tokenizer('fabriceyhc/bert-base-uncased-imdb') - loader = DataLoader( - dataset, - batch_size=8, - shuffle=False, - collate_fn=functools.partial(bert_collate_fn, tokenizer=tokenizer) - ) - return model, loader, tokenizer - elif modality == ('image', 'text'): - model = get_vilt_model('dandelin/vilt-b32-finetuned-vqa') - model.to(device) - model.eval() - dataset = get_vqa_dataset() - processor = get_vilt_processor('dandelin/vilt-b32-finetuned-vqa') - loader = DataLoader( - dataset, - batch_size=2, - shuffle=False, - collate_fn=functools.partial( - vilt_collate_fn, - processor=processor, - label2id=model.config.label2id, - ), - ) - return model, loader, processor - - -import random -import numpy as np - -def set_seed(seed): - """Sets the seed for various random number generators and CUDA settings for reproducibility.""" - seed = int(seed) - - # 1. Basic random libraries - random.seed(seed) - np.random.seed(seed) - - # 2. PyTorch - torch.manual_seed(seed) - - # 3. 
CUDA (GPU) related - if torch.cuda.is_available(): - torch.cuda.manual_seed(seed) # Set seed for the current GPU - torch.cuda.manual_seed_all(seed) # Set seed for *all* GPUs (important for multi-GPU setups) - - # Ensure deterministic algorithms are used for CUDA operations - torch.backends.cudnn.deterministic = True - torch.backends.cudnn.benchmark = False - - # (Optional but recommended for strict reproducibility) - # Force PyTorch to use deterministic algorithms (might impact performance) - # Note: This might require PyTorch 1.7+ and setting an environment variable - # before running the script (e.g., export CUBLAS_WORKSPACE_CONFIG=:4096:8) - # Alternatively, try setting it directly (requires PyTorch 1.8+): - torch.use_deterministic_algorithms(True, warn_only=True) - - # (Optional: Set environment variable directly in script - might not always work depending on when CUDA context is initialized) - # os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' - - print(f"Set seed to {seed} for random, numpy, and torch (including CUDA if available).") \ No newline at end of file + return img, label \ No newline at end of file diff --git a/experiments/utils/models.py b/experiments/utils/models.py new file mode 100644 index 0000000..8bc5f07 --- /dev/null +++ b/experiments/utils/models.py @@ -0,0 +1,117 @@ +import torch +import torchvision +from torchvision import transforms +from transformers import BertTokenizer, BertForSequenceClassification +from transformers import ViltForQuestionAnswering, ViltProcessor + +from experiments.models import ResNet50LiverTumor + + +def get_torchvision_model(model_name): + weights = torchvision.models.get_model_weights(model_name).DEFAULT + model = torchvision.models.get_model(model_name, weights=weights).eval() + transform = weights.transforms() + return model, transform + + +def get_livertumor_model(model_path): + model = ResNet50LiverTumor(in_channels=1, num_classes=2) + checkpoint = torch.load(model_path) + + state_dict = {k.replace('model.', '', 1).replace('module.', '', 1): v for k, v in checkpoint.items()} + model.load_state_dict(state_dict, strict=True) + model.eval() + + transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Resize((224, 224), antialias=False), + ]) + + return model, transform + + +def get_livertumor_model_from_hf(repo_id="seongun/resnet50-livertumor"): + model = ResNet50LiverTumor.from_pretrained(repo_id) + model.eval() + + transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Resize((224, 224), antialias=False), + ]) + + return model, transform + + +class Bert(BertForSequenceClassification): + def forward(self, input_ids, token_type_ids, attention_mask): + return super().forward( + input_ids=input_ids, + token_type_ids=token_type_ids, + attention_mask=attention_mask + ).logits + + +def get_bert_model(model_name, num_labels): + return Bert.from_pretrained(model_name, num_labels=num_labels) + + +def bert_collate_fn(batch, tokenizer=None): + inputs = tokenizer( + [d[0] for d in batch], + padding=True, + truncation=True, + return_tensors='pt', + ) + labels = torch.tensor([d[1] for d in batch]) + return tuple(inputs.values()), labels + + +def get_bert_tokenizer(model_name): + return BertTokenizer.from_pretrained(model_name) + + +class Vilt(ViltForQuestionAnswering): + def forward( + self, + pixel_values, + input_ids, + token_type_ids, + attention_mask, + pixel_mask, + ): + return super().forward( + input_ids=input_ids, + token_type_ids=token_type_ids, + attention_mask=attention_mask, + pixel_values=pixel_values, + 
pixel_mask=pixel_mask, + ).logits + + +def get_vilt_model(model_name): + return Vilt.from_pretrained(model_name) + + +def get_vilt_processor(model_name): + return ViltProcessor.from_pretrained(model_name) + + +def vilt_collate_fn(batch, processor=None, label2id=None): + imgs = [d[0] for d in batch] + qsts = [d[1] for d in batch] + inputs = processor( + images=imgs, + text=qsts, + padding=True, + truncation=True, + return_tensors='pt', + ) + labels = torch.tensor([label2id[d[2]] for d in batch]) + return ( + inputs['pixel_values'], + inputs['input_ids'], + inputs['token_type_ids'], + inputs['attention_mask'], + inputs['pixel_mask'], + labels, + ) \ No newline at end of file diff --git a/experiments/utils/utils.py b/experiments/utils/utils.py new file mode 100644 index 0000000..5be5f59 --- /dev/null +++ b/experiments/utils/utils.py @@ -0,0 +1,427 @@ +import os +import pickle +import dill +import functools +import random +import numpy as np + +import torch +from torch import Tensor +from torch.utils.data import Dataset, Subset, DataLoader + +# from pnpxai.explainers.utils.baselines import BASELINE_FUNCTIONS +# from pnpxai.explainers.utils.feature_masks import FEATURE_MASK_FUNCTIONS + +import zennit +from pnpxai.explainers import Explainer +from pnpxai.explainers.lrp import ( + LRPBase, + LRPEpsilonGammaBox, + LRPUniformEpsilon, + LRPEpsilonPlus, + LRPEpsilonAlpha2Beta1, + _get_uniform_epsilon_composite, + _get_epsilon_gamma_box_composite, + _get_epsilon_plus_composite, + _get_epsilon_alpha2_beta1_composite +) + + +def save_params_csv(params, filepath='params.csv'): + """Saves the best parameters to a CSV file after optimization. + + Args: + params (dict): A dictionary containing the parameter names and values. + filepath (str, optional): The path to the CSV file. Defaults to 'params.csv'. + """ + try: + with open(filepath, 'w', newline='') as csvfile: + writer = csv.writer(csvfile) + # Write header row + writer.writerow(['parameter', 'value']) + # Write parameter name and value rows + for key, value in params.items(): + writer.writerow([key, value]) + print(f"Best parameters saved to: {filepath}") + except Exception as e: + print(f"Error saving best parameters to CSV: {e}") + + +def save_pickle_data(data, filepath='data.pkl'): + """Saves the data to a pickle file. + + Args: + data: A data variable to pickle. + filepath (str, optional): The path to the pickle file. Defaults to 'data.pkl'. + """ + try: + with open(filepath, 'wb') as f: + dill.dump(data, f) + print(f"Data saved to: {filepath}") + except Exception as e: + print(f"Error saving data to pickle file: {e}") + + +def load_pickle_data(filepath='data.pkl'): + """Loads the data from a pickle file. + + Args: + filepath (str, optional): The path to the pickle file. Defaults to 'data.pkl'. + + Returns: + Any: The loaded data, or None if the file is not found or an error occurs. 
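A note on the choice of dill here: the experiment payloads contain defaultdict(lambda: ...) containers (see plot_data in analyze_imagenet_hpo.py), and the standard pickle module cannot serialize a lambda default_factory, while dill can. A small round-trip sketch (file name and values are illustrative only):

    from collections import defaultdict

    payload = {'values': defaultdict(lambda: defaultdict(dict))}
    payload['values']['pnpxai']['ig']['abpc'] = 0.42         # nested keys created on the fly
    save_pickle_data(data=payload, filepath='demo.pkl')      # dill handles the lambda factory
    restored = load_pickle_data('demo.pkl')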
+ """ + try: + if os.path.exists(filepath): + with open(filepath, 'rb') as f: + data = dill.load(f) + print(f"data loaded from: {filepath}") + return data + else: + print(f"Warning: Pickle file not found at {filepath}") + return None + except Exception as e: + print(f"Error loading data from pickle file: {e}") + return None + + +# utils +def img_to_np(img): return img.permute(1, 2, 0).detach().numpy() + + +def denormalize_image(inputs, mean, std): + return img_to_np( + inputs + * Tensor(std)[:, None, None] + + Tensor(mean)[:, None, None] + ) + + +def load_model_and_dataloader_for_tutorial(modality, device): + if modality == 'image': + model, transform = get_torchvision_model('resnet18') + model = model.to(device) + model.eval() + dataset = get_imagenet_dataset(transform) + loader = DataLoader(dataset, batch_size=8, shuffle=False) + return model, loader, transform + elif modality == 'text': + model = get_bert_model( + 'fabriceyhc/bert-base-uncased-imdb', num_labels=2) + model = model.to(device) + model.eval() + dataset = get_imdb_dataset(split='test') + tokenizer = get_bert_tokenizer('fabriceyhc/bert-base-uncased-imdb') + loader = DataLoader( + dataset, + batch_size=8, + shuffle=False, + collate_fn=functools.partial(bert_collate_fn, tokenizer=tokenizer) + ) + return model, loader, tokenizer + elif modality == ('image', 'text'): + model = get_vilt_model('dandelin/vilt-b32-finetuned-vqa') + model.to(device) + model.eval() + dataset = get_vqa_dataset() + processor = get_vilt_processor('dandelin/vilt-b32-finetuned-vqa') + loader = DataLoader( + dataset, + batch_size=2, + shuffle=False, + collate_fn=functools.partial( + vilt_collate_fn, + processor=processor, + label2id=model.config.label2id, + ), + ) + return model, loader, processor + + +def set_seed(seed): + """Sets the seed for various random number generators and CUDA settings for reproducibility.""" + seed = int(seed) + + # 1. Basic random libraries + random.seed(seed) + np.random.seed(seed) + + # 2. PyTorch + torch.manual_seed(seed) + + # 3. CUDA (GPU) related + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed) # Set seed for the current GPU + torch.cuda.manual_seed_all(seed) # Set seed for *all* GPUs (important for multi-GPU setups) + + # Ensure deterministic algorithms are used for CUDA operations + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + # (Optional but recommended for strict reproducibility) + # Force PyTorch to use deterministic algorithms (might impact performance) + # Note: This might require PyTorch 1.7+ and setting an environment variable + # before running the script (e.g., export CUBLAS_WORKSPACE_CONFIG=:4096:8) + # Alternatively, try setting it directly (requires PyTorch 1.8+): + torch.use_deterministic_algorithms(True, warn_only=True) + + # (Optional: Set environment variable directly in script - might not always work depending on when CUDA context is initialized) + # os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' + + print(f"Set seed to {seed} for random, numpy, and torch (including CUDA if available).") + + +''' +def set_params(explainer_key, params, modality): + """ + Helper function to parse a parameter dictionary into explainer-specific + kwargs and postprocessor-specific kwargs. + It also handles instantiation of baseline/feature_mask functions from strings. + """ + if not params: + print(f"Warning: No optimized parameters found for {explainer_key}. 
Using defaults.") + explainer_kwargs = {} + post_kwargs = {} + else: + explainer_kwargs = {k: v for k, v in params.items() if k not in POSTPROCESS_PARAM_KEYS} + post_kwargs = {k: v for k, v in params.items() if k in POSTPROCESS_PARAM_KEYS} + + if modality.dtype.is_floating_point: + python_dtype = float + elif modality.dtype.is_complex: + print(f"Warning: Complex dtype {modality.dtype} detected. Parameter instantiation might fail if not handled in dictionaries.") + python_dtype = None + else: + python_dtype = int + modality_key = (python_dtype, modality.ndims) + + # Separate potential nested parameters for all baseline/feature_mask functions + # from the main explainer_kwargs before processing. + # This prevents parameters for one mask/baseline from being passed to another. + all_nested_baseline_params = {} + all_nested_mask_params = {} + cleaned_explainer_kwargs = {} + + for k, v in list(explainer_kwargs.items()): + if k.startswith('baseline_fn.'): + all_nested_baseline_params[k] = v + elif k.startswith('feature_mask_fn.'): + all_nested_mask_params[k] = v + else: + cleaned_explainer_kwargs[k] = v + explainer_kwargs = cleaned_explainer_kwargs # explainer_kwargs now only has non-nested parameters + + # Handle baseline_fn + baseline_fn_str = explainer_kwargs.get('baseline_fn') + if baseline_fn_str and isinstance(baseline_fn_str, str): + specific_baseline_params = {} + prefix_baseline = 'baseline_fn.' + try: + BaselineClass = BASELINE_FUNCTIONS.get(modality_key, {}).get(baseline_fn_str) + if BaselineClass: + init_signature = inspect.signature(BaselineClass.__init__) + # Get expected parameters, excluding 'self', '*args', '**kwargs' + expected_params = {p.name for p in init_signature.parameters.values() + if p.kind not in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD) + and p.name != 'self'} + + # Pass 'dim' if the BaselineClass expects it + if 'dim' in expected_params: + specific_baseline_params['dim'] = modality.pooling_dim + + for param_key, param_value in all_nested_baseline_params.items(): + param_name = param_key[len(prefix_baseline):] + if param_name in expected_params: + specific_baseline_params[param_name] = param_value + + instance = BaselineClass(**specific_baseline_params) + explainer_kwargs['baseline_fn'] = (instance,) # Replace string with instance tuple + print(f" Instantiated baseline_fn: {baseline_fn_str} -> {type(instance)} with params {specific_baseline_params}") + + else: + print(f" Warning: Baseline function '{baseline_fn_str}' not found for modality {modality_key}. Removing related keys.") + if 'baseline_fn' in explainer_kwargs: del explainer_kwargs['baseline_fn'] + + except Exception as e: + print(f" Error instantiating baseline function '{baseline_fn_str}' with params {specific_baseline_params}: {e}. Removing related keys.") + if 'baseline_fn' in explainer_kwargs: del explainer_kwargs['baseline_fn'] + + # Handle feature_mask_fn + feature_mask_fn_str = explainer_kwargs.get('feature_mask_fn') + if feature_mask_fn_str and isinstance(feature_mask_fn_str, str): + specific_mask_params = {} + prefix_mask = 'feature_mask_fn.' 
+ try: + FeatureMaskClass = FEATURE_MASK_FUNCTIONS.get(modality_key, {}).get(feature_mask_fn_str) + if FeatureMaskClass: + init_signature = inspect.signature(FeatureMaskClass.__init__) + expected_params = {p.name for p in init_signature.parameters.values() + if p.kind not in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD) + and p.name != 'self'} + + # Pass 'dim' if the FeatureMaskClass expects it + if 'dim' in expected_params: + specific_mask_params['dim'] = modality.pooling_dim + + for param_key, param_value in all_nested_mask_params.items(): + param_name = param_key[len(prefix_mask):] + if param_name in expected_params: + specific_mask_params[param_name] = param_value + + instance = FeatureMaskClass(**specific_mask_params) + explainer_kwargs['feature_mask_fn'] = (instance,) # Replace string with instance tuple + print(f" Instantiated feature_mask_fn: {feature_mask_fn_str} -> {type(instance)} with params {specific_mask_params}") + + else: + print(f" Warning: Feature mask function '{feature_mask_fn_str}' not found for modality {modality_key}. Removing related keys.") + for key in keys_to_remove_mask: + if key in explainer_kwargs: del explainer_kwargs[key] + + except Exception as e: + print(f" Error instantiating feature mask function '{feature_mask_fn_str}' with params {specific_mask_params}: {e}. Removing related keys.") + if 'feature_mask_fn' in explainer_kwargs: del explainer_kwargs['feature_mask_fn'] + + return explainer_kwargs, post_kwargs +''' +import inspect +from typing import Dict, Any, Tuple + +from pnpxai.core.modality.modality import Modality +from pnpxai.explainers.utils.postprocess import PostProcessor + +# Define keys that belong to the postprocessor, not the explainer +POSTPROCESS_PARAM_KEYS = {'pooling_method', 'normalization_method'} + +def set_params(params: Dict[str, Any], modality: Modality) -> Tuple[Dict[str, Any], Dict[str, Any]]: + """ + Refactored set_params function for the new pnpxai API. + + This function takes a flat dictionary of parameters and the experiment's + modality object. It uses the modality's internal 'FunctionSelectors' + to correctly instantiate baseline and feature_mask functions. + + It splits parameters into explainer-ready kwargs and postprocessor-ready kwargs. 
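Illustrative input/output for this helper (values are hypothetical; it assumes a modality, e.g. the ImageModality used in these experiments, whose baseline selector knows the 'mean' baseline):

    params = {
        'n_steps': 50, 'baseline_fn': 'mean',
        'pooling_method': 'sumpos', 'normalization_method': 'minmax',
    }
    explainer_kwargs, post_kwargs = set_params(params, modality)
    # explainer_kwargs -> {'n_steps': 50, 'baseline_fn': (<instantiated mean baseline>,)}
    # post_kwargs      -> {'pooling_method': 'sumpos', 'normalization_method': 'minmax',
    #                      'channel_dim': modality.channel_dim}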
+ """ + + # Separate explainer kwargs from postprocessor kwargs + explainer_kwargs = {k: v for k, v in params.items() if k not in POSTPROCESS_PARAM_KEYS} + post_kwargs = {k: v for k, v in params.items() if k in POSTPROCESS_PARAM_KEYS} + + # Separate nested parameters + all_nested_baseline_params = {} + all_nested_mask_params = {} + cleaned_explainer_kwargs = {} + + for k, v in list(explainer_kwargs.items()): + if k.startswith('baseline_fn.'): + all_nested_baseline_params[k.replace('baseline_fn.', '')] = v + elif k.startswith('feature_mask_fn.'): + all_nested_mask_params[k.replace('feature_mask_fn.', '')] = v + else: + cleaned_explainer_kwargs[k] = v + explainer_kwargs = cleaned_explainer_kwargs + + # Handle baseline_fn instantiation using the modality's selector + baseline_fn_str = explainer_kwargs.get('baseline_fn') + if baseline_fn_str and isinstance(baseline_fn_str, str): + try: + # Use the modality's selector to instantiate the baseline_fn + instance = modality.baseline_fn_selector.select( + baseline_fn_str, + **all_nested_baseline_params + ) + + # Wrap in a tuple for multi-modality compatibility + explainer_kwargs['baseline_fn'] = (instance,) + print(f" Instantiated baseline_fn: {baseline_fn_str} -> {type(instance)}") + except Exception as e: + print(f" Error instantiating baseline '{baseline_fn_str}': {e}. Removing key.") + del explainer_kwargs['baseline_fn'] + + # Handle feature_mask_fn instantiation using the modality's selector + feature_mask_fn_str = explainer_kwargs.get('feature_mask_fn') + if feature_mask_fn_str and isinstance(feature_mask_fn_str, str): + try: + # Use the modality's selector to instantiate the feature_mask_fn + instance = modality.feature_mask_fn_selector.select( + feature_mask_fn_str, + **all_nested_mask_params + ) + + # Wrap in a tuple + explainer_kwargs['feature_mask_fn'] = (instance,) + print(f" Instantiated feature_mask_fn: {feature_mask_fn_str} -> {type(instance)}") + except Exception as e: + print(f" Error instantiating feature mask '{feature_mask_fn_str}': {e}. Removing key.") + del explainer_kwargs['feature_mask_fn'] + + post_kwargs['channel_dim'] = modality.channel_dim + + return explainer_kwargs, post_kwargs + + +def patch_lrp_explainer(explainer: Explainer) -> Explainer: + """ + Workaround for pnpxai LRP explainers' stateful initialization bug. + + This function addresses a bug where pnpxai LRP wrappers + create their internal `zennit_composite` object during `__init__` using + default parameters. + + A subsequent `.set_kwargs()` updates the wrapper's attributes (e.g., `explainer.epsilon`), + but does not recreate the stale `zennit_composite` object, which still holds + the stale default parameters in its internal rules. + + This patch *forces* recreation of `zennit_composite` *after* `.set_kwargs()` + by manually calling the same private helper functions used in `lrp.py`'s `__init__`, + but this time feeding them the *updated* attributes from the explainer instance. + + Args: + explainer: The LRP explainer instance (e.g., LRPEpsilonGammaBox) + that has just been configured with `.set_kwargs()`. + + Returns: + The same explainer instance, now patched with a correct `zennit_composite`. 
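Typical usage, mirroring how the grid-search script below configures explainers (model, imgs and targets as prepared elsewhere in these scripts; the epsilon value is arbitrary):

    explainer = LRPUniformEpsilon(model=model).set_kwargs(epsilon=0.25)
    explainer = patch_lrp_explainer(explainer)   # zennit_composite is rebuilt with epsilon=0.25
    attrs = explainer.attribute(imgs, targets)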
+ """ + + new_composite = None + explainer_class_name = explainer.__class__.__name__ + + if isinstance(explainer, LRPEpsilonGammaBox): + # print(f"Patching {explainer_class_name} with epsilon={explainer.epsilon}, gamma={explainer.gamma}") + new_composite = _get_epsilon_gamma_box_composite( + low=explainer.low, + high=explainer.high, + epsilon=explainer.epsilon, # Use the UPDATED attribute + gamma=explainer.gamma, # Use the UPDATED attribute + stabilizer=explainer.stabilizer, + zennit_canonizers=explainer.zennit_canonizers + ) + elif isinstance(explainer, LRPUniformEpsilon): + # print(f"Patching {explainer_class_name} with epsilon={explainer.epsilon}") + new_composite = _get_uniform_epsilon_composite( + epsilon=explainer.epsilon, # Use the UPDATED attribute + stabilizer=explainer.stabilizer, + zennit_canonizers=explainer.zennit_canonizers + ) + elif isinstance(explainer, LRPEpsilonPlus): + # print(f"Patching {explainer_class_name} with epsilon={explainer.epsilon}") + new_composite = _get_epsilon_plus_composite( + epsilon=explainer.epsilon, # Use the UPDATED attribute + stabilizer=explainer.stabilizer, + zennit_canonizers=explainer.zennit_canonizers + ) + elif isinstance(explainer, LRPEpsilonAlpha2Beta1): + # print(f"Patching {explainer_class_name} with epsilon={explainer.epsilon}") + new_composite = _get_epsilon_alpha2_beta1_composite( + epsilon=explainer.epsilon, # Use the UPDATED attribute + stabilizer=explainer.stabilizer, + zennit_canonizers=explainer.zennit_canonizers + ) + else: + return explainer + + if new_composite is not None: + explainer.zennit_composite = new_composite + + return explainer \ No newline at end of file From 2e024077b867c5431f199f7443de5b488216906c Mon Sep 17 00:00:00 2001 From: seongun-kim Date: Fri, 31 Oct 2025 17:24:54 +0900 Subject: [PATCH 04/20] feat: Add HPO impact analysis --- .../scripts/analyze_imagenet_hpo_impact.py | 1348 +++++++++++++++++ experiments/utils/utils.py | 168 +- 2 files changed, 1374 insertions(+), 142 deletions(-) create mode 100644 experiments/scripts/analyze_imagenet_hpo_impact.py diff --git a/experiments/scripts/analyze_imagenet_hpo_impact.py b/experiments/scripts/analyze_imagenet_hpo_impact.py new file mode 100644 index 0000000..8071cb4 --- /dev/null +++ b/experiments/scripts/analyze_imagenet_hpo_impact.py @@ -0,0 +1,1348 @@ +import os +import argparse +import itertools +import random +from collections import defaultdict +import matplotlib +import matplotlib.pyplot as plt +import matplotlib.gridspec as gridspec +import matplotlib.ticker as mticker +import numpy as np +from tqdm import tqdm + +import torch +import pandas as pd +from torch.utils.data import DataLoader + +from pnpxai import XaiRecommender, Experiment, AutoExplanationForImageClassification +from pnpxai.evaluator.metrics import AbPC, Complexity + +from experiments.utils import ( + set_seed, set_params, + patch_lrp_explainer, + get_torchvision_model, + get_imagenet_dataset, get_imagenet_val_dataset, + denormalize_image, + save_pickle_data, load_pickle_data, +) + +import pdb + + +# --- Matplotlib Configuration --- +matplotlib.rcParams['font.family'] = 'serif' +matplotlib.rcParams['font.serif'] = ['Times New Roman'] + matplotlib.rcParams['font.serif'] +matplotlib.rcParams['mathtext.fontset'] = 'stix' +matplotlib.rcParams['axes.unicode_minus'] = False +matplotlib.rcParams['pdf.fonttype'] = 42 + +TORCHVISION_MODEL_CHOICES = [ + 'resnet18', + 'vit_b_16', + # ... 
+] + +# --- Hyperparameter Grid Definitions --- + +POSTPROCESS_PARAM_KEYS = {'pooling_method', 'normalization_method'} + +# Define common hyperparameters +COMMON_HYPERPARAMETERS = { + 'pooling_method': ['sumpos', 'sumabs', 'l1norm', 'maxnorm', 'l2norm', 'l2normsq', 'possum', 'posmaxnorm', 'posl2norm', 'posl2normsq'], + 'normalization_method': ['identity', 'minmax'], +} + +# Define explainer-specific hyperparameters +EXPL_HYPERPARAMETERS = { + 'guided_grad_cam': { + 'interpolate_mode': ['nearest', 'area'], + }, + 'integrated_gradients': { + 'n_steps': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100], + 'baseline_fn': ['zeros', 'mean', 'invert'], + }, + 'kernel_shap': { + 'n_steps': [10, 20, 30, 40, 50], + 'baseline_fn': ['zeros', 'mean', 'invert'], + 'feature_mask_fn': ['felzenszwalb', 'quickshift', 'slic'], + }, + 'lrp_epsilon_alpha2_beta1': { + 'epsilon': [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1.0], + }, + 'lrp_epsilon_gamma_box': { + 'epsilon': [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1.0], + 'gamma': [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1.0], + }, + 'lrp_epsilon_plus': { + 'epsilon': [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1.0], + }, + 'lrp_uniform_epsilon': { + 'epsilon': [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1.0], + }, + 'lime': { + 'n_steps': [10, 20, 30, 40, 50], + 'baseline_fn': ['zeros', 'mean', 'invert'], + 'feature_mask_fn': ['felzenszwalb', 'quickshift', 'slic'], + }, + 'smooth_grad': { + 'noise_level': np.round(np.arange(0.10, 1.05, 0.10), 2).tolist(), + 'n_iter': np.arange(10, 101, 10).tolist(), + }, + 'var_grad': { + 'noise_level': np.round(np.arange(0.10, 1.05, 0.10), 2).tolist(), + 'n_iter': np.arange(10, 101, 10).tolist(), + }, +} +# Special 2x2 grid for LRPEpsilonGammaBox qualitative examples, +# as the main grid lacks the precise default and optimized values. +QUALITATIVE_HYPERPARAMETERS = { + 'lrp_epsilon_gamma_box': { + 'epsilon': [1e-6, 0.022684753958641276], + 'gamma': [0.2413579229130399, 0.25], + }, +} + +# Define plotting configurations for each explainer +PLOTTING_CONFIGS = { + # 1. Impact of Pooling Methods + 'grad_cam': { + 'column_hp': 'pooling_method', + 'row_hps': ['normalization_method'], + 'fixed_hps': [], # No other HPs to fix + }, + 'guided_grad_cam': { + 'column_hp': 'pooling_method', + 'row_hps': ['normalization_method', 'interpolate_mode'], + 'fixed_hps': [], + }, + 'gradient': { + 'column_hp': 'pooling_method', + 'row_hps': ['normalization_method'], + 'fixed_hps': [], + }, + 'gradient_x_input': { + 'column_hp': 'pooling_method', + 'row_hps': ['normalization_method'], + 'fixed_hps': [], + }, + + # 2. Impact of Noise Level / Epsilon + 'lrp_epsilon_alpha2_beta1': { + 'column_hp': 'pooling_method', + 'row_hps': ['epsilon'], + 'fixed_hps': ['normalization_method'], + }, + 'lrp_epsilon_plus': { + 'column_hp': 'pooling_method', + 'row_hps': ['epsilon'], + 'fixed_hps': ['normalization_method'], + }, + 'lrp_uniform_epsilon': { + 'column_hp': 'pooling_method', + 'row_hps': ['epsilon'], + 'fixed_hps': ['normalization_method'], + }, + 'lrp_epsilon_gamma_box': { + 'column_hp': 'gamma', + 'row_hps': ['epsilon'], + 'fixed_hps': ['pooling_method', 'normalization_method'], # These are fixed at best_params values + }, + 'smooth_grad': { + 'column_hp': 'n_iter', + 'row_hps': ['noise_level'], + 'fixed_hps': ['pooling_method', 'normalization_method'], + }, + 'var_grad': { + 'column_hp': 'n_iter', + 'row_hps': ['noise_level'], + 'fixed_hps': ['pooling_method', 'normalization_method'], + }, + + # 3. 
Impact of Baseline Functions and Feature Mask Functions + 'kernel_shap': { + 'column_hp': 'baseline_fn', + 'row_hps': ['feature_mask_fn'], + 'fixed_hps': ['n_steps', 'pooling_method', 'normalization_method'], + }, + 'lime': { + 'column_hp': 'baseline_fn', + 'row_hps': ['feature_mask_fn'], + 'fixed_hps': ['n_steps', 'pooling_method', 'normalization_method'], + }, + + # 4. Impact of Number of Steps + 'integrated_gradients': { + 'column_hp': 'n_steps', + 'row_hps': ['baseline_fn'], + 'fixed_hps': ['pooling_method', 'normalization_method'], + }, +} + +DEFAULT_PLOT_CONFIG = { + 'column_hp': 'pooling_method', + 'row_hps': ['normalization_method'], # Default to vary normalization in rows if no other HPs are specified for rows + 'fixed_hps': [], +} + +# Metrics to evaluate in the grid search +TARGET_METRIC_KEYS = ['ab_pc', 'complexity'] + + +def _run_grid_for_explainer( + explainer_key, + explainer_class, + postprocessor_type, + model, + hyperparameter_source, + best_params_for_explainer, + expr, + data_id_iterator, + batch_size, + dataset, + target_metric_ids, +): + """ + Helper function to run the actual grid evaluation loop for a given set of hyperparameters. + This function is called by `run_grid_search_evaluation`. + """ + print(f"Running grid search for {explainer_key} using HP source: {hyperparameter_source.get(explainer_key, {}).keys()}") + + # --- Setup Grid Parameters --- + plot_config = PLOTTING_CONFIGS.get(explainer_key, DEFAULT_PLOT_CONFIG) + column_hp_key = plot_config['column_hp'] + row_hp_keys = plot_config['row_hps'] + fixed_hps = plot_config['fixed_hps'] + + all_hps_for_explainer = { + **hyperparameter_source.get(explainer_key, {}), + **COMMON_HYPERPARAMETERS + } + + column_values = all_hps_for_explainer.get(column_hp_key, []) + if not column_values: + print(f"Warning: No column HP values found for '{column_hp_key}'. Skipping run.") + return {}, {}, {} + + row_param_values_lists = [] + for rhp_key in row_hp_keys: + values = all_hps_for_explainer.get(rhp_key, []) + if not values: + print(f"Warning: No row HP values found for '{rhp_key}'. Skipping run.") + return {}, {}, {} + row_param_values_lists.append(values) + + row_combinations = list(itertools.product(*row_param_values_lists)) + if not row_combinations and row_hp_keys: # Ensure run if no row HPs (row_combinations=[()]) + print(f"Warning: No valid row combinations found. 
Skipping run.") + return {}, {}, {} + elif not row_combinations and not row_hp_keys: + row_combinations = [()] # Allow run if no row HPs are defined + + # --- Initialize Result Dictionaries --- + all_images_heatmaps = {} + all_images_eval_scores = {} + predicted_labels = {} # Labels will be populated by the first batch loop + + # --- Run Nested Loops --- + num_batches = (len(data_id_iterator) + batch_size - 1) // batch_size + + for batch_idx in tqdm(range(num_batches), desc=f"Computing batches for {explainer_key}", leave=False): + start_idx = batch_idx * batch_size + end_idx = min((batch_idx + 1) * batch_size, len(data_id_iterator)) + cur_batch_data_ids = list(data_id_iterator[start_idx:end_idx]) + + if not cur_batch_data_ids: + continue + + # Prediction and label generation (only needs to run once per batch) + expr.predict_batch(data_ids=cur_batch_data_ids) + target_idx = expr.get_targets_flattened(data_ids=cur_batch_data_ids) + for i, data_id in enumerate(cur_batch_data_ids): + predicted_labels[data_id] = dataset.class_index[str(target_idx[i].item())][-1] + + # --- Nested loops for HP combinations --- + for row_idx, row_combo_values in enumerate(tqdm(row_combinations, desc="Row HP Combinations", leave=False)): + current_row_params_dict = dict(zip(row_hp_keys, row_combo_values)) + + for col_idx, current_col_value in enumerate(tqdm(column_values, desc="Column HP Values", leave=False)): + params = best_params_for_explainer.copy() + params.update(current_row_params_dict) + params[column_hp_key] = current_col_value + + for fixed_hp_key in fixed_hps: + if fixed_hp_key not in params: + if fixed_hp_key in all_hps_for_explainer and all_hps_for_explainer[fixed_hp_key]: + params[fixed_hp_key] = all_hps_for_explainer[fixed_hp_key][0] + else: + print(f"Warning: Fixed HP '{fixed_hp_key}' not found.") + + explainer_kwargs, post_kwargs = set_params(params, expr.modality) + + current_explainer = explainer_class(model=model).set_kwargs(**explainer_kwargs) + current_explainer = patch_lrp_explainer(current_explainer) # Apply LRP patch + current_postprocessor = postprocessor_type.from_name(**post_kwargs) + + current_explainer_id = expr.manager.add_explainer(current_explainer) + current_postprocessor_id = expr.manager.add_postprocessor(current_postprocessor) + + with torch.no_grad(): + expr.explain_batch( + data_ids=cur_batch_data_ids, + explainer_id=current_explainer_id + ) + opt_attrs_pp = expr.postprocess_batch( + data_ids=cur_batch_data_ids, + explainer_id=current_explainer_id, + postprocessor_id=current_postprocessor_id + ) + + if torch.isnan(opt_attrs_pp).any(): + print(f" WARNING: NaN detected for params={params}. 
Clamping to zero.") + opt_attrs_pp = torch.zeros_like(opt_attrs_pp) + + for i, data_id in enumerate(cur_batch_data_ids): + if data_id not in all_images_heatmaps: + all_images_heatmaps[data_id] = {} + all_images_heatmaps[data_id][(row_idx, col_idx)] = opt_attrs_pp[i].cpu().numpy() + + for target_metric_key, target_metric_id in target_metric_ids.items(): + opt_evals = expr.evaluate_batch( + data_ids=cur_batch_data_ids, + explainer_id=current_explainer_id, + postprocessor_id=current_postprocessor_id, + metric_id=target_metric_id + ) + for i, data_id in enumerate(cur_batch_data_ids): + if data_id not in all_images_eval_scores: + all_images_eval_scores[data_id] = {} + all_images_eval_scores[data_id][(row_idx, col_idx, target_metric_key)] = opt_evals[i] + + return all_images_heatmaps, all_images_eval_scores, predicted_labels + + +def run_grid_search_evaluation(args): + """ + Runs the HPO grid search evaluation. + This function now handles the special 2x2 grid case for lrp_epsilon_gamma_box + by calling a helper function `_run_grid_for_explainer`. + """ + # Setup + cwd = os.getcwd() + use_gpu = torch.cuda.is_available() and not args.disable_gpu + device = torch.device('cuda' if use_gpu else 'cpu') + set_seed(0) + + # Prepare model + model, transform = get_torchvision_model(args.model) + model.to(device) + model.eval() + + # Prepare data + dataset = get_imagenet_val_dataset( + transform, args.data_dir, + ) + + data_id_iterator = range(args.data_from, args.data_to) + batch_size = args.batch_size + dataloader = DataLoader( + dataset, + batch_size=batch_size, + num_workers=0, + pin_memory=use_gpu, + shuffle=False, + ) + + if not dataloader: + raise ValueError("Dataloader is empty. Check dataset path or size.") + + # --- AutoExplanation --- + expr = AutoExplanationForImageClassification( + model=model, + data=dataloader, # Dataloader is used for init, loop uses data_id_iterator + input_extractor=lambda batch: batch[0].to(device), + label_extractor=lambda batch: batch[1].to(device), + target_extractor=lambda outputs: outputs.argmax(-1).to(device), + target_labels=False, + ) + + # Map explainer string keys to their base class names + KEY_TO_CLASS_NAME_MAP = { + 'grad_cam': 'GradCAM', + 'guided_grad_cam': 'GuidedGradCam', + 'gradient': 'Gradient', + 'gradient_x_input': 'GradientXInput', + 'integrated_gradients': 'IntegratedGradients', + 'kernel_shap': 'KernelShap', + 'lrp_epsilon_alpha2_beta1': 'LRPEpsilonAlpha2Beta1', + 'lrp_epsilon_gamma_box': 'LRPEpsilonGammaBox', + 'lrp_epsilon_plus': 'LRPEpsilonPlus', + 'lrp_uniform_epsilon': 'LRPUniformEpsilon', + 'lime': 'Lime', + 'smooth_grad': 'SmoothGrad', + 'var_grad': 'VarGrad', + } + + # Map class names to the actual base instances initialized by AutoExplanation + CLASS_NAME_TO_CLASS_TYPE = {exp.__class__.__name__: exp.__class__ for exp in expr.manager.explainers} + + # Get the base postprocessor instance + base_postprocessor_instance = expr.manager.get_postprocessor_by_id(0) + PostprocessorType = type(base_postprocessor_instance) + + # Map target metric keys (like 'ab_pc') to their metric IDs + METRIC_NAME_TO_ID_MAP = {type(m).__name__.lower(): i for i, m in enumerate(expr.manager.metrics)} + target_metric_ids = { + 'ab_pc': METRIC_NAME_TO_ID_MAP.get('abpc'), + 'complexity': METRIC_NAME_TO_ID_MAP.get('complexity') + } + if None in target_metric_ids.values(): + raise RuntimeError(f"Could not find all target metrics. 
Found: {METRIC_NAME_TO_ID_MAP}") + + # Load optimized parameters + best_params = defaultdict(dict) + for explainer_key in args.eval_explainer: + params_path = os.path.join(cwd, f'data/ImageNet/optimized_params/{args.model}/{explainer_key}.pkl') + if not os.path.exists(params_path): + print(f"Warning: Optimized params file not found at {params_path}. Using empty dict.") + else: + best_params[explainer_key] = load_pickle_data(params_path)['composite'] + + # --- Main Loop --- + for explainer_key in args.eval_explainer: + print(f"\n--- Processing Explainer: {explainer_key} ---") + + explainer_class_name = KEY_TO_CLASS_NAME_MAP.get(explainer_key) + ExplainerClass = CLASS_NAME_TO_CLASS_TYPE.get(explainer_class_name) + if not ExplainerClass: + print(f"Error: Could not find class type for {explainer_class_name}. Skipping.") + continue + + # Run the main evaluation grid + main_heatmaps, main_scores, main_labels = _run_grid_for_explainer( + explainer_key=explainer_key, + explainer_class=ExplainerClass, + postprocessor_type=PostprocessorType, + model=model, + hyperparameter_source=EXPL_HYPERPARAMETERS, + best_params_for_explainer=best_params[explainer_key], + expr=expr, + data_id_iterator=data_id_iterator, + batch_size=batch_size, + dataset=dataset, + target_metric_ids=target_metric_ids, + ) + + # Save main results + if main_heatmaps: + savedir = os.path.join(cwd, f'results/hpo_impact_imagenet/raw/{args.model}') + save_fname = os.path.join(savedir, f'{explainer_key}.pkl') + os.makedirs(savedir, exist_ok=True) + save_pickle_data( + data={'all_images_heatmaps': main_heatmaps, 'all_images_eval_scores': main_scores, 'predicted_labels': main_labels}, + filepath=save_fname, + ) + print(f"Saved main analysis results for {explainer_key} to {save_fname}") + + # Run the special 2x2 grid for LRPEpsilonGammaBox (if applicable) + if explainer_key == 'lrp_epsilon_gamma_box': + print(f"\n--- Processing Explainer: {explainer_key} (2x2 Qualitative Grid) ---") + + q_heatmaps, q_scores, q_labels = _run_grid_for_explainer( + explainer_key=explainer_key, + explainer_class=ExplainerClass, + postprocessor_type=PostprocessorType, + model=model, + hyperparameter_source=QUALITATIVE_HYPERPARAMETERS, # Use the 2x2 grid + best_params_for_explainer=best_params[explainer_key], + expr=expr, + data_id_iterator=data_id_iterator, + batch_size=batch_size, + dataset=dataset, + target_metric_ids=target_metric_ids, + ) + + # Save special 2x2 results for LRPEpsilonGammaBox + if q_heatmaps: + savedir = os.path.join(cwd, f'results/hpo_impact_imagenet/raw/{args.model}') + save_fname_2x2 = os.path.join(savedir, f'{explainer_key}_2x2.pkl') + os.makedirs(savedir, exist_ok=True) + save_pickle_data( + data={'all_images_heatmaps': q_heatmaps, 'all_images_eval_scores': q_scores, 'predicted_labels': q_labels}, + filepath=save_fname_2x2, + ) + print(f"Saved 2x2 qualitative results for {explainer_key} to {save_fname_2x2}") + + +def _find_param_indices(params_to_find, row_hp_keys, row_combinations, column_hp_key, column_values): + """ + Helper function to find the grid (row, col) indices for a set of hyperparameters. 
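+    For illustration, a hypothetical call (values invented for this sketch, not
+    taken from the experiment configs) would resolve as follows:
+
+        row_hp_keys    = ['gamma'];   row_combinations = [(0.1,), (0.25,)]
+        column_hp_key  = 'epsilon';   column_values    = [1e-6, 1e-4]
+        params_to_find = {'gamma': 0.25, 'epsilon': 1e-4}
+        # numeric values snap to the closest grid point -> returns (1, 1)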
+ """ + print(f'\nparams_to_find:\n{params_to_find}') + if not params_to_find: + return None + + # Find column index by finding the closest value + opt_col_val = params_to_find.get(column_hp_key) + c_idx = -1 + if opt_col_val is not None: + try: + # Always find the closest value for numeric types + if isinstance(opt_col_val, (int, float)): + c_idx = np.argmin(np.abs(np.array(column_values) - opt_col_val)) + else: + c_idx = column_values.index(opt_col_val) + except (ValueError, IndexError): + print(f"Warning: Could not find column value {opt_col_val} for {column_hp_key}.") + return None + + # Find row index by finding the closest value + r_idx = -1 + try: + target_row_values = [params_to_find.get(key) for key in row_hp_keys] + + # Check if all row values are numeric to calculate distance + is_numeric_search = all(isinstance(v, (int, float)) for v in target_row_values) + + if is_numeric_search: + target_np = np.array(target_row_values) + rows_np = np.array(row_combinations) + # Calculate Euclidean distance and find the index of the minimum distance + distances = np.linalg.norm(rows_np - target_np, axis=1) + r_idx = np.argmin(distances) + else: + # Fallback to exact match for non-numeric or mixed types (e.g., strings) + r_idx = row_combinations.index(tuple(target_row_values)) + + except (ValueError, IndexError): + print(f"Warning: Could not find row combination for {target_row_values}.") + return None + + if r_idx >= 0 and c_idx >= 0: + return (r_idx, c_idx) + return None + + +def plot_hpo_analysis(args): + """ + Analyzes HPO results at the dataset level, focusing on a composite score. + Generates a composite plot with a central heatmap (for the composite score) + and marginal line plots showing AbPC, Simplicity, and Composite scores. + """ + print("\n--- Starting Dataset-Level HPO Analysis ---") + cwd = os.getcwd() + + # Define explainer-specific default parameters to locate them on the grid + DEFAULT_PARAMS_CONFIG = { + 'smooth_grad': { + 'n_iter': 20, + 'noise_level': 0.1, + }, + 'lrp_epsilon_gamma_box': { + 'epsilon': 1e-6, + 'gamma': 0.25, + }, + 'integrated_gradients': { + 'n_steps': 20, + 'baseline_fn': 'zeros', + }, + 'guided_grad_cam': { + 'interpolate_mode': 'nearest', + 'pooling_method': 'sumpos', + 'normalization_method': 'identity', + }, + } + FIGURE_SIZE_CONFIG = { + 'default': { + 'cell_size': 1.0, 'width_padding': 1.0, 'height_padding': 0.0, 'additional_fontsize': 0 + }, + 'smooth_grad': { + 'cell_size': 1.0, 'width_padding': 1.45, 'height_padding': 0.0, 'additional_fontsize': 2.5 + }, + 'lrp_epsilon_gamma_box': { + 'cell_size': 1.0, 'width_padding': 1.4, 'height_padding': 0.0, 'additional_fontsize': 1 + }, + 'guided_grad_cam': { + 'cell_size': 1.2, 'width_padding': 0.0, 'height_padding': 0.2, 'additional_fontsize': 1.2 + }, + 'integrated_gradients': { + 'cell_size': 1.2, 'width_padding': 0.0, 'height_padding': 0.5, 'additional_fontsize': 2 + }, + } + MARGINAL_PLOT_CONFIG = { + 'default': { + 'row': {'nbins': 3, 'format': '.3f'}, 'row_twin': {'nbins': 3, 'format': '.2f'}, + 'col': {'nbins': 3, 'format': '.2f'}, 'col_twin': {'nbins': 3, 'format': '.3f'} + }, + 'smooth_grad': { + 'row': {'nbins': 2, 'format': '.2f'}, 'row_twin': {'nbins': 2, 'format': '.2f'}, + 'col': {'nbins': 3, 'format': '.2f'}, 'col_twin': {'nbins': 2, 'format': '.3f'} + }, + 'lrp_epsilon_gamma_box': { + 'row': {'nbins': 2, 'format': '.3f'}, 'row_twin': {'nbins': 3, 'format': '.2f'}, + 'col': {'nbins': 3, 'format': '.2f'}, 'col_twin': {'nbins': 3, 'format': '.2f'} + }, + 'guided_grad_cam': { + 'row': 
{'nbins': 2, 'format': '.3f'}, 'row_twin': {'nbins': 2, 'format': '.2f'}, + 'col': {'nbins': 3, 'format': '.2f'}, 'col_twin': {'nbins': 2, 'format': '.3f'} + }, + 'integrated_gradients': { + 'row': {'nbins': 2, 'format': '.3f'}, 'row_twin': {'nbins': 3, 'format': '.2f'}, + 'col': {'nbins': 3, 'format': '.2f'}, 'col_twin': {'nbins': 2, 'format': '.3f'} + }, + } + + # Add 'composite' to the list of metrics to process + METRICS_TO_PROCESS = TARGET_METRIC_KEYS + ['composite', 'simplicity'] + + # Pre-calculate figure widths to determine proportional font sizes + print(" Pre-calculating figure widths for font scaling...") + figure_widths = {} + anchor_explainer = 'lrp_epsilon_gamma_box' + explainers_for_width_calc = ['lrp_epsilon_gamma_box', 'smooth_grad', 'integrated_gradients', 'guided_grad_cam'] + + for key in explainers_for_width_calc: + if key in PLOTTING_CONFIGS: + plot_config = PLOTTING_CONFIGS[key] + column_hp_key = plot_config['column_hp'] + all_hps = {**EXPL_HYPERPARAMETERS.get(key, {}), **COMMON_HYPERPARAMETERS} + num_cols = len(all_hps.get(column_hp_key, [])) + + fig_settings = FIGURE_SIZE_CONFIG.get(key, FIGURE_SIZE_CONFIG['default']) + cell_size = fig_settings['cell_size'] + width_padding_size = fig_settings['width_padding'] + + colorbar_width = 0.2 + marg_row_width = 2.0 + + # Calculate and store the total figure width + w = (num_cols * cell_size) + colorbar_width + marg_row_width + width_padding_size + figure_widths[key] = w + + # Define the anchor font size and get the anchor width + anchor_fontsize = 22 + anchor_width = figure_widths.get(anchor_explainer, 1.0) + print(f" Anchor width ({anchor_explainer}) set to: {anchor_width:.2f} with font size {anchor_fontsize}") + + for explainer_key in args.eval_explainer: + print(f"\n--- Analyzing Explainer: {explainer_key} ---") + + # Load pre-computed analysis results and optimized hyperparameters + load_path = os.path.join(cwd, f'results/hpo_impact_imagenet/raw/{args.model}/{explainer_key}.pkl') + best_params_path = os.path.join(cwd, f'data/ImageNet/optimized_params/{args.model}/{explainer_key}.pkl') + + if not os.path.exists(load_path) or not os.path.exists(best_params_path): + print(f" ERROR: Missing results file or params file. Please run --analyze first.") + print(f" - Checked for results: {load_path}") + print(f" - Checked for params: {best_params_path}") + continue + + results = load_pickle_data(load_path) + best_params = load_pickle_data(best_params_path)['composite'] + all_images_eval_scores = results['all_images_eval_scores'] + + if not all_images_eval_scores: + print(f" WARNING: No evaluation scores found in {load_path}. 
Skipping.") + continue + + # Get HP configuration + plot_config = PLOTTING_CONFIGS.get(explainer_key, DEFAULT_PLOT_CONFIG) + column_hp_key = plot_config['column_hp'] + row_hp_keys = plot_config['row_hps'] + all_hps_for_explainer = {**EXPL_HYPERPARAMETERS.get(explainer_key, {}), **COMMON_HYPERPARAMETERS} + column_values = all_hps_for_explainer.get(column_hp_key, []) + row_param_values_lists = [all_hps_for_explainer.get(k, []) for k in row_hp_keys] + row_combinations = list(itertools.product(*row_param_values_lists)) + num_images, num_rows, num_cols = len(all_images_eval_scores), len(row_combinations), len(column_values) + + # Aggregate scores and calculate composite score + aggregated_scores = {m: np.full((num_images, num_rows, num_cols), np.nan) for m in METRICS_TO_PROCESS} + for img_idx, data_id in enumerate(all_images_eval_scores.keys()): + for r_idx in range(num_rows): + for c_idx in range(num_cols): + for m_key in TARGET_METRIC_KEYS: # Only load AbPC and Complexity + score = all_images_eval_scores[data_id].get((r_idx, c_idx, m_key)) + if score is not None and not torch.isnan(score): + aggregated_scores[m_key][img_idx, r_idx, c_idx] = score.item() + + # Calculate composite score from aggregated base metrics + aggregated_scores['composite'] = 0.7 * aggregated_scores['ab_pc'] - 0.3 * aggregated_scores['complexity'] + aggregated_scores['simplicity'] = -1 * aggregated_scores['complexity'] + + # Calculate mean and std for all metrics + mean_scores = {m: np.nanmean(aggregated_scores[m], axis=0) for m in METRICS_TO_PROCESS} + std_scores = {m: np.nanstd(aggregated_scores[m], axis=0) for m in METRICS_TO_PROCESS} + + # Find indices of default and optimized parameters + if explainer_key in DEFAULT_PARAMS_CONFIG: + print(f" Found specific default parameters for {explainer_key}.") + default_params_to_find = DEFAULT_PARAMS_CONFIG[explainer_key] + default_indices = _find_param_indices(default_params_to_find, row_hp_keys, row_combinations, column_hp_key, column_values) + else: + print(" Using (0, 0) as the default parameter index.") + default_indices = (0, 0) + + optimized_indices = _find_param_indices(best_params, row_hp_keys, row_combinations, column_hp_key, column_values) + + print(f" Default HP indices: {default_indices}") + print(f" Optimized HP indices: {optimized_indices}") + + # Generate composite plot + LABEL_MAP = { + "pooling_method": "Pooling", + "interpolate_mode": "Interpolation", + "normalization_method": "Normalization", + "n_steps": "Integration Steps", + "baseline_fn": "Baseline Function", + "noise_level": "Noise Level", + "n_iter": "Number of Samples", + } + POOLING_METHOD_VALUE_MAP = { + 'sumpos': 'sum,pos', 'sumabs': 'sum,abs', 'l1norm': 'l1-norm', + 'maxnorm': 'max-norm', 'l2norm': 'l2-norm', 'l2normsq': 'l2-norm-sq', + 'possum': 'pos,sum', 'posmaxnorm': 'pos,max-norm', + 'posl2norm': 'pos,l2-norm', 'posl2normsq': 'pos,l2-norm-sq' + } + LATEX_SYMBOL_MAP = { + 'gamma': r'$\gamma$', + 'epsilon': r'$\epsilon$' + } + + row_labels = ["\n".join([f"{LABEL_MAP.get(k, k)}={v:.2g}" if isinstance(v, float) else f"{LABEL_MAP.get(k, k)}={v}" for k, v in zip(row_hp_keys, combo)]) for combo in row_combinations] + col_labels = [f"{v:.2g}" if isinstance(v, float) else str(v) for v in column_values] + + # --- Export Data to CSV --- + # Define the header labels for CSVs + row_labels_for_csv = [] + for combo in row_combinations: + label_parts = [f"{key}={val:.2g}" if isinstance(val, float) else f"{key}={val}" for key, val in zip(row_hp_keys, combo)] + row_labels_for_csv.append(" | 
".join(label_parts)) + + col_labels_for_csv = [] + for val in column_values: + val_str = f"{val:.2g}" if isinstance(val, float) else str(val) + col_labels_for_csv.append(f"{column_hp_key}={val_str}") + + print(f" Exporting aggregated scores to CSV for {explainer_key}...") + savedir_csv = os.path.join(cwd, f'results/hpo_impact_imagenet/raw/{args.model}') + os.makedirs(savedir_csv, exist_ok=True) + + # Export heatmap data (Composite Score: Mean ± Std) + heatmap_df_data = [] + for r in range(num_rows): + row_data = [] + for c in range(num_cols): + mean_val = mean_scores['composite'][r, c] + std_val = std_scores['composite'][r, c] + if not np.isnan(mean_val): + row_data.append(f"{mean_val:.4f} ± {std_val:.4f}") + else: + row_data.append("N/A") + heatmap_df_data.append(row_data) + + heatmap_df = pd.DataFrame(heatmap_df_data, index=row_labels_for_csv, columns=col_labels_for_csv) + heatmap_csv_path = os.path.join(savedir_csv, f'{explainer_key}_heatmap_composite.csv') + heatmap_df.to_csv(heatmap_csv_path) + print(f" - Saved heatmap data to: {heatmap_csv_path}") + + # Export row marginal plot data + row_marginal_data = {"Hyperparameter(s)": row_labels_for_csv} + for metric in ['ab_pc', 'simplicity', 'composite']: + row_marginal_data[f'{metric}_mean'] = np.nanmean(mean_scores[metric], axis=1) + row_marginal_data[f'{metric}_std_err'] = np.sqrt(np.nansum(std_scores[metric]**2, axis=1)) / num_cols + + row_df = pd.DataFrame(row_marginal_data) + row_csv_path = os.path.join(savedir_csv, f'{explainer_key}_marginal_rows.csv') + row_df.to_csv(row_csv_path, index=False) + print(f" - Saved row marginal data to: {row_csv_path}") + + # Export column marginal plot data + col_marginal_data = {"Hyperparameter": col_labels_for_csv} + for metric in ['ab_pc', 'simplicity', 'composite']: + col_marginal_data[f'{metric}_mean'] = np.nanmean(mean_scores[metric], axis=0) + col_marginal_data[f'{metric}_std_err'] = np.sqrt(np.nansum(std_scores[metric]**2, axis=0)) / num_rows + + col_df = pd.DataFrame(col_marginal_data) + col_csv_path = os.path.join(savedir_csv, f'{explainer_key}_marginal_columns.csv') + col_df.to_csv(col_csv_path, index=False) + print(f" - Saved column marginal data to: {col_csv_path}") + + # --- Plotting Setup --- + plot_settings = MARGINAL_PLOT_CONFIG.get(explainer_key, MARGINAL_PLOT_CONFIG['default']) + + # Get explainer-specific figure size settings + fig_settings = FIGURE_SIZE_CONFIG.get(explainer_key, FIGURE_SIZE_CONFIG['default']) + cell_size = fig_settings['cell_size'] + width_padding_size = fig_settings['width_padding'] + height_padding_size = fig_settings['height_padding'] + + # Define fixed physical sizes for plot components + marg_row_width = 2.0 + marg_col_height = 2.0 + colorbar_width = 0.2 + + fig_width = (num_cols * cell_size) + colorbar_width + marg_row_width + width_padding_size + fig_height = (num_rows * cell_size) + marg_col_height + height_padding_size + + fig = plt.figure(figsize=(fig_width, fig_height), constrained_layout=True) + + # Create a granual Gridspec for precise control + gs = gridspec.GridSpec( + nrows=2, ncols=3, + figure=fig, + width_ratios=[num_cols * cell_size, colorbar_width, marg_row_width], + height_ratios=[num_rows * cell_size, marg_col_height], + wspace=0.00, hspace=0.00, + ) + + ax_colorbar = fig.add_subplot(gs[0, 1]) + ax_heatmap = fig.add_subplot(gs[0, 0]) + ax_marg_row = fig.add_subplot(gs[0, 2], sharey=ax_heatmap) + ax_marg_col = fig.add_subplot(gs[1, 0], sharex=ax_heatmap) + ax_corner = fig.add_subplot(gs[1, 2]) + + # Define fontsize + additional_fontsize = 
fig_settings['additional_fontsize'] + fontsize_heatmap_score = 12 + additional_fontsize + 2 + fontsize_ticklabels = 12 + additional_fontsize + 2 + fontsize_marginal_ticks = 12 + additional_fontsize + 3 + fontsize_labels = 14 + additional_fontsize + 3 + fontsize_legend = 16 + additional_fontsize + 0 + + # Define linewidth + linewidth_boxes = 5 + + # Define markersize + markersize = 6 + + # Hide tick labels on marginal plots initially + plt.setp(ax_marg_col.get_xticklabels(), visible=False) + + # --- Plot Heatmap --- + im = ax_heatmap.imshow(mean_scores['composite'], cmap='BuPu', aspect='equal') + + # Add grid lines to separate cells + for i in range(num_rows + 1): + ax_heatmap.axhline(i - 0.5, color='black', linewidth=0.5) + for i in range(num_cols + 1): + ax_heatmap.axvline(i - 0.5, color='black', linewidth=0.5) + + ax_heatmap.set_xticks(np.arange(num_cols)) + ax_heatmap.set_yticks(np.arange(num_rows)) + + # Column labels (top) + short_col_hp_key = LATEX_SYMBOL_MAP.get(column_hp_key, LABEL_MAP.get(column_hp_key, column_hp_key)) + ax_heatmap.set_xlabel(short_col_hp_key, fontsize=fontsize_labels, fontweight='bold') + ax_heatmap.xaxis.set_ticks_position('top') + ax_heatmap.xaxis.set_label_position('top') + + col_tick_labels = [] + for val in column_values: + if column_hp_key == 'pooling_method': + col_tick_labels.append(POOLING_METHOD_VALUE_MAP.get(val, val)) + elif isinstance(val, float): + col_tick_labels.append(f"{val:.2g}") + else: + col_tick_labels.append(str(val)) + + if explainer_key != 'guided_grad_cam': + ax_heatmap.set_xticklabels(col_tick_labels, rotation=0, fontsize=fontsize_ticklabels) + else: + ax_heatmap.set_xticklabels(col_tick_labels, rotation=10, fontsize=fontsize_ticklabels) + + # Row labels (left) + short_row_hp_keys = [LATEX_SYMBOL_MAP.get(key, LABEL_MAP.get(key, key)) for key in row_hp_keys] + ax_heatmap.set_ylabel(" & ".join(short_row_hp_keys), fontsize=fontsize_labels, fontweight='bold') + + row_tick_labels = [] + for combo in row_combinations: + current_params_dict = dict(zip(row_hp_keys, combo)) + label_parts = [] + if not row_hp_keys: + label_parts.append("Default") + else: + for r_key in row_hp_keys: + val = current_params_dict[r_key] + if r_key == 'pooling_method': + label_parts.append(POOLING_METHOD_VALUE_MAP.get(val, val)) + elif isinstance(val, float): + label_parts.append(f"{val:.2g}") + else: + label_parts.append(str(val)) + row_tick_labels.append("\n& ".join(label_parts)) + + ax_heatmap.set_yticklabels(row_tick_labels, rotation=0, ha='right', va='center', fontsize=fontsize_ticklabels) + + # Calculate a threshold to switch text color from black to white for readability + all_scores = mean_scores['composite'] + min_score, max_score = np.nanmin(all_scores), np.nanmax(all_scores) + + # Set threshold at 50% of the score range + color_threshold = min_score + (max_score - min_score) * 0.5 + + # Add text annotations with 2 decimal places + for r in range(num_rows): + for c in range(num_cols): + mean_val, std_val = mean_scores['composite'][r, c], std_scores['composite'][r, c] + if not np.isnan(mean_val): + text_color = 'w' if mean_val > color_threshold else 'k' + ax_heatmap.text(c, r, f"{mean_val:.2f}\n(±{std_val:.2f})", ha="center", va="center", color=text_color, fontsize=fontsize_heatmap_score) + + # Add highlight boxes + highlight_cmap = plt.cm.get_cmap('Set2') + highlight_patches = [] + if default_indices: + def_r, def_c = default_indices + + patch_default = plt.Rectangle((default_indices[1]-0.5, default_indices[0]-0.5), 1, 1, + fill=False, 
edgecolor=highlight_cmap(1), lw=linewidth_boxes, label='Default') + ax_heatmap.add_patch(patch_default) + highlight_patches.append(patch_default) + + # lrp_epsilon_gamma_box + # Get the exact values from the config dictionary + default_params = DEFAULT_PARAMS_CONFIG[explainer_key] + exact_gamma = default_params.get('gamma') + exact_epsilon = default_params.get('epsilon') + + if exact_gamma is not None and exact_epsilon is not None: + annotation_text = f"Default\n(γ={exact_gamma:.2f}, ε={exact_epsilon:.2f})" + + # Place text in the bottom-right of the cell + ax_heatmap.text(def_c + 0.45, def_r + 0.45, annotation_text, + color="white", fontsize=9, + ha='right', va='bottom', + bbox=dict(boxstyle='round,pad=0.2', fc='black', ec='none', alpha=0.6)) + + if optimized_indices: + patch_optimized = plt.Rectangle((optimized_indices[1]-0.5, optimized_indices[0]-0.5), 1, 1, + fill=False, edgecolor=highlight_cmap(0), lw=linewidth_boxes, linestyle='-', label='Optimized') + ax_heatmap.add_patch(patch_optimized) + highlight_patches.append(patch_optimized) + + # lrp_epsilon_gamma_box + # Add text annotation for the precise optimized values + opt_r, opt_c = optimized_indices + + # Get the exact floating point values from the 'best_params' dictionary + exact_gamma = best_params.get('gamma') + exact_epsilon = best_params.get('epsilon') + + if exact_gamma is not None and exact_epsilon is not None: + annotation_text = f"Optimized\n(γ={exact_gamma:.2f}, ε={exact_epsilon:.2f})" + + # Place text near the patch + ax_heatmap.text(opt_c + 0.1, opt_r + 0.1, annotation_text, + color="white", fontsize=10, fontweight='bold', + ha='left', va='top', + bbox=dict(boxstyle='round,pad=0.2', fc='black', ec='none', alpha=0.6)) + + # Add colorbar with height matching heatmap + fig.colorbar(im, cax=ax_colorbar).set_label("Mean Composite Score", rotation=-90, va="bottom", fontsize=fontsize_labels, fontweight='bold') + + # --- Draw Column Marginalized Scores (Bottom Row) --- + ax_marg_col_twin = ax_marg_col.twinx() + colors = plt.cm.get_cmap('tab10') + + x_positions = np.arange(len(column_values)) + abpc_col_scores = np.nanmean(mean_scores['ab_pc'], axis=0) + comp_col_scores = np.nanmean(mean_scores['composite'], axis=0) + simplicity_col_scores = np.nanmean(mean_scores['simplicity'], axis=0) + + # Plot AbPC and Composite on the primary Y-axis + line_comp_col = ax_marg_col.plot(x_positions, comp_col_scores, marker='s', linestyle='-', color=colors(2), label='Composite', markersize=markersize)[0] + line_abpc_col = ax_marg_col.plot(x_positions, abpc_col_scores, marker='o', linestyle='--', color=colors(0), label='AbPC', markersize=markersize)[0] + ax_marg_col.set_ylabel('AbPC / Composite', color='black', fontsize=fontsize_labels, fontweight='bold') + + # Plot Simplicity on the twin Y-axis + line_simplicity_col = ax_marg_col_twin.plot(x_positions, simplicity_col_scores, marker='^', linestyle='--', color=colors(1), label='Simplicity', markersize=markersize)[0] + ax_marg_col_twin.set_ylabel('Simplicity', color='black', fontsize=fontsize_labels, fontweight='bold') + + # Capture the label artist objects to ensure they are not clipped + label_artist_col_1 = ax_marg_col.yaxis.get_label() + label_artist_col_2 = ax_marg_col_twin.yaxis.get_label() + + ax_marg_col.tick_params(axis='y', labelsize=fontsize_marginal_ticks) + ax_marg_col_twin.tick_params(axis='y', labelcolor='black', labelsize=fontsize_marginal_ticks) + ax_marg_col.yaxis.set_major_locator(mticker.MaxNLocator(nbins=plot_settings['col']['nbins'], prune='both')) + 
ax_marg_col_twin.yaxis.set_major_locator(mticker.MaxNLocator(nbins=plot_settings['col_twin']['nbins'], prune='both')) + + # Format tick labels + ax_marg_col.yaxis.set_major_formatter(plt.FuncFormatter(lambda x, p: f"{x:{plot_settings['col']['format']}}")) + ax_marg_col_twin.yaxis.set_major_formatter(plt.FuncFormatter(lambda x, p: f"{x:{plot_settings['col_twin']['format']}}")) + + ax_marg_col.set_xticks(x_positions) + ax_marg_col.grid(True, linestyle='--', alpha=0.6) + ax_marg_col.set_xlim(-0.5, len(column_values) - 0.5) + + # --- Draw Row Marginalized Scores (Right Column) --- + ax_marg_row_twin = ax_marg_row.twiny() + y_positions = np.arange(len(row_combinations)) + + abpc_row_scores = np.round(np.nanmean(mean_scores['ab_pc'], axis=1), decimals=4) + comp_row_scores = np.round(np.nanmean(mean_scores['composite'], axis=1), decimals=4) + simplicity_row_scores = np.round(np.nanmean(mean_scores['simplicity'], axis=1), decimals=4) + + # Plot Simplicity on the primary X-axis + line_simplicity_row = ax_marg_row.plot(simplicity_row_scores, y_positions, marker='^', linestyle='--', color=colors(1), label='Simplicity', markersize=markersize)[0] + ax_marg_row.set_xlabel('Simplicity', color='black', fontsize=fontsize_labels, fontweight='bold') + + # Plot AbPC and Composite on the twin X-axis + line_comp_row = ax_marg_row_twin.plot(comp_row_scores, y_positions, marker='s', linestyle='-', color=colors(2), label='Composite', markersize=markersize)[0] + line_abpc_row = ax_marg_row_twin.plot(abpc_row_scores, y_positions, marker='o', linestyle='--', color=colors(0), label='AbPC', markersize=markersize)[0] + ax_marg_row_twin.set_xlabel('AbPC / Composite', color='black', fontsize=fontsize_labels, fontweight='bold') + + # Capture the label artist objects to ensure they are not clipped + label_artist_row_1 = ax_marg_row.xaxis.get_label() + label_artist_row_2 = ax_marg_row_twin.xaxis.get_label() + + ax_marg_row.tick_params(axis='x', labelsize=fontsize_marginal_ticks) + ax_marg_row_twin.tick_params(axis='x', labelcolor='black', labelsize=fontsize_marginal_ticks) + ax_marg_row.xaxis.set_major_locator(mticker.MaxNLocator(nbins=plot_settings['row']['nbins'], prune='both')) + ax_marg_row_twin.xaxis.set_major_locator(mticker.MaxNLocator(nbins=plot_settings['row_twin']['nbins'], prune='both')) + + ax_marg_row_twin.xaxis.set_label_position("top") + ax_marg_row_twin.xaxis.tick_top() + + # Format tick labels + ax_marg_row.xaxis.set_major_formatter(plt.FuncFormatter(lambda x, p: f"{x:{plot_settings['row']['format']}}")) + ax_marg_row_twin.xaxis.set_major_formatter(plt.FuncFormatter(lambda x, p: f"{x:{plot_settings['row_twin']['format']}}")) + + ax_marg_row.grid(True, linestyle='--', alpha=0.6) + ax_marg_row.tick_params(axis='y', which='both', labelleft=False, labelright=False) + + # --- Corner Cell for Combined Legend --- + ax_corner.axis('off') + + # Collect all legend elements + metric_lines = [line_comp_col, line_abpc_col, line_simplicity_col] + metric_labels = [r'Composite ($\uparrow$)', r'AbPC ($\uparrow$)', r'Simplicity ($\uparrow$)'] + + all_lines = metric_lines + highlight_patches + all_labels = metric_labels + [patch.get_label() for patch in highlight_patches] + + # Create comprehensive legend + legend = ax_corner.legend( + all_lines, + all_labels, + loc='center', + fontsize=fontsize_legend, + frameon=True, + fancybox=True, + shadow=True, + borderpad=1.0, + columnspacing=1.0, + handlelength=1.0, + handletextpad=0.5 + ) + + # Calculate the dynamic font size proportional to the figure width + current_width = 
figure_widths.get(explainer_key, anchor_width) + dynamic_suptitle_fontsize = anchor_fontsize * (current_width / anchor_width) + + # Create a list of all extra artists that need to be included in the final saved area + extra_artists_to_include = (legend, label_artist_col_1, label_artist_col_2, label_artist_row_1, label_artist_row_2,) + + # --- Save Figure --- + savedir = os.path.join(cwd, f'results/hpo_impact_imagenet/figures/{args.model}') + os.makedirs(savedir, exist_ok=True) + save_fname = os.path.join(savedir, f'{explainer_key}_composite_heatmap.pdf') + plt.savefig( + save_fname, + dpi=300, + bbox_inches='tight', + bbox_extra_artists=extra_artists_to_include + ) + plt.close(fig) + print(f" Saved dataset-level composite plot to: {save_fname}") + + +def plot_attribution_comparison(args): + """ + Generates a figure showing qualitative examples of default vs. optimized attribution heatmaps. + """ + cwd = os.getcwd() + + # --- Load Dataset and Transform --- + _, transform = get_torchvision_model(args.model) + dataset = get_imagenet_val_dataset(transform, args.data_dir) + + # Define explainer-specific default parameters to locate them on the grid + DEFAULT_PARAMS_CONFIG = { + 'smooth_grad': { + 'n_iter': 20, + 'noise_level': 0.1, + }, + 'lrp_epsilon_gamma_box': { + 'epsilon': 1e-6, + 'gamma': 0.25, + }, + 'integrated_gradients': { + 'n_steps': 20, + 'baseline_fn': 'zeros', + }, + 'guided_grad_cam': { + 'interpolate_mode': 'nearest', + 'pooling_method': 'sumpos', + 'normalization_method': 'identity', + }, + } + FIGURE_SIZE_CONFIG = { + 'default': { + 'cell_size': 1.0, 'width_padding': 1.0, 'height_padding': 0.0, 'additional_fontsize': 0 + }, + 'smooth_grad': { + 'cell_size': 1.0, 'width_padding': 1.0, 'height_padding': 0.0, 'additional_fontsize': 0 + }, + 'lrp_epsilon_gamma_box': { + 'cell_size': 1.0, 'width_padding': 1.0, 'height_padding': 0.0, 'additional_fontsize': 0 + }, + 'guided_grad_cam': { + 'cell_size': 1.2, 'width_padding': 0.0, 'height_padding': 0.05, 'additional_fontsize': 2 + }, + 'integrated_gradients': { + 'cell_size': 1.2, 'width_padding': 0.0, 'height_padding': 0.35, 'additional_fontsize': 2 + }, + } + # Configuration for plot layouts and instance indices + QUALITATIVE_CONFIG = { + 'smooth_grad': { + 'instances': [66, 122], 'grid_shape': (1, 6) + }, + 'lrp_epsilon_gamma_box': { + 'instances': [81, 93], 'grid_shape': (1, 6) + }, + 'integrated_gradients': { + 'instances': [6, 16], 'grid_shape': (1, 6) + }, + 'guided_grad_cam': { + 'instances': [24, 68], 'grid_shape': (1, 6) + }, + } + # Configuration for the position of score annotations per explainer per instance + ANNOTATION_POS_CONFIG = { + 'default': ['bottom-left', 'bottom-left'], + 'smooth_grad': ['bottom-left', 'top-left'], + 'lrp_epsilon_gamma_box': ['bottom-left', 'bottom-left'], + 'guided_grad_cam': ['bottom-left', 'bottom-left'], + 'integrated_gradients': ['top-left', 'top-left'], + } + + for explainer_key in args.eval_explainer: + print(f"\n--- Visualizing Heatmap Examples: {explainer_key} ---") + + # Load pre-computed analysis results and optimized hyperparameters + hps_source = EXPL_HYPERPARAMETERS + load_path = os.path.join(cwd, f'results/hpo_impact_imagenet/raw/{args.model}/{explainer_key}.pkl') + best_params_path = os.path.join(cwd, f'data/ImageNet/optimized_params/{args.model}/{explainer_key}.pkl') + + # For lrp_epsilon_gamma_box, use the special 2x2 grid data and config + if explainer_key == 'lrp_epsilon_gamma_box': + print(" -> Using special 2x2 grid configuration for LRPEpsilonGammaBox qualitative 
examples.") + hps_source = QUALITATIVE_HYPERPARAMETERS + load_path = os.path.join(cwd, f'results/hpo_impact_imagenet/raw/{args.model}/{explainer_key}_2x2.pkl') + + if not os.path.exists(load_path) or not os.path.exists(best_params_path): + print(f" ERROR: Missing results file or params file. Please run --analyze first.") + print(f" - Checked for results: {load_path}") + print(f" - Checked for params: {best_params_path}") + continue + + results = load_pickle_data(load_path) + best_params = load_pickle_data(best_params_path)['composite'] + predicted_labels = results.get('predicted_labels', {}) + all_images_eval_scores = results.get('all_images_eval_scores', {}) + + # Get HP configuration + plot_config = PLOTTING_CONFIGS.get(explainer_key, DEFAULT_PLOT_CONFIG) + column_hp_key = plot_config['column_hp'] + row_hp_keys = plot_config['row_hps'] + all_hps_for_explainer = {**hps_source.get(explainer_key, {}), **COMMON_HYPERPARAMETERS} + column_values = all_hps_for_explainer.get(column_hp_key, []) + row_param_values_lists = [all_hps_for_explainer.get(k, []) for k in row_hp_keys] + row_combinations = list(itertools.product(*row_param_values_lists)) + + if explainer_key in DEFAULT_PARAMS_CONFIG: + print(f" Found specific default parameters for {explainer_key}.") + default_params_to_find = DEFAULT_PARAMS_CONFIG[explainer_key] + default_indices = _find_param_indices(default_params_to_find, row_hp_keys, row_combinations, column_hp_key, column_values) + else: + print(" Using (0, 0) as the default parameter index.") + default_indices = (0, 0) + + optimized_indices = _find_param_indices(best_params, row_hp_keys, row_combinations, column_hp_key, column_values) + + # Check if required data is available + if not default_indices or not optimized_indices: + print(" - Missing default or optimized indices. Skipping qualitative plot.") + return + + print(f" Default HP indices: {default_indices}") + print(f" Optimized HP indices: {optimized_indices}") + + print(f" Generating qualitative example plot for {explainer_key}...") + + # Check if the explainer has a defined qualitative plot config + if explainer_key not in QUALITATIVE_CONFIG: + print(f" - No qualitative config for {explainer_key}. Skipping plot.") + return + + config = QUALITATIVE_CONFIG[explainer_key] + instance_indices = config['instances'] + num_rows, num_cols = config['grid_shape'] + + # Verify that all required instances exist in the results + all_images_heatmaps = results.get('all_images_heatmaps', {}) + for idx in instance_indices: + if idx not in all_images_heatmaps: + print(f" - Instance index {idx} not found in results. Skipping qualitative plot.") + return + + # --- Plotting Setup --- + # Get explainer-specific figure size settings + fig_settings = FIGURE_SIZE_CONFIG.get(explainer_key, FIGURE_SIZE_CONFIG['default']) + cell_size = fig_settings['cell_size'] + width_padding_size = fig_settings['width_padding'] + height_padding_size = fig_settings['height_padding'] + + # Define fixed physical sizes for plot components + marg_row_width = 2.0 + marg_col_height = 2.0 + colorbar_width = 0.2 + + fig_height = (7 * cell_size) + marg_col_height + height_padding_size + fig_width = fig_height / num_rows * num_cols + + fig = plt.figure(figsize=(num_cols * 2.5, num_rows * 6)) + + # Create an outer grid (1x2) to separate the two instances. + outer_gs = gridspec.GridSpec(1, 2, figure=fig, wspace=0.15) + + # Create two inner grids (1x3 each), one for each outer cell. 
+ inner_gs_1 = gridspec.GridSpecFromSubplotSpec(1, 3, subplot_spec=outer_gs[0], wspace=0.1) + inner_gs_2 = gridspec.GridSpecFromSubplotSpec(1, 3, subplot_spec=outer_gs[1], wspace=0.1) + + # Create the axes from the inner grids. + axes_instance_1 = [fig.add_subplot(inner_gs_1[0, i]) for i in range(3)] + axes_instance_2 = [fig.add_subplot(inner_gs_2[0, i]) for i in range(3)] + + # Combine into a single list of 6 + axes = axes_instance_1 + axes_instance_2 + + highlight_cmap = plt.cm.get_cmap('Set2') + default_color = highlight_cmap(1) + optimized_color = highlight_cmap(0) + edge_linewidth = 6 + + fontsize_labels = 16.8 + fontsize_text = 15 + fontsize_title = 20 + + # --- Plot Attribution Maps --- + # Get the two instances to plot + instance_info = [ + {'idx': instance_indices[0], 'axes': axes[0:3]}, + {'idx': instance_indices[1], 'axes': axes[3:6]} + ] + + for i, info in enumerate(instance_info): + instance_idx = info['idx'] + ax_original, ax_default, ax_optimized = info['axes'] + + instance_heatmaps = all_images_heatmaps[instance_idx] + instance_scores = all_images_eval_scores.get(instance_idx, {}) + + # Get the position config for the current explainer + positions = ANNOTATION_POS_CONFIG.get(explainer_key, ANNOTATION_POS_CONFIG['default']) + # Get the position for the current instance (i: the loop index - 0 or 1) + position_key = positions[i] + + # Set text position parameters based on the config + if position_key == 'top-left': + y_pos, v_align = 0.97, 'top' + else: # Default to 'bottom-left' + y_pos, v_align = 0.03, 'bottom' + + # --- Plotting --- + # Column 1: Original Image + image_tensor, _ = dataset[instance_idx] + denormalized_img = denormalize_image(image_tensor, transform.mean, transform.std) + ax_original.imshow(denormalized_img) + + # Column 2: Default Heatmap + default_heatmap = instance_heatmaps.get(default_indices, np.zeros((224, 224))) + ax_default.imshow(default_heatmap, cmap='Reds') + + # Column 3: Optimized Heatmap + optimized_heatmap = instance_heatmaps.get(optimized_indices, np.zeros((224, 224))) + ax_optimized.imshow(optimized_heatmap, cmap='Reds') + + # --- Labels and Annotations --- + image_label = predicted_labels.get(instance_idx, "Unknown") + ax_original.set_ylabel(f"Label: {image_label}", fontsize=fontsize_labels, fontweight='bold', rotation=90, labelpad=10) + + # Default scores annotation + abpc_def = instance_scores.get((default_indices[0], default_indices[1], 'ab_pc'), torch.tensor(np.nan)).item() + simp_def = -instance_scores.get((default_indices[0], default_indices[1], 'complexity'), torch.tensor(np.nan)).item() + def_text = f"AbPC: {abpc_def:.2f}\nSimp: {simp_def:.3f}" + ax_default.text(0.03, y_pos, def_text, color="white", fontsize=fontsize_text, ha='left', va=v_align, transform=ax_default.transAxes, + bbox=dict(boxstyle='round,pad=0.2', fc='black', ec='none', alpha=0.6)) + + # Optimized scores annotation + abpc_opt = instance_scores.get((optimized_indices[0], optimized_indices[1], 'ab_pc'), torch.tensor(np.nan)).item() + simp_opt = -instance_scores.get((optimized_indices[0], optimized_indices[1], 'complexity'), torch.tensor(np.nan)).item() + opt_text = f"AbPC: {abpc_opt:.2f}\nSimp: {simp_opt:.3f}" + ax_optimized.text(0.03, y_pos, opt_text, color="white", fontsize=fontsize_text, ha='left', va=v_align, transform=ax_optimized.transAxes, + bbox=dict(boxstyle='round,pad=0.2', fc='black', ec='none', alpha=0.6)) + + # --- Styling --- + # Set titles only for the first instance's columns + ax_original.set_title("Input", fontsize=fontsize_title, fontweight='bold', 
pad=10) + ax_default.set_title("Default", fontsize=fontsize_title, fontweight='bold', pad=10) + ax_optimized.set_title("Optimized", fontsize=fontsize_title, fontweight='bold', pad=10) + + # Apply styles to all axes + ax_original.set_xticks([]); ax_original.set_yticks([]) + for ax, color in [(ax_default, default_color), (ax_optimized, optimized_color)]: + ax.set_xticks([]); ax.set_yticks([]) + for spine in ax.spines.values(): + spine.set_edgecolor(color) + spine.set_linewidth(edge_linewidth) + + # --- Save Figure --- + fig.subplots_adjust(left=0.05, right=0.98, bottom=0.15, top=0.85) + savedir = os.path.join(cwd, f'results/hpo_impact_imagenet/figures/{args.model}') + os.makedirs(savedir, exist_ok=True) + save_fname = os.path.join(savedir, f'{explainer_key}_qualitative_examples.pdf') + plt.savefig(save_fname, dpi=150, bbox_inches='tight') + plt.close(fig) + print(f" Saved qualitative examples to: {save_fname}") + + +def main(): + """Main execution function.""" + # arguments + parser = argparse.ArgumentParser(description="Evaluate and Analyze HPO Impact in PnPXAI") + parser.add_argument('--model', type=str, choices=TORCHVISION_MODEL_CHOICES, required=True) + parser.add_argument('--data_dir', type=str, required=True) + parser.add_argument('--data_from', type=int, default=0) + parser.add_argument('--data_to', type=int, default=128) + parser.add_argument('--batch_size', type=int, default=1) + parser.add_argument('--disable_gpu', action='store_true') + parser.add_argument('--analyze', action='store_true', help="Run HPO impact analysis and save results.") + parser.add_argument('--visualize', action='store_true', help="Visualize HPO impact analysis results and attribution maps.") + parser.add_argument('--eval_explainer', type=str, nargs='+') + + args = parser.parse_args() + + if args.analyze: + run_grid_search_evaluation(args) + print("\nEvaluation finished.") + + if args.visualize: + plot_hpo_analysis(args) + plot_attribution_comparison(args) + print("\nVisualization finished.") + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/experiments/utils/utils.py b/experiments/utils/utils.py index 5be5f59..bbc94d2 100644 --- a/experiments/utils/utils.py +++ b/experiments/utils/utils.py @@ -1,18 +1,18 @@ +from typing import Dict, Any, Tuple import os import pickle import dill import functools import random +import inspect import numpy as np import torch from torch import Tensor from torch.utils.data import Dataset, Subset, DataLoader - -# from pnpxai.explainers.utils.baselines import BASELINE_FUNCTIONS -# from pnpxai.explainers.utils.feature_masks import FEATURE_MASK_FUNCTIONS - import zennit + +from pnpxai.core.modality.modality import Modality from pnpxai.explainers import Explainer from pnpxai.explainers.lrp import ( LRPBase, @@ -171,138 +171,25 @@ def set_seed(seed): print(f"Set seed to {seed} for random, numpy, and torch (including CUDA if available).") -''' -def set_params(explainer_key, params, modality): - """ - Helper function to parse a parameter dictionary into explainer-specific - kwargs and postprocessor-specific kwargs. - It also handles instantiation of baseline/feature_mask functions from strings. - """ - if not params: - print(f"Warning: No optimized parameters found for {explainer_key}. 
Using defaults.") - explainer_kwargs = {} - post_kwargs = {} - else: - explainer_kwargs = {k: v for k, v in params.items() if k not in POSTPROCESS_PARAM_KEYS} - post_kwargs = {k: v for k, v in params.items() if k in POSTPROCESS_PARAM_KEYS} - - if modality.dtype.is_floating_point: - python_dtype = float - elif modality.dtype.is_complex: - print(f"Warning: Complex dtype {modality.dtype} detected. Parameter instantiation might fail if not handled in dictionaries.") - python_dtype = None - else: - python_dtype = int - modality_key = (python_dtype, modality.ndims) - - # Separate potential nested parameters for all baseline/feature_mask functions - # from the main explainer_kwargs before processing. - # This prevents parameters for one mask/baseline from being passed to another. - all_nested_baseline_params = {} - all_nested_mask_params = {} - cleaned_explainer_kwargs = {} - - for k, v in list(explainer_kwargs.items()): - if k.startswith('baseline_fn.'): - all_nested_baseline_params[k] = v - elif k.startswith('feature_mask_fn.'): - all_nested_mask_params[k] = v - else: - cleaned_explainer_kwargs[k] = v - explainer_kwargs = cleaned_explainer_kwargs # explainer_kwargs now only has non-nested parameters - - # Handle baseline_fn - baseline_fn_str = explainer_kwargs.get('baseline_fn') - if baseline_fn_str and isinstance(baseline_fn_str, str): - specific_baseline_params = {} - prefix_baseline = 'baseline_fn.' - try: - BaselineClass = BASELINE_FUNCTIONS.get(modality_key, {}).get(baseline_fn_str) - if BaselineClass: - init_signature = inspect.signature(BaselineClass.__init__) - # Get expected parameters, excluding 'self', '*args', '**kwargs' - expected_params = {p.name for p in init_signature.parameters.values() - if p.kind not in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD) - and p.name != 'self'} - - # Pass 'dim' if the BaselineClass expects it - if 'dim' in expected_params: - specific_baseline_params['dim'] = modality.pooling_dim - - for param_key, param_value in all_nested_baseline_params.items(): - param_name = param_key[len(prefix_baseline):] - if param_name in expected_params: - specific_baseline_params[param_name] = param_value - - instance = BaselineClass(**specific_baseline_params) - explainer_kwargs['baseline_fn'] = (instance,) # Replace string with instance tuple - print(f" Instantiated baseline_fn: {baseline_fn_str} -> {type(instance)} with params {specific_baseline_params}") - - else: - print(f" Warning: Baseline function '{baseline_fn_str}' not found for modality {modality_key}. Removing related keys.") - if 'baseline_fn' in explainer_kwargs: del explainer_kwargs['baseline_fn'] - - except Exception as e: - print(f" Error instantiating baseline function '{baseline_fn_str}' with params {specific_baseline_params}: {e}. Removing related keys.") - if 'baseline_fn' in explainer_kwargs: del explainer_kwargs['baseline_fn'] - - # Handle feature_mask_fn - feature_mask_fn_str = explainer_kwargs.get('feature_mask_fn') - if feature_mask_fn_str and isinstance(feature_mask_fn_str, str): - specific_mask_params = {} - prefix_mask = 'feature_mask_fn.' 
- try: - FeatureMaskClass = FEATURE_MASK_FUNCTIONS.get(modality_key, {}).get(feature_mask_fn_str) - if FeatureMaskClass: - init_signature = inspect.signature(FeatureMaskClass.__init__) - expected_params = {p.name for p in init_signature.parameters.values() - if p.kind not in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD) - and p.name != 'self'} - - # Pass 'dim' if the FeatureMaskClass expects it - if 'dim' in expected_params: - specific_mask_params['dim'] = modality.pooling_dim - - for param_key, param_value in all_nested_mask_params.items(): - param_name = param_key[len(prefix_mask):] - if param_name in expected_params: - specific_mask_params[param_name] = param_value - - instance = FeatureMaskClass(**specific_mask_params) - explainer_kwargs['feature_mask_fn'] = (instance,) # Replace string with instance tuple - print(f" Instantiated feature_mask_fn: {feature_mask_fn_str} -> {type(instance)} with params {specific_mask_params}") - - else: - print(f" Warning: Feature mask function '{feature_mask_fn_str}' not found for modality {modality_key}. Removing related keys.") - for key in keys_to_remove_mask: - if key in explainer_kwargs: del explainer_kwargs[key] - - except Exception as e: - print(f" Error instantiating feature mask function '{feature_mask_fn_str}' with params {specific_mask_params}: {e}. Removing related keys.") - if 'feature_mask_fn' in explainer_kwargs: del explainer_kwargs['feature_mask_fn'] - - return explainer_kwargs, post_kwargs -''' -import inspect -from typing import Dict, Any, Tuple - -from pnpxai.core.modality.modality import Modality -from pnpxai.explainers.utils.postprocess import PostProcessor - -# Define keys that belong to the postprocessor, not the explainer -POSTPROCESS_PARAM_KEYS = {'pooling_method', 'normalization_method'} - def set_params(params: Dict[str, Any], modality: Modality) -> Tuple[Dict[str, Any], Dict[str, Any]]: """ - Refactored set_params function for the new pnpxai API. + Processes a flat parameter dictionary and splits it for explainers and postprocessors. This function takes a flat dictionary of parameters and the experiment's modality object. It uses the modality's internal 'FunctionSelectors' - to correctly instantiate baseline and feature_mask functions. + (e.g., `modality.baseline_fn_selector`) to correctly instantiate callable + functions (like `baseline_fn` or `feature_mask_fn`) from their string names. - It splits parameters into explainer-ready kwargs and postprocessor-ready kwargs. - """ + It also handles nested parameters (e.g., 'baseline_fn.n_samples') by parsing + them and passing them as arguments to the selector. + Finally, it splits all parameters into two dictionaries: + 1. `explainer_kwargs`: For the explainer (e.g., `n_steps`, `baseline_fn`). + 2. `post_kwargs`: For the postprocessor (e.g., `pooling_method`). + """ + # Define keys that belong to the postprocessor, not the explainer + POSTPROCESS_PARAM_KEYS = {'pooling_method', 'normalization_method'} + # Separate explainer kwargs from postprocessor kwargs explainer_kwargs = {k: v for k, v in params.items() if k not in POSTPROCESS_PARAM_KEYS} post_kwargs = {k: v for k, v in params.items() if k in POSTPROCESS_PARAM_KEYS} @@ -362,33 +249,33 @@ def set_params(params: Dict[str, Any], modality: Modality) -> Tuple[Dict[str, An def patch_lrp_explainer(explainer: Explainer) -> Explainer: """ - Workaround for pnpxai LRP explainers' stateful initialization bug. + Utility function to synchronize pnpxai LRP explainers after `.set_kwargs()`. 
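+    A minimal usage sketch (this mirrors how the grid-search loop calls it; the
+    hyperparameter values below are placeholders, not recommended settings):
+
+        explainer = LRPEpsilonGammaBox(model=model).set_kwargs(epsilon=0.25, gamma=0.1)
+        explainer = patch_lrp_explainer(explainer)  # rebuilds zennit_composite from the updated attributes
+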
- This function addresses a bug where pnpxai LRP wrappers + This function addresses an initialization behavior where pnpxai LRP wrappers create their internal `zennit_composite` object during `__init__` using default parameters. A subsequent `.set_kwargs()` updates the wrapper's attributes (e.g., `explainer.epsilon`), - but does not recreate the stale `zennit_composite` object, which still holds - the stale default parameters in its internal rules. + but does not automatically propagate these changes to the internal `zennit_composite` + object, which was created during `__init__` with the default parameters. - This patch *forces* recreation of `zennit_composite` *after* `.set_kwargs()` - by manually calling the same private helper functions used in `lrp.py`'s `__init__`, - but this time feeding them the *updated* attributes from the explainer instance. + This utility manually recreates the `zennit_composite` after `.set_kwargs()` + by calling the same private helper functions used in `lrp.py`'s `__init__`, + but this time feeding them the updated attributes from the explainer instance. Args: explainer: The LRP explainer instance (e.g., LRPEpsilonGammaBox) that has just been configured with `.set_kwargs()`. Returns: - The same explainer instance, now patched with a correct `zennit_composite`. + The same explainer instance, now updated with a `zennit_composite` object + reflecting the new parameters. """ new_composite = None explainer_class_name = explainer.__class__.__name__ if isinstance(explainer, LRPEpsilonGammaBox): - # print(f"Patching {explainer_class_name} with epsilon={explainer.epsilon}, gamma={explainer.gamma}") new_composite = _get_epsilon_gamma_box_composite( low=explainer.low, high=explainer.high, @@ -398,21 +285,18 @@ def patch_lrp_explainer(explainer: Explainer) -> Explainer: zennit_canonizers=explainer.zennit_canonizers ) elif isinstance(explainer, LRPUniformEpsilon): - # print(f"Patching {explainer_class_name} with epsilon={explainer.epsilon}") new_composite = _get_uniform_epsilon_composite( epsilon=explainer.epsilon, # Use the UPDATED attribute stabilizer=explainer.stabilizer, zennit_canonizers=explainer.zennit_canonizers ) elif isinstance(explainer, LRPEpsilonPlus): - # print(f"Patching {explainer_class_name} with epsilon={explainer.epsilon}") new_composite = _get_epsilon_plus_composite( epsilon=explainer.epsilon, # Use the UPDATED attribute stabilizer=explainer.stabilizer, zennit_canonizers=explainer.zennit_canonizers ) elif isinstance(explainer, LRPEpsilonAlpha2Beta1): - # print(f"Patching {explainer_class_name} with epsilon={explainer.epsilon}") new_composite = _get_epsilon_alpha2_beta1_composite( epsilon=explainer.epsilon, # Use the UPDATED attribute stabilizer=explainer.stabilizer, @@ -423,5 +307,5 @@ def patch_lrp_explainer(explainer: Explainer) -> Explainer: if new_composite is not None: explainer.zennit_composite = new_composite - + return explainer \ No newline at end of file From 0771ebc81fc8506b45c8801b7b7766e1b2fcf5dc Mon Sep 17 00:00:00 2001 From: seongun-kim Date: Tue, 4 Nov 2025 20:21:28 +0900 Subject: [PATCH 05/20] refactor: Update import paths to reflect utils --- experiments/scripts/analyze_imagenet_hpo.py | 2 +- experiments/utils/__init__.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/experiments/scripts/analyze_imagenet_hpo.py b/experiments/scripts/analyze_imagenet_hpo.py index 0fc9f97..8585e00 100644 --- a/experiments/scripts/analyze_imagenet_hpo.py +++ b/experiments/scripts/analyze_imagenet_hpo.py @@ -17,7 +17,7 @@ from 
pnpxai.evaluator.metrics import MoRF, LeRF, AbPC from pnpxai.evaluator.optimizer import Objective, optimize -from experiments.utils.helpers import ( +from experiments.utils import ( set_seed, get_torchvision_model, get_imagenet_sample_from_hf, diff --git a/experiments/utils/__init__.py b/experiments/utils/__init__.py index a71183f..67c4f3c 100644 --- a/experiments/utils/__init__.py +++ b/experiments/utils/__init__.py @@ -20,7 +20,8 @@ VQADataset, get_vqa_dataset, get_livertumor_dataset, - get_livertumor_dataset_from_hf + get_livertumor_dataset_from_hf, + get_imagenet_sample_from_hf ) from .models import ( @@ -48,6 +49,7 @@ 'IMDBDataset', 'get_imdb_dataset', 'VQADataset', 'get_vqa_dataset', 'get_livertumor_dataset', 'get_livertumor_dataset_from_hf', + 'get_imagenet_sample_from_hf', # models 'get_torchvision_model', 'get_livertumor_model', 'get_livertumor_model_from_hf', From 5376489cfc6e6e4db2d4368a4f960a50edf70e91 Mon Sep 17 00:00:00 2001 From: seongun-kim Date: Tue, 4 Nov 2025 20:40:40 +0900 Subject: [PATCH 06/20] docs: Update README.md --- README.md | 113 +++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 95 insertions(+), 18 deletions(-) diff --git a/README.md b/README.md index f182c57..a02e0ef 100644 --- a/README.md +++ b/README.md @@ -29,10 +29,12 @@ We provide two ways to set up the environment: using Docker (recommended for exa ``` 3. **Run the Docker container:** - This command starts an interactive container, mounts the project code, and assigns GPUs. Adjust `-v` source path and `--gpus` devices as needed. + This command starts an interactive container, mounts the project code, and assigns GPUs. Adjust mount paths and `--gpus` devices as needed. The ImageNet paths are recommended for running Experiment 2. ```bash docker run -it \ -v "$(pwd)":/root/pnpxai-experiments \ + -v /PATH_TO_IMAGENET/ImageNet1k:/root/pnpxai-experiments/data/ImageNet/ImageNet1k:ro \ + -v /PATH_TO_IMAGENET/ImageNet1k_info:/root/pnpxai-experiments/data/ImageNet/ImageNet1k_info:ro \ --gpus '"device=0"' \ # Example: Assign GPU 0 --name pnpxai_exp \ seongun/ubuntu22.04-cuda12.2.2-cudnn8-pytorch2.1:base @@ -77,35 +79,108 @@ We provide two ways to set up the environment: using Docker (recommended for exa pip install -e . ``` -## Data +## Running Experiments -The **Liver Tumor Classification dataset** used in these experiments is hosted on Hugging Face Hub: -[➡️ seongun/liver-tumor-classification](https://huggingface.co/datasets/seongun/liver-tumor-classification) +This repository contains code for various experiments presented in the PnPXAI paper. Each experiment can typically be run using scripts located in [`experiments/scripts/`](experiments/scripts/). -This dataset contains individual 2D CT scan slices derived from the original [LiTS dataset](https://doi.org/10.1016/j.media.2022.102680). Data is stored as PNG images (`sample`, `w_sample`, `mask`) linked via a `metadata.jsonl` file. +### Experiment 1: ImageNet Explanation -The experiment scripts (`experiments/scripts/`) **automatically download** the necessary data files (metadata and specific image instances required) using the `huggingface_hub` library when first executed. The downloaded data will be stored in the Hugging Face cache directory (usually `~/.cache/huggingface/datasets`). 
+This experiment qualitatively analyzes the effect of HPO (optimizing for AbPC) on explanations of `LRPUniformEpsilon`, `IntegratedGradients`, and `KernelShap` on [ImageNet1k](https://www.image-net.org/index.php) samples, evaluating the change in faithfulness metrics (MoRF, LeRF, AbPC). -For more details on the data loading process, refer to the `get_livertumor_dataset_from_hf` function within [`experiments/utils/helpers.py`](./experiments/utils/helpers.py). +#### Data and Model -## Pre-trained Model + * **Data (ImageNet)**: The subset of ImageNet1k for this experiment, one sample per label, a totle of 1,000 samples, is hosted on Hugging Face Hub: [➡️ geonhyeongkim/imagenet-samples-for-pnpxai-experiments](https://huggingface.co/datasets/geonhyeongkim/imagenet-samples-for-pnpxai-experiments). The script **automatically downloads** the necessary files when first executed. For more details on the data loading process, refer to the `get_imagenet_samples_from_hf` function within [`experiments/utils/datasets.py`](./experiments/utils/datasets.py). -The pre-trained **ResNet-50 model** adapted for liver tumor classification is hosted on Hugging Face Hub: -[➡️ seongun/resnet50-livertumor](https://huggingface.co/seongun/resnet50-livertumor) + * **Model (ResNet-18):** This script uses a standard `ResNet-18` model pre-trained on ImageNet, loaded directly from `torchvision.models`. -Similar to the dataset, the experiment scripts **automatically download** the model weights using the `huggingface_hub` library when needed. The model architecture definition (`ResNet50LiverTumor`) is included in [`experiments/models/liver_tumor.py`](./experiments/models/liver_tumor.py). +#### Usage -For more details on model loading, refer to the `get_livertumor_model_from_hf` function within [`experiments/utils/helpers.py`](experiments/utils/helpers.py). +```bash +python -m experiments.scripts.analyze_imagenet_hpo \ + --data_id 72 \ + --save_dir results/analyze_imagenet_hpo/ \ + --seed 42 \ + --n_trials 100 \ + --analyze \ + --visualize +``` -## Running Experiments +#### Arguments -This repository contains code for various experiments presented in the PnPXAI paper. Each experiment can typically be run using scripts located in [`experiments/scripts/`](experiments/scripts/). + * `--data_id `: The specific index (`0`-`999`) of the data instance from the Hugging Face dataset to analyze. + * `--save_dir `: Data directory where experiment results are saved. + * `--n_trials `: Number of trials for hyperparameter optimization (HPO). Defaults to `100`. + * `--analyze`: Runs the HPO process and saves the raw results to `/raw/.pkl`. + * `--visualize`: Loads the previously saved results for the specified `--data_id` and generates a visualization PDF comparing default vs. optimized attributions and metrics. Saves the figure to `/figures/.pdf`. Requires results to be saved first (using `--analyze`). + +#### Output + +Results will be saved under the `` directory, organized by data instance ID. + +--- + +### Experiment 2: Hyperparameter Impact Analysis + +This experiment evaluates a grid of hyperparameter combinations for various explainers on a subset of the ImageNet validation set and generates plots comparing the impact on evaluation metrics. + +#### Data and Model + + * **Data (ImageNet):** This script requires the **ImageNet 1k dataset**. You must download it from the [official site](https://image-net.org/download.php) (requires registration). 
The script assumes the validation set is organized in the `data/ImageNet/` directory as follows. The `docker run` command in the Setup section already includes the recommended mounts for these paths. + ``` + data/ + └── ImageNet/ + ├── ImageNet1k/ + │ └── val/ + │ └── val/ + │ └── ILSVRC2012_val_IDX.JPEG + └── ImageNet1k_info/ + ├── ImageNet_class_index.json + └── ImageNet_val_label.txt + ``` -### Experiment 1: Liver Tumor Explanation + * **Model (ResNet-18):** This script uses a standard `ResNet-18` model pre-trained on ImageNet, loaded directly from `torchvision.models`. + +#### Usage + +```bash +python -m experiments.scripts.analyze_imagenet_hpo_impact \ + --data_dir data/ImageNet \ + --batch_size 4 \ + --analyze \ + --visualize \ + --eval_explainer smooth_grad lrp_epsilon_gamma_box guided_grad_cam integrated_gradients +``` + +#### Arguments + + * `--data_dir `: Path to the root `ImageNet` directory (which contains `ImageNet1k` and `ImageNet1k_info`). + * `--analyze`: Runs the full grid search evaluation and saves the raw metric results. + * `--visualize`: loads the raw results and generates the final plots. + * `--eval_explainer `: A space-separated list of explainers to analyze (e.g., `smooth_grad`, `guided_grad_cam`). + +#### Note on `batch_size` + + * This experiment runs on a 128-image subset (default for `--data_to 128`). + * Most explainers (e.g., `smooth_grad`, `guided_grad_cam`, `lrp_epsilon_gamma_box`) can run with a large batch size, e.g., `--batch_size 128`. + * `integrated_gradients` is memory-intensive and may require a smaller batch size (e.g., `--batch_size 4`), depending on your GPU. + +#### Output + +Raw results (`.pkl`, `.csv`) will be saved under the `results/hpo_impact_imagenet/raw/resnet18/` directory, and the generated figures (`.pdf`) will be saved in `results/hpo_impact_imagenet/figures/resnet18/`. + +--- + +### Experiment 3: Liver Tumor Explanation This experiment analyzes the effect of HPO (optimizing for AbPC) on explanations for a liver tumor CT slice, evaluating the change in ground truth agreement (Relevance Mass/Rank Accuracy). -**Usage:** +#### Data and Model + + * **Data (Liver Tumor):** The **Liver Tumor Classification dataset** used in this experiment is hosted on Hugging Face Hub: [➡️ seongun/liver-tumor-classification](https://huggingface.co/datasets/seongun/liver-tumor-classification). This dataset contains individual 2D CT scan slices derived from the original [LiTS dataset](https://doi.org/10.1016/j.media.2022.102680). The script **automatically downloads** the necessary files when first executed. For more details on the data loading process, refer to the `get_livertumor_dataset_from_hf` function within [`experiments/utils/datasets.py`](./experiments/utils/datasets.py) + + * **Model (ResNet-50 Liver Tumor):** The pre-trained **ResNet-50 model** adapted for this task is hosted on Hugging Face Hub: [➡️ seongun/resnet50-livertumor](https://huggingface.co/seongun/resnet50-livertumor). Similar to the dataset, the script **automatically downloads** the model weights. The model architecture is defined in [`experiments/models/liver_tumor.py`](./experiments/models/liver_tumor.py). For more details on model loading, refer to the `get_livertumor_model_from_hf` function within [`experiments/utils/models.py`](./experiments/utils/models.py). 
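+
+For reference, both assets can be loaded programmatically the same way the script does. A minimal sketch, assuming the helper signatures in `experiments/utils/models.py` and `experiments/utils/datasets.py` (the dataset keyword names are inferred from that module and may differ slightly):
+
+```python
+from experiments.utils import (
+    get_livertumor_dataset_from_hf,
+    get_livertumor_model_from_hf,
+)
+
+# Downloads the pre-trained weights from the Hub and returns the eval-mode
+# model together with its preprocessing transform.
+model, transform = get_livertumor_model_from_hf(repo_id="seongun/resnet50-livertumor")
+
+# Downloads only the metadata plus the image files needed for the requested
+# indices (2280 is the example index used in the Arguments section below).
+dataset = get_livertumor_dataset_from_hf(
+    transform=transform,
+    hf_repo_id="seongun/liver-tumor-classification",
+    indices=[2280],
+)
+```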
+ +#### Usage ```bash python -m experiments.scripts.analyze_livertumor_hpo \ @@ -115,13 +190,15 @@ python -m experiments.scripts.analyze_livertumor_hpo \ --visualize ``` -**Arguments:** +#### Arguments + * `--data_id `: The specific index (e.g., `2280`) of the data instance from the Hugging Face dataset to analyze. * `--n_trials `: Number of trials for hyperparameter optimization (HPO). Defaults to `100`. * `--analyze`: Runs the HPO process and saves the raw results (`.pkl` files for default run, and optimized run) to `results/hpo_analysis_livertumor/raw//`. * `--visualize`: Loads the previously saved results for the specified `--data_id` and generates a visualization PDF comparing default vs. optimized attributions and metrics. Saves the figure to `results/hpo_analysis_livertumor/figures/.pdf`. Requires results to be saved first (using `--analyze`). -**Output:** +#### Output + Results will be saved under the `results/hpo_analysis_livertumor/` directory, organized by data instance ID. From 44087fbe0bf7432f45eaefc4b6a11aa2da91aa7c Mon Sep 17 00:00:00 2001 From: Enver Menadjiev Date: Thu, 13 Nov 2025 14:12:18 +0900 Subject: [PATCH 07/20] feat: add aki experiment --- experiments/datasets/__init__.py | 3 +- experiments/datasets/aki.py | 117 +++++++ experiments/models/__init__.py | 3 +- experiments/models/aki.py | 39 +++ experiments/scripts/analyze_aki_hpo.py | 468 +++++++++++++++++++++++++ experiments/utils/__init__.py | 6 +- experiments/utils/datasets.py | 174 ++++++--- experiments/utils/models.py | 79 +++-- 8 files changed, 799 insertions(+), 90 deletions(-) create mode 100644 experiments/datasets/aki.py create mode 100644 experiments/models/aki.py create mode 100644 experiments/scripts/analyze_aki_hpo.py diff --git a/experiments/datasets/__init__.py b/experiments/datasets/__init__.py index 494f706..71a3201 100644 --- a/experiments/datasets/__init__.py +++ b/experiments/datasets/__init__.py @@ -1 +1,2 @@ -from .liver_tumor import LiverTumorDataset, LiverTumorDatasetHf \ No newline at end of file +from .liver_tumor import LiverTumorDataset, LiverTumorDatasetHf +from .aki import AKIDataset, AKI_COLUMNS \ No newline at end of file diff --git a/experiments/datasets/aki.py b/experiments/datasets/aki.py new file mode 100644 index 0000000..7eb9974 --- /dev/null +++ b/experiments/datasets/aki.py @@ -0,0 +1,117 @@ +import numpy as np +from torch.utils.data import Dataset + + +class AKIDataset(Dataset): + def __init__(self, X: np.ndarray, Y: np.ndarray): + self.X = X.astype(np.float32) + self.Y = Y.astype(int) + + self.balanced_X = self.X + self.balanced_Y = self.Y + + def rebalance(self): + aki_ids = np.argwhere(self.Y != 0).ravel() + non_aki_ids = np.argwhere(self.Y == 0).ravel() + + if len(aki_ids) > len(non_aki_ids): + return + + target_len = len(aki_ids) + + non_aki_ids = np.random.choice(non_aki_ids, target_len, replace=False) + rebalanced_ids = np.concatenate((aki_ids, non_aki_ids)) + np.random.shuffle(rebalanced_ids) + self.balanced_X = self.X[rebalanced_ids] + self.balanced_Y = self.Y[rebalanced_ids] + + def __len__(self): + return len(self.balanced_X) + + def __getitem__(self, idx): + return (self.balanced_X[idx], self.balanced_Y[idx]) + + +AKI_COLUMNS = [ + "AKI", + "AKI_STAGE_7DAY", + "CREATININE_MAX", + "CREATININE_MIN", + "CREAT", + "EGFR", + "POTASSIUM_MAX", + "GLUCOSE_MAX", + "PLATELET_MIN", + "BUN_MAX", + "WBC_MIN", + "PLATELET_MAX", + "TEMPC_MEAN", + "GLUCOSE_MEAN", + "PTT_MAX", + "TEMPC_MIN", + "BUN_MIN", + "HEMATOCRIT_MIN", + "SPO2_MEAN", + "MEANBP_MEAN", + "AGE", + "DBSOURCE", + 
"HEARTRATE_MEAN", + "PT_MAX", + "TEMPC_MAX", + "RESPRATE_MEAN", + "CHLORIDE_MAX", + "GLUCOSE_MIN", + "WBC_MAX", + "DIASBP_MEAN", + "SYSBP_MAX", + "DIASBP_MIN", + "CHLORIDE_MIN", + "SPO2_MIN", + "HEARTRATE_MAX", + "HEMOGLOBIN_MAX", + "SYSBP_MEAN", + "HEMATOCRIT_MAX", + "DIASBP_MAX", + "HEARTRATE_MIN", + "SYSBP_MIN", + "SODIUM_MIN", + "MEANBP_MAX", + "BICARBONATE_MAX", + "MEANBP_MIN", + "SODIUM_MAX", + "ANIONGAP_MAX", + "ANIONGAP_MIN", + "HEMOGLOBIN_MIN", + "LACTATE_MIN", + "BICARBONATE_MIN", + "PTT_MIN", + "PT_MIN", + "BILIRUBIN_MAX", + "RESPRATE_MIN", + "LACTATE_MAX", + "RESPRATE_MAX", + "ALBUMIN_MIN", + "POTASSIUM_MIN", + "INR_MAX", + "ALBUMIN_MAX", + "BILIRUBIN_MIN", + "INR_MIN", + "BANDS_MIN", + "ETHNICITY", + "BANDS_MAX", + "HYPERTENSION", + "DIABETES_UNCOMPLICATED", + "VALVULAR_DISEASE", + "CONGESTIVE_HEART_FAILURE", + "SPO2_MAX", + "ALCOHOL_ABUSE", + "GENDER", + "CARDIAC_ARRHYTHMIAS", + "PERIPHERAL_VASCULAR", + "OBESITY", + "HYPOTHYROIDISM", + "DIABETES_COMPLICATED", + "LIVER_DISEASE", + "DRUG_ABUSE", + "RENAL_FAILURE", +] diff --git a/experiments/models/__init__.py b/experiments/models/__init__.py index dcc83fe..8584e17 100644 --- a/experiments/models/__init__.py +++ b/experiments/models/__init__.py @@ -1 +1,2 @@ -from .liver_tumor import ResNet50LiverTumor \ No newline at end of file +from .liver_tumor import ResNet50LiverTumor +from .aki import AKIClassifier \ No newline at end of file diff --git a/experiments/models/aki.py b/experiments/models/aki.py new file mode 100644 index 0000000..fb8f195 --- /dev/null +++ b/experiments/models/aki.py @@ -0,0 +1,39 @@ +from torch import nn, Tensor +from huggingface_hub import PyTorchModelHubMixin +from typing import Optional + + +class AKIClassifier(nn.Module, PyTorchModelHubMixin): + + def __init__(self, input_size: int, n_classes: int): + super(AKIClassifier, self).__init__() + + is_binary = n_classes <= 2 + + layers = [ + self.nn_block(input_size, 256), + self.nn_block(256), + self.nn_block(256), + self.nn_block(256), + self.nn_block(256), + self.nn_block(256), + self.nn_block(256), + ] + + if is_binary: + layers.extend((nn.Linear(256, 1), nn.Sigmoid(), nn.Flatten(0, -1))) + else: + layers.extend((nn.Linear(256, n_classes), nn.Softmax(dim=-1))) + self.layers = nn.Sequential(*layers) + + def nn_block( + self, in_c: int, out_c: Optional[int] = None, dropout: float = 0.2 + ) -> nn.Module: + return nn.Sequential( + nn.Linear(in_c, out_c or in_c), + nn.ReLU(), + nn.Dropout(dropout), + ) + + def forward(self, x: Tensor) -> Tensor: + return self.layers(x) diff --git a/experiments/scripts/analyze_aki_hpo.py b/experiments/scripts/analyze_aki_hpo.py new file mode 100644 index 0000000..fe88b33 --- /dev/null +++ b/experiments/scripts/analyze_aki_hpo.py @@ -0,0 +1,468 @@ +import os +import gc +from argparse import ArgumentParser, Namespace +from collections import OrderedDict +from typing import Sequence, Type + +import matplotlib.pyplot as plt +from matplotlib.axes import Axes + +from tqdm.auto import tqdm + +from sklearn.metrics import auc + +import numpy as np + +import torch +from torch import nn +from torch.utils.data import DataLoader + +from pnpxai import Experiment +from pnpxai.explainers import ( + Gradient, + GradientXInput, + IntegratedGradients, + KernelShap, + Lime, + LRPEpsilonAlpha2Beta1, + LRPEpsilonGammaBox, + LRPEpsilonPlus, + LRPUniformEpsilon, + SmoothGrad, + VarGrad, + RAP, + Explainer, +) + +from pnpxai import Experiment, AutoExplanationForTSClassification +from pnpxai.core._types import DataSource + +from experiments.datasets 
import AKI_COLUMNS +from experiments.utils import set_seed, get_aki_model_from_hf, get_aki_dataset_from_hf + +import json + + +def get_attrs( + explainer: Explainer, data: DataSource, device: torch.device +) -> np.ndarray: + attrs = [] + for batch in tqdm(data): + x, y = [datum.to(device) for datum in batch] + cur_attrs = explainer.attribute(x, y).detach().cpu().numpy() + attrs.append(cur_attrs) + + return np.concatenate(attrs, axis=0) + + +def get_optimized_attrs( + args: Namespace, + expr: Experiment, + data_ids: Sequence[int], + explainer_id: int, + metric_id: int, + device: torch.device, +) -> np.ndarray: + attrs = [] + metric = expr.manager.get_metrics([metric_id])[0][0] + explainer = expr.manager.get_explainers([explainer_id])[0][0] + n_successful = 0 + + for data_id in data_ids: + try: + optimized = expr.optimize( + data_ids=[data_id], + explainer_id=explainer_id, + metric_id=metric_id, + direction="maximize", + sampler="tpe", # Literal['tpe','random'] + n_trials=args.n_trials, + ) + + datum, _ = expr.manager.get_data([data_id]) + datum = next(iter(datum)) + datum_in, datum_tgt = datum + + attr = optimized.explainer.attribute( + inputs=datum_in.to(device), targets=datum_tgt.to(device) + ) + attr = optimized.postprocessor(attr) + attr = attr.clamp(min=-1 + 1e-9, max=1 - 1e-9).detach().cpu().numpy() + + attrs.append(attr) + + del optimized + n_successful += 1 + except Exception as e: + print( + f"[FAILED!!!] Metric: {metric.__class__.__name__}; Explainer: {explainer.__class__.__name__} with error:\n{e}" + ) + gc.collect() + torch.cuda.empty_cache() + + return np.concatenate(attrs, axis=0) + + +def get_topk_attrs(attrs: np.ndarray, filepath: str): + data_cols = np.array(AKI_COLUMNS[2:]) + n_data_cols = len(data_cols) + + all_in_k = [] + any_in_k = [] + k_cols = [] + + for k in range(2, n_data_cols): + top_ids = attrs.argsort(axis=-1).astype(int) + topk_ids = top_ids[:, -k:] + all_occur = ( + np.isin(topk_ids, [0, 1, 2]).any(-1) # CREAT + & np.isin(topk_ids, [3]).any(-1) # EGFR + ).sum() + any_occur = np.isin(topk_ids, [0, 1, 2, 3, 7, 14, 78]).any(-1).sum() + all_in_k.append(all_occur.item()) + any_in_k.append(any_occur.item()) + + top_col_ids, top_cols_counts = np.unique(topk_ids, return_counts=True) + ids = np.argpartition(-top_cols_counts, kth=k)[:k] + + cur_k_top_cols = dict( + zip( + data_cols[top_col_ids[ids]].tolist(), + top_cols_counts[ids].tolist(), + ) + ) + cur_k_top_cols = OrderedDict( + sorted(cur_k_top_cols.items(), key=lambda x: x[1], reverse=True) + ) + + k_cols.append(cur_k_top_cols) + + dir_path = os.path.dirname(filepath) + if not os.path.exists(dir_path): + os.makedirs(dir_path) + + with open(os.path.join(f"{filepath}.json"), "w") as f: + json.dump({"all_in_k": all_in_k, "any_in_k": any_in_k, "k_cols": k_cols}, f) + + +def analyze( + args: Namespace, + expr: AutoExplanationForTSClassification, + device: torch.device, + data_ids_relative: Sequence, +): + results_root = "results/hpo_analysis_aki" + out_topk_path = os.path.join(results_root, "topk") + out_attr_path = os.path.join(results_root, "attr") + + metric_id = 1 # metric_id to be used as objective: AbPC + + orig_data, _ = expr.manager.get_data() + + expr.predict_batch(data_ids_relative) + + for explainer, explainer_id in zip(*expr.manager.get_explainers()): + exp_name = explainer.__class__.__name__ + + print(f"[{exp_name}] Getting optimized attrs") + attrs = get_optimized_attrs( + args, expr, data_ids_relative, explainer_id, metric_id, device + ) + + attr_dir = os.path.join(out_attr_path, 
explainer.__class__.__name__) + os.makedirs(attr_dir, exist_ok=True) + + attr_path = os.path.join(attr_dir, "optimized.npy") + np.save(attr_path, attrs) + + print(f"[{exp_name}] Plotting original topk attrs") + get_topk_attrs(attrs, os.path.join(out_topk_path, f"{exp_name}_optimized")) + + print(f"[{exp_name}] Getting original attrs") + attrs = get_attrs(explainer, orig_data, device) + + os.makedirs(attr_dir, exist_ok=True) + attr_path = os.path.join(attr_dir, "original.npy") + np.save(attr_path, attrs) + + get_topk_attrs(attrs, os.path.join(out_topk_path, exp_name)) + + +def filter_and_normalize(data: np.ndarray) -> np.ndarray: + data = data[~np.isnan(data).any(axis=-1)] + data = data[~(data == 0).all(axis=-1)] + + data_min = np.min(data, axis=-1, keepdims=True) + data_max = np.max(data, axis=-1, keepdims=True) + data = (data - data_min) / (data_max - data_min) + return data + + +def get_rma(data: np.ndarray, top_columns: Sequence) -> float: + data = filter_and_normalize(data) + + gt_sum = data[:, top_columns].sum(axis=-1) + all_sum = data.sum(axis=-1) + + rma = gt_sum / all_sum + rma = rma[~np.isnan(rma)] + rma = rma.mean().item() + + return rma + + +def get_rra(data: np.ndarray, top_columns: Sequence) -> float: + data = filter_and_normalize(data) + + data = np.nan_to_num(data, nan=0) + n_top_cols = len(top_columns) + sorted_ids = data.argsort(axis=-1)[:, -n_top_cols:] + rra = np.isin(sorted_ids, top_columns, assume_unique=True).astype(int).sum(axis=-1) + rra = rra / len(top_columns) + rra = rra.mean().item() + return rra + + +def get_auc(data: Sequence) -> float: + data_len = len(data) + x = np.arange(data_len) + y = np.array(data) / data_len + return auc(x, y) + + +def plot_explainers_comparison_bar_chart( + ax: Axes, + explainer_names: Sequence, + orig_val_getter: callable, + opt_val_getter: callable, + explainer_meta_map: dict, +) -> Axes: + w, x = 0.4, np.arange(len(explainer_names)) + for idx, explainer_name in enumerate(explainer_names): + orig_val = orig_val_getter(explainer_name) + opt_val = opt_val_getter(explainer_name) + + ax.bar( + x[idx] - w / 2, + orig_val, + width=w, + color=explainer_meta_map[explainer_name][1] + "40", + edgecolor="#d9d9d9", + linewidth=2, + ) + ax.bar( + x[idx] + w / 2, + opt_val, + width=w, + label=explainer_meta_map[explainer_name][0], + color=explainer_meta_map[explainer_name][1], + edgecolor="#808080", + linewidth=2, + ) + + return ax + + +def visualize(): + plt.rcParams["text.usetex"] = True + plt.rcParams["font.family"] = "Times New Roman" + plt.rcParams["pdf.fonttype"] = 42 + plt.rcParams["mathtext.fontset"] = "stix" + plt.rcParams["axes.unicode_minus"] = False + + EXPLAINER_MAP = { + "Gradient": ("Gradient", "#aec7e8"), + "GradientXInput": ("Gradient$\\times$Input", "#ffbb78"), + "IntegratedGradients": ("Int. 
Gradients", "#d62628"), + "KernelShap": ("KernelSHAP", "#ff9896"), + "Lime": ("Lime", "#c7c7c7"), + "LRPEpsilonAlpha2Beta1": ("LRP-$\\epsilon \\alpha_2 \\beta_1$", "#c5b1d5"), + "LRPEpsilonGammaBox": ("LRP-$\\epsilon \gamma \\mathcal{B}$", "#8c564b"), + "LRPEpsilonPlus": ("LRP-$\\epsilon^+$", "#e377c2"), + "LRPUniformEpsilon": ("LRP-Uniform-$\\epsilon$", "#f7b6d2"), + "RAP": ("RAP", "#bcbd22"), + "SmoothGrad": ("SmoothGrad", "#16becf"), + "VarGrad": ("VarGrad", "#9ddae5"), + } + + TOP_COLUMNS = [ + 0, # CREAT + 1, # CREAT + 2, # CREAT + 3, # EGFR + 7, # BUN + 14, # BUN + 78, # RENAL_DISEASE + ] + + fig = plt.figure(figsize=(15, 5)) + axes: Sequence[Axes] + axes = fig.subplots(1, 3) + + y_ticks = np.arange(0, 11, 2) / 10 + y_label_kwargs = {"fontname": "Times New Roman", "size": 16, "fontweight": "bold"} + + attr_path = "results/hpo_analysis_aki/topk" + + exp_top_k_feats = {} + exp_all_in_k = {} + + exp_top_k_feats_optimized = {} + exp_all_in_k_optimized = {} + + def read_file(filepath): + with open(os.path.join(filepath), "r") as f: + data = json.loads(f.read()) + n_items = max(data["any_in_k"]) + all_in_k = [datum / n_items for datum in data["all_in_k"]] + top_k_feats = list(data["k_cols"][5].keys()) + return top_k_feats, all_in_k + + for file in os.listdir(attr_path): + if not file.endswith(".json"): + continue + + file_data = read_file(os.path.join(attr_path, file)) + + explainer_name = file.split(".")[0] + is_optimized = "optimized" in explainer_name + if is_optimized: + explainer_name = explainer_name[:-10] + ( + exp_top_k_feats_optimized[explainer_name], + exp_all_in_k_optimized[explainer_name], + ) = file_data + else: + exp_top_k_feats[explainer_name], exp_all_in_k[explainer_name] = file_data + + attr_path = "results/hpo_analysis_aki/attr" + + exp_attrs_orig = {} + exp_attrs_opt = {} + + for explainer in os.listdir(attr_path): + exp_attrs_opt[explainer] = np.load( + os.path.join(attr_path, explainer, "optimized.npy") + ) + exp_attrs_orig[explainer] = np.load( + os.path.join(attr_path, explainer, "original.npy") + ) + + explainer_names = sorted( + list(set(exp_attrs_orig.keys()).intersection(set(exp_attrs_opt.keys()))) + ) + + axes[0] = plot_explainers_comparison_bar_chart( + axes[0], + explainer_names, + lambda name: get_auc(exp_all_in_k[name]), + lambda name: get_auc(exp_all_in_k_optimized[name]), + EXPLAINER_MAP, + ) + axes[0].set_ylabel("AUC of Recall@K", **y_label_kwargs) + + axes[1] = plot_explainers_comparison_bar_chart( + axes[1], + explainer_names, + lambda name: get_rra(exp_attrs_orig[name], TOP_COLUMNS), + lambda name: get_rra(exp_attrs_opt[name], TOP_COLUMNS), + EXPLAINER_MAP, + ) + axes[1].set_ylabel("Relevance Mass Accuracy", **y_label_kwargs) + + axes[2] = plot_explainers_comparison_bar_chart( + axes[2], + explainer_names, + lambda name: get_rra(exp_attrs_orig[name], TOP_COLUMNS), + lambda name: get_rra(exp_attrs_opt[name], TOP_COLUMNS), + EXPLAINER_MAP, + ) + axes[2].set_ylabel("Relevance Rank Accuracy", **y_label_kwargs) + + for ax in axes: + ax.grid(True) + ax.set_xticks([]) + ax.set_yticks(y_ticks) + ax.set_title("Attribution Methods", fontsize=20) + + handles, labels = axes[0].get_legend_handles_labels() + # fig.legend(handles, labels, loc='lower center', bbox_to_anchor=(0.34, -0.03), fancybox=True, ncol=5) + fig.legend( + handles, + labels, + loc="lower center", + bbox_to_anchor=(0.5, -0.08), + fancybox=True, + ncol=6, + ) + + fig.fontsize = 12 + fig.tight_layout() + fig.savefig( + "results/hpo_analysis_aki/explanations_summary.pdf", bbox_inches="tight" + ) + + 
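+# Example invocation (illustrative; the flags mirror the argparse options
+# defined in main() below, and the module path follows the other experiment
+# scripts in this repository):
+#
+#   python -m experiments.scripts.analyze_aki_hpo --analyze --visualize --n_trials 20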
+def main(): + """Main execution function.""" + # arguments + parser = ArgumentParser() + parser.add_argument( + "--n_trials", + default=20, + type=int, + help="Number of trials for hyper-parameter optimization", + ) + parser.add_argument( + "--analyze", + action="store_true", + help="Enable analysis mode", + ) + parser.add_argument( + "--visualize", + action="store_true", + help="Enable visualization mode", + ) + args = parser.parse_args() + + # setup + set_seed(42) + batch_size = 1 + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + model = get_aki_model_from_hf(repo_id="enver1323/aki-classifier") + model = model.to(device) + model.eval() + + dataset = get_aki_dataset_from_hf(repo_id="enver1323/mimic3-aki-detection") + if not dataset: + raise ValueError(f"Could not load dataset containing") + + data_ids_relative = range(len(dataset)) + + loader = DataLoader( + dataset, + batch_size=batch_size, + pin_memory=True, + num_workers=2, + shuffle=False, + ) + + # create auto explanation + expr = AutoExplanationForTSClassification( + model=model, + data=loader, + input_extractor=lambda batch: batch[0].to(device), + label_extractor=lambda batch: batch[-1].to(device), + target_extractor=lambda outputs: outputs.argmax(-1).to(device), + target_labels=False, + ) + + if args.analyze: + analyze(args, expr, device, data_ids_relative) + + if args.visualize: + visualize() + + +if __name__ == "__main__": + main() diff --git a/experiments/utils/__init__.py b/experiments/utils/__init__.py index 67c4f3c..2570e37 100644 --- a/experiments/utils/__init__.py +++ b/experiments/utils/__init__.py @@ -21,7 +21,8 @@ get_vqa_dataset, get_livertumor_dataset, get_livertumor_dataset_from_hf, - get_imagenet_sample_from_hf + get_imagenet_sample_from_hf, + get_aki_dataset_from_hf, ) from .models import ( @@ -36,6 +37,7 @@ get_vilt_model, get_vilt_processor, vilt_collate_fn, + get_aki_model_from_hf, ) __all__ = [ @@ -50,9 +52,11 @@ 'VQADataset', 'get_vqa_dataset', 'get_livertumor_dataset', 'get_livertumor_dataset_from_hf', 'get_imagenet_sample_from_hf', + 'get_aki_dataset_from_hf', # models 'get_torchvision_model', 'get_livertumor_model', 'get_livertumor_model_from_hf', 'Bert', 'get_bert_model', 'bert_collate_fn', 'get_bert_tokenizer', 'Vilt', 'get_vilt_model', 'get_vilt_processor', 'vilt_collate_fn', + 'get_aki_model_from_hf', ] \ No newline at end of file diff --git a/experiments/utils/datasets.py b/experiments/utils/datasets.py index 6a9ffdd..b7b8801 100644 --- a/experiments/utils/datasets.py +++ b/experiments/utils/datasets.py @@ -1,4 +1,4 @@ -from typing import Optional, List +from typing import Optional, List, Tuple import os import json import requests @@ -9,19 +9,28 @@ from urllib3 import disable_warnings from urllib3.exceptions import InsecureRequestWarning +import numpy as np +import pandas as pd +from sklearn.preprocessing import StandardScaler + from torch.utils.data import Dataset, Subset, DataLoader from torchvision import transforms from huggingface_hub import hf_hub_download +from datasets import load_dataset -from experiments.datasets import LiverTumorDataset, LiverTumorDatasetHf +from experiments.datasets import ( + LiverTumorDataset, + LiverTumorDatasetHf, + AKIDataset, + AKI_COLUMNS, +) class ImageNetDataset(Dataset): def __init__(self, root_dir, transform=None): self.root_dir = root_dir - self.img_dir = os.path.join(self.root_dir, 'samples/') - self.label_dir = os.path.join( - self.root_dir, 'imagenet_class_index.json') + self.img_dir = os.path.join(self.root_dir, "samples/") + 
self.label_dir = os.path.join(self.root_dir, "imagenet_class_index.json") with open(self.label_dir) as json_data: self.idx_to_labels = json.load(json_data) @@ -36,7 +45,7 @@ def __len__(self): def __getitem__(self, idx): img_path = os.path.join(self.img_dir, self.img_names[idx]) - image = Image.open(img_path).convert('RGB') + image = Image.open(img_path).convert("RGB") label = idx if self.transform: @@ -67,12 +76,14 @@ class ImageNetValDataset(Dataset): def __init__(self, img_dir, label_file, class_index_file, transform=None): self.img_dir = img_dir self.transform = transform - self.image_files = sorted([f for f in os.listdir(img_dir) if f.endswith('.JPEG')]) + self.image_files = sorted( + [f for f in os.listdir(img_dir) if f.endswith(".JPEG")] + ) - with open(label_file, 'r') as f: + with open(label_file, "r") as f: self.labels = [line.strip() for line in f] - with open(class_index_file, 'r') as f: + with open(class_index_file, "r") as f: self.class_index = json.load(f) self.synset_to_idx = {v[0]: int(k) for k, v in self.class_index.items()} @@ -82,8 +93,8 @@ def __len__(self): def __getitem__(self, idx): img_name = self.image_files[idx] img_path = os.path.join(self.img_dir, img_name) - image = Image.open(img_path).convert('RGB') - label_synset = self.labels[idx].split(' ')[-1] + image = Image.open(img_path).convert("RGB") + label_synset = self.labels[idx].split(" ")[-1] label = self.synset_to_idx[label_synset] if self.transform: @@ -93,16 +104,18 @@ def __getitem__(self, idx): def get_imagenet_val_dataset(transform, root_dir): - img_dir = os.path.join(root_dir, 'ImageNet1k', 'val', 'val') - info_dir = os.path.join(root_dir, 'ImageNet1k_info') - val_label_file = os.path.join(info_dir, 'ImageNet_val_label.txt') - val_class_index_file = os.path.join(info_dir, 'ImageNet_class_index.json') - dataset = ImageNetValDataset(img_dir, val_label_file, val_class_index_file, transform) + img_dir = os.path.join(root_dir, "ImageNet1k", "val", "val") + info_dir = os.path.join(root_dir, "ImageNet1k_info") + val_label_file = os.path.join(info_dir, "ImageNet_val_label.txt") + val_class_index_file = os.path.join(info_dir, "ImageNet_class_index.json") + dataset = ImageNetValDataset( + img_dir, val_label_file, val_class_index_file, transform + ) return dataset class IMDBDataset(Dataset): - def __init__(self, split='test'): + def __init__(self, split="test"): super().__init__() # data_iter = IMDB(split=split) # self.annotations = [(line, label-1) for label, line in tqdm(data_iter)] @@ -114,16 +127,17 @@ def __getitem__(self, idx): return self.annotations[idx] -def get_imdb_dataset(split='test'): +def get_imdb_dataset(split="test"): return IMDBDataset(split=split) + disable_warnings(InsecureRequestWarning) class VQADataset(Dataset): def __init__(self): super().__init__() - res = requests.get('https://visualqa.org/balanced_data.json') + res = requests.get("https://visualqa.org/balanced_data.json") self.annotations = eval(res.text) def __len__(self): @@ -131,12 +145,12 @@ def __len__(self): def __getitem__(self, idx): data = self.annotations[idx] - if isinstance(data['original_image'], str): + if isinstance(data["original_image"], str): print(f"Requesting {data['original_image']}...") - res = requests.get(data['original_image'], verify=False) - img = Image.open(BytesIO(res.content)).convert('RGB') - data['original_image'] = img - return data['original_image'], data['question'], data['original_answer'] + res = requests.get(data["original_image"], verify=False) + img = Image.open(BytesIO(res.content)).convert("RGB") 
+ data["original_image"] = img + return data["original_image"], data["question"], data["original_answer"] def get_vqa_dataset(): @@ -179,7 +193,9 @@ def get_livertumor_dataset_from_hf( A PyTorch Dataset containing only the data for the requested indices. """ if indices is None: - print("Warning: No indices provided. Attempting to load metadata only, but image loading might fail later or be inefficient.") + print( + "Warning: No indices provided. Attempting to load metadata only, but image loading might fail later or be inefficient." + ) dataset_local_dir = os.path.join(data_root, hf_repo_id.replace("/", "_")) os.makedirs(dataset_local_dir, exist_ok=True) @@ -205,36 +221,44 @@ def get_livertumor_dataset_from_hf( # 2. Read metadata and filter for requested indices filtered_metadata = [] - required_image_paths = set() # Use set to avoid duplicate downloads + required_image_paths = set() # Use set to avoid duplicate downloads all_metadata = [] try: - with open(metadata_local_path, 'r') as f: + with open(metadata_local_path, "r") as f: all_metadata = [json.loads(line.strip()) for line in f] if indices is not None: - num_total = len(all_metadata) - for idx in indices: - if 0 <= idx < num_total: - entry = all_metadata[idx] - filtered_metadata.append(entry) - required_image_paths.add(entry['sample_path']) - required_image_paths.add(entry['w_sample_path']) - required_image_paths.add(entry['mask_path']) - else: - print(f"Warning: Requested index {idx} is out of range (0-{num_total-1}). Skipping.") + num_total = len(all_metadata) + for idx in indices: + if 0 <= idx < num_total: + entry = all_metadata[idx] + filtered_metadata.append(entry) + required_image_paths.add(entry["sample_path"]) + required_image_paths.add(entry["w_sample_path"]) + required_image_paths.add(entry["mask_path"]) + else: + print( + f"Warning: Requested index {idx} is out of range (0-{num_total-1}). Skipping." + ) else: - print("Warning: Loading without specific indices. Using all metadata entries.") - filtered_metadata = all_metadata # Less efficient if not all images are needed later + print( + "Warning: Loading without specific indices. Using all metadata entries." + ) + filtered_metadata = ( + all_metadata # Less efficient if not all images are needed later + ) except Exception as e: print(f"Error reading or processing metadata file {metadata_local_path}: {e}") raise e if not filtered_metadata: - raise ValueError("No valid metadata found for the requested indices.") + raise ValueError("No valid metadata found for the requested indices.") # 3. Download only the required images - print(f"Downloading {len(required_image_paths)} required image files (if not cached)...") + print( + f"Downloading {len(required_image_paths)} required image files (if not cached)..." + ) for img_rel_path in tqdm(list(required_image_paths), desc="Downloading images"): try: # hf_hub_download will download to the cache or find existing file @@ -255,7 +279,7 @@ def get_livertumor_dataset_from_hf( dataset = LiverTumorDatasetHf( metadata=filtered_metadata, base_download_dir=base_download_dir, - transform=transform + transform=transform, ) print(f"Created dataset with {len(dataset)} instances.") @@ -284,7 +308,9 @@ def get_imagenet_sample_from_hf( A PyTorch Dataset containing only the data for the requested indices. """ if indices is None: - print("Warning: No indices provided. Attempting to load metadata only, but image loading might fail later or be inefficient.") + print( + "Warning: No indices provided. 
Attempting to load metadata only, but image loading might fail later or be inefficient." + ) dataset_local_dir = os.path.join(data_root, hf_repo_id.replace("/", "_")) os.makedirs(dataset_local_dir, exist_ok=True) @@ -305,39 +331,49 @@ def get_imagenet_sample_from_hf( print(f"Base download/cache directory: {base_download_dir}") except Exception as e: - print(f"Failed to download imagenet_class_index.json from Hugging Face Hub: {e}") + print( + f"Failed to download imagenet_class_index.json from Hugging Face Hub: {e}" + ) raise e # 2. Read metadata and filter for requested indices filtered_metadata = {} - required_image_paths = set() # Use set to avoid duplicate downloads + required_image_paths = set() # Use set to avoid duplicate downloads all_metadata = {} try: - with open(metadata_local_path, 'r') as f: + with open(metadata_local_path, "r") as f: all_metadata = json.load(f) if indices is not None: - num_total = len(all_metadata) - for idx in indices: - if 0 <= idx < num_total: + num_total = len(all_metadata) + for idx in indices: + if 0 <= idx < num_total: metadata = all_metadata[str(idx)] filtered_metadata[idx] = metadata required_image_paths.add(f'samples/{"_".join(metadata)}.JPEG') - else: - print(f"Warning: Requested index {idx} is out of range (0-{num_total-1}). Skipping.") + else: + print( + f"Warning: Requested index {idx} is out of range (0-{num_total-1}). Skipping." + ) else: - print("Warning: Loading without specific indices. Using all metadata entries.") - filtered_metadata = {int(k): all_metadata[k] for k in all_metadata} # Less efficient if not all images are needed later + print( + "Warning: Loading without specific indices. Using all metadata entries." + ) + filtered_metadata = { + int(k): all_metadata[k] for k in all_metadata + } # Less efficient if not all images are needed later except Exception as e: print(f"Error reading or processing metadata file {metadata_local_path}: {e}") raise e if not filtered_metadata: - raise ValueError("No valid metadata found for the requested indices.") + raise ValueError("No valid metadata found for the requested indices.") # 3. Download only the required images - print(f"Downloading {len(required_image_paths)} required image files (if not cached)...") + print( + f"Downloading {len(required_image_paths)} required image files (if not cached)..." + ) for img_rel_path in tqdm(list(required_image_paths), desc="Downloading images"): try: # hf_hub_download will download to the cache or find existing file @@ -356,6 +392,30 @@ def get_imagenet_sample_from_hf( # 4. 
Create and return the Dataset using filtered metadata and base download dir fp_img = os.path.join(base_download_dir, list(required_image_paths)[0]) - img = transform(Image.open(fp_img).convert('RGB')) + img = transform(Image.open(fp_img).convert("RGB")) label = all_metadata[str(indices[0])][-1] - return img, label \ No newline at end of file + return img, label + + +def get_aki_dataset_from_hf( + repo_id: str = "enver1323/mimic3-aki-detection", + test_split: float = 0.2, +) -> AKIDataset: + data = load_dataset(repo_id)["train"].to_pandas() + + data = data.replace([np.inf, -np.inf], np.nan).dropna() + data = data[AKI_COLUMNS] + + scaler = StandardScaler() + scaler.fit(data.iloc[:, 2:]) + data.iloc[:, 2:] = scaler.transform(data.iloc[:, 2:]) + + n_entries = len(data) + df_test = data.iloc[-int(test_split * n_entries) :, :]w + + x_data = df_test.drop(["AKI_STAGE_7DAY", "AKI"], axis=1).values + y_data = df_test["AKI_STAGE_7DAY"].values + + dataset = AKIDataset(x_data, y_data) + + return dataset diff --git a/experiments/utils/models.py b/experiments/utils/models.py index 8bc5f07..7d86a29 100644 --- a/experiments/utils/models.py +++ b/experiments/utils/models.py @@ -4,7 +4,7 @@ from transformers import BertTokenizer, BertForSequenceClassification from transformers import ViltForQuestionAnswering, ViltProcessor -from experiments.models import ResNet50LiverTumor +from experiments.models import ResNet50LiverTumor, AKIClassifier def get_torchvision_model(model_name): @@ -18,14 +18,19 @@ def get_livertumor_model(model_path): model = ResNet50LiverTumor(in_channels=1, num_classes=2) checkpoint = torch.load(model_path) - state_dict = {k.replace('model.', '', 1).replace('module.', '', 1): v for k, v in checkpoint.items()} + state_dict = { + k.replace("model.", "", 1).replace("module.", "", 1): v + for k, v in checkpoint.items() + } model.load_state_dict(state_dict, strict=True) model.eval() - transform = transforms.Compose([ - transforms.ToTensor(), - transforms.Resize((224, 224), antialias=False), - ]) + transform = transforms.Compose( + [ + transforms.ToTensor(), + transforms.Resize((224, 224), antialias=False), + ] + ) return model, transform @@ -34,21 +39,27 @@ def get_livertumor_model_from_hf(repo_id="seongun/resnet50-livertumor"): model = ResNet50LiverTumor.from_pretrained(repo_id) model.eval() - transform = transforms.Compose([ - transforms.ToTensor(), - transforms.Resize((224, 224), antialias=False), - ]) + transform = transforms.Compose( + [ + transforms.ToTensor(), + transforms.Resize((224, 224), antialias=False), + ] + ) return model, transform class Bert(BertForSequenceClassification): def forward(self, input_ids, token_type_ids, attention_mask): - return super().forward( - input_ids=input_ids, - token_type_ids=token_type_ids, - attention_mask=attention_mask - ).logits + return ( + super() + .forward( + input_ids=input_ids, + token_type_ids=token_type_ids, + attention_mask=attention_mask, + ) + .logits + ) def get_bert_model(model_name, num_labels): @@ -60,7 +71,7 @@ def bert_collate_fn(batch, tokenizer=None): [d[0] for d in batch], padding=True, truncation=True, - return_tensors='pt', + return_tensors="pt", ) labels = torch.tensor([d[1] for d in batch]) return tuple(inputs.values()), labels @@ -79,13 +90,17 @@ def forward( attention_mask, pixel_mask, ): - return super().forward( - input_ids=input_ids, - token_type_ids=token_type_ids, - attention_mask=attention_mask, - pixel_values=pixel_values, - pixel_mask=pixel_mask, - ).logits + return ( + super() + .forward( + input_ids=input_ids, + 
token_type_ids=token_type_ids, + attention_mask=attention_mask, + pixel_values=pixel_values, + pixel_mask=pixel_mask, + ) + .logits + ) def get_vilt_model(model_name): @@ -104,14 +119,18 @@ def vilt_collate_fn(batch, processor=None, label2id=None): text=qsts, padding=True, truncation=True, - return_tensors='pt', + return_tensors="pt", ) labels = torch.tensor([label2id[d[2]] for d in batch]) return ( - inputs['pixel_values'], - inputs['input_ids'], - inputs['token_type_ids'], - inputs['attention_mask'], - inputs['pixel_mask'], + inputs["pixel_values"], + inputs["input_ids"], + inputs["token_type_ids"], + inputs["attention_mask"], + inputs["pixel_mask"], labels, - ) \ No newline at end of file + ) + + +def get_aki_model_from_hf(repo_id: str = "enver1323/aki-classifier") -> AKIClassifier: + return AKIClassifier.from_pretrained(repo_id) From 3ecd81030002d280757cdd969886783d77b57009 Mon Sep 17 00:00:00 2001 From: Enver Menadjiev Date: Thu, 13 Nov 2025 14:37:40 +0900 Subject: [PATCH 08/20] fix: remove datasets.py typo --- experiments/utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/experiments/utils/datasets.py b/experiments/utils/datasets.py index b7b8801..78c24ac 100644 --- a/experiments/utils/datasets.py +++ b/experiments/utils/datasets.py @@ -411,7 +411,7 @@ def get_aki_dataset_from_hf( data.iloc[:, 2:] = scaler.transform(data.iloc[:, 2:]) n_entries = len(data) - df_test = data.iloc[-int(test_split * n_entries) :, :]w + df_test = data.iloc[-int(test_split * n_entries) :, :] x_data = df_test.drop(["AKI_STAGE_7DAY", "AKI"], axis=1).values y_data = df_test["AKI_STAGE_7DAY"].values From c6d04f427069dd3e67f444e05cec4eac343f7382 Mon Sep 17 00:00:00 2001 From: Enver Menadjiev Date: Tue, 18 Nov 2025 15:33:12 +0900 Subject: [PATCH 09/20] feat: add ecg experiment --- experiments/models/__init__.py | 3 +- experiments/models/ecg/__init__.py | 2 + experiments/models/ecg/patchtst.py | 704 +++++++++++++++++++++++++ experiments/models/ecg/resnet_plus.py | 233 ++++++++ experiments/scripts/analyze_ecg_hpo.py | 368 +++++++++++++ experiments/utils/__init__.py | 3 + experiments/utils/datasets.py | 19 +- experiments/utils/models.py | 8 +- 8 files changed, 1336 insertions(+), 4 deletions(-) create mode 100644 experiments/models/ecg/__init__.py create mode 100644 experiments/models/ecg/patchtst.py create mode 100644 experiments/models/ecg/resnet_plus.py create mode 100644 experiments/scripts/analyze_ecg_hpo.py diff --git a/experiments/models/__init__.py b/experiments/models/__init__.py index 8584e17..fe5d623 100644 --- a/experiments/models/__init__.py +++ b/experiments/models/__init__.py @@ -1,2 +1,3 @@ from .liver_tumor import ResNet50LiverTumor -from .aki import AKIClassifier \ No newline at end of file +from .aki import AKIClassifier +from .ecg import ResNetPlus, PatchTST \ No newline at end of file diff --git a/experiments/models/ecg/__init__.py b/experiments/models/ecg/__init__.py new file mode 100644 index 0000000..88824f2 --- /dev/null +++ b/experiments/models/ecg/__init__.py @@ -0,0 +1,2 @@ +from .patchtst import PatchTST +from .resnet_plus import ResNetPlus diff --git a/experiments/models/ecg/patchtst.py b/experiments/models/ecg/patchtst.py new file mode 100644 index 0000000..fecc95d --- /dev/null +++ b/experiments/models/ecg/patchtst.py @@ -0,0 +1,704 @@ +from typing import Optional + +import torch +from torch import nn, Tensor +import torch.nn.functional as F + +from huggingface_hub import PyTorchModelHubMixin + + +class Transpose(nn.Module): + def __init__(self, *dims, 
contiguous=False): + super(Transpose, self).__init__() + self.dims, self.contiguous = dims, contiguous + + def forward(self, x): + if self.contiguous: + return x.transpose(*self.dims).contiguous() + else: + return x.transpose(*self.dims) + + def __repr__(self): + if self.contiguous: + return f"{self.__class__.__name__}(dims={', '.join([str(d) for d in self.dims])}).contiguous()" + else: + return ( + f"{self.__class__.__name__}({', '.join([str(d) for d in self.dims])})" + ) + + +pytorch_acts = [ + nn.ELU, + nn.LeakyReLU, + nn.PReLU, + nn.ReLU, + nn.ReLU6, + nn.SELU, + nn.CELU, + nn.GELU, + nn.Sigmoid, + nn.Softplus, + nn.Tanh, + nn.Softmax, +] +pytorch_act_names = [a.__name__.lower() for a in pytorch_acts] + + +def get_act_fn(act, **act_kwargs): + if act is None: + return + elif isinstance(act, nn.Module): + return act + elif callable(act): + return act(**act_kwargs) + idx = pytorch_act_names.index(act.lower()) + return pytorch_acts[idx](**act_kwargs) + + +class RevIN(nn.Module): + def __init__( + self, + c_in: int, + affine: bool = True, + subtract_last: bool = False, + dim: int = 2, + eps: float = 1e-5, + ): + super().__init__() + self.c_in, self.affine, self.subtract_last, self.dim, self.eps = ( + c_in, + affine, + subtract_last, + dim, + eps, + ) + if self.affine: + self.weight = nn.Parameter(torch.ones(1, c_in, 1)) + self.bias = nn.Parameter(torch.zeros(1, c_in, 1)) + + def forward(self, x: Tensor, mode: Tensor): + if mode: + return self.normalize(x) + else: + return self.denormalize(x) + + def normalize(self, x): + if self.subtract_last: + self.sub = x[..., -1].unsqueeze(-1).detach() + else: + self.sub = torch.mean(x, dim=-1, keepdim=True).detach() + self.std = ( + torch.std(x, dim=-1, keepdim=True, unbiased=False).detach() + self.eps + ) + if self.affine: + x = x.sub(self.sub) + x = x.div(self.std) + x = x.mul(self.weight) + x = x.add(self.bias) + return x + else: + x = x.sub(self.sub) + x = x.div(self.std) + return x + + def denormalize(self, x): + if self.affine: + x = x.sub(self.bias) + x = x.div(self.weight) + x = x.mul(self.std) + x = x.add(self.sub) + return x + else: + x = x.mul(self.std) + x = x.add(self.sub) + return x + + +class MovingAverage(nn.Module): + def __init__( + self, + kernel_size: int, + ): + super().__init__() + padding_left = (kernel_size - 1) // 2 + padding_right = kernel_size - padding_left - 1 + self.padding = torch.nn.ReplicationPad1d((padding_left, padding_right)) + self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=1) + + def forward(self, x: Tensor): + return self.avg(self.padding(x)) + + +class SeriesDecomposition(nn.Module): + def __init__( + self, + kernel_size: int, # the size of the window + ): + super().__init__() + self.moving_avg = MovingAverage(kernel_size) + + def forward(self, x: Tensor): + moving_mean = self.moving_avg(x) + residual = x - moving_mean + return residual, moving_mean + + +class _ScaledDotProductAttention(nn.Module): + def __init__(self, d_model, n_heads, attn_dropout=0.0, res_attention=False): + super().__init__() + self.attn_dropout = nn.Dropout(attn_dropout) + self.res_attention = res_attention + head_dim = d_model // n_heads + self.scale = nn.Parameter(torch.tensor(head_dim**-0.5), requires_grad=False) + + def forward(self, q: Tensor, k: Tensor, v: Tensor, prev: Optional[Tensor] = None): + attn_scores = torch.matmul(q, k) * self.scale + + if prev is not None: + attn_scores = attn_scores + prev + + attn_weights = F.softmax(attn_scores, dim=-1) + attn_weights = self.attn_dropout(attn_weights) + + output = 
torch.matmul(attn_weights, v) + + if self.res_attention: + return output, attn_weights, attn_scores + else: + return output, attn_weights + + +class _MultiheadAttention(nn.Module): + def __init__( + self, + d_model, + n_heads, + d_k=None, + d_v=None, + res_attention=False, + attn_dropout=0.0, + proj_dropout=0.0, + qkv_bias=True, + ): + "Multi Head Attention Layer" + + super().__init__() + d_k = d_v = d_model // n_heads + + self.n_heads, self.d_k, self.d_v = n_heads, d_k, d_v + + self.W_Q = nn.Linear(d_model, d_k * n_heads, bias=qkv_bias) + self.W_K = nn.Linear(d_model, d_k * n_heads, bias=qkv_bias) + self.W_V = nn.Linear(d_model, d_v * n_heads, bias=qkv_bias) + + # Scaled Dot-Product Attention (multiple heads) + self.res_attention = res_attention + self.sdp_attn = _ScaledDotProductAttention( + d_model, + n_heads, + attn_dropout=attn_dropout, + res_attention=self.res_attention, + ) + + # Poject output + self.to_out = nn.Sequential( + nn.Linear(n_heads * d_v, d_model), nn.Dropout(proj_dropout) + ) + + def forward( + self, + Q: Tensor, + K: Optional[Tensor] = None, + V: Optional[Tensor] = None, + prev: Optional[Tensor] = None, + ): + bs = Q.size(0) + if K is None: + K = Q + if V is None: + V = Q + + # Linear (+ split in multiple heads) + q_s = ( + self.W_Q(Q).view(bs, -1, self.n_heads, self.d_k).transpose(1, 2) + ) # q_s: [bs x n_heads x max_q_len x d_k] + k_s = ( + self.W_K(K).view(bs, -1, self.n_heads, self.d_k).permute(0, 2, 3, 1) + ) # k_s: [bs x n_heads x d_k x q_len] - transpose(1,2) + transpose(2,3) + v_s = ( + self.W_V(V).view(bs, -1, self.n_heads, self.d_v).transpose(1, 2) + ) # v_s: [bs x n_heads x q_len x d_v] + + # Apply Scaled Dot-Product Attention (multiple heads) + if self.res_attention: + output, attn_weights, attn_scores = self.sdp_attn(q_s, k_s, v_s, prev=prev) + else: + output, attn_weights = self.sdp_attn(q_s, k_s, v_s) + # output: [bs x n_heads x q_len x d_v], attn: [bs x n_heads x q_len x q_len], scores: [bs x n_heads x max_q_len x q_len] + + # back to the original inputs dimensions + output = ( + output.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * self.d_v) + ) # output: [bs x q_len x n_heads * d_v] + output = self.to_out(output) + + if self.res_attention: + return output, attn_weights, attn_scores + else: + return output, attn_weights + + +class Flatten_Head(nn.Module): + def __init__(self, individual, n_vars, nf, pred_dim): + super().__init__() + + if isinstance(pred_dim, (tuple, list)): + pred_dim = pred_dim[-1] + self.individual = individual + self.n = n_vars if individual else 1 + self.nf, self.pred_dim = nf, pred_dim + + if individual: + self.layers = nn.ModuleList() + for i in range(self.n): + self.layers.append( + nn.Sequential(nn.Flatten(start_dim=-2), nn.Linear(nf, pred_dim)) + ) + else: + self.layer = nn.Sequential( + nn.Flatten(start_dim=-2), nn.Linear(nf, pred_dim) + ) + + def forward(self, x: Tensor): + """ + Args: + x: [bs x nvars x d_model x n_patch] + output: [bs x nvars x pred_dim] + """ + if self.individual: + x_out = [] + for i, layer in enumerate(self.layers): + x_out.append(layer(x[:, i])) + x = torch.stack(x_out, dim=1) + return x + else: + return self.layer(x) + + +class _TSTiEncoderLayer(nn.Module): + def __init__( + self, + q_len, + d_model, + n_heads, + d_k=None, + d_v=None, + d_ff=256, + store_attn=False, + norm="BatchNorm", + attn_dropout=0, + dropout=0.0, + bias=True, + activation="gelu", + res_attention=False, + pre_norm=False, + ): + super().__init__() + assert ( + not d_model % n_heads + ), f"d_model ({d_model}) must be 
divisible by n_heads ({n_heads})" + d_k = d_model // n_heads if d_k is None else d_k + d_v = d_model // n_heads if d_v is None else d_v + + # Multi-Head attention + self.res_attention = res_attention + self.self_attn = _MultiheadAttention( + d_model, + n_heads, + d_k, + d_v, + attn_dropout=attn_dropout, + proj_dropout=dropout, + res_attention=res_attention, + ) + + # Add & Norm + self.dropout_attn = nn.Dropout(dropout) + if "batch" in norm.lower(): + self.norm_attn = nn.Sequential( + Transpose(1, 2), nn.BatchNorm1d(d_model), Transpose(1, 2) + ) + else: + self.norm_attn = nn.LayerNorm(d_model) + + # Position-wise Feed-Forward + self.ff = nn.Sequential( + nn.Linear(d_model, d_ff, bias=bias), + get_act_fn(activation), + nn.Dropout(dropout), + nn.Linear(d_ff, d_model, bias=bias), + ) + + # Add & Norm + self.dropout_ffn = nn.Dropout(dropout) + if "batch" in norm.lower(): + self.norm_ffn = nn.Sequential( + Transpose(1, 2), nn.BatchNorm1d(d_model), Transpose(1, 2) + ) + else: + self.norm_ffn = nn.LayerNorm(d_model) + + self.pre_norm = pre_norm + self.store_attn = store_attn + + def forward(self, src: Tensor, prev: Optional[Tensor] = None): + """ + Args: + src: [bs x q_len x d_model] + """ + + # Multi-Head attention sublayer + if self.pre_norm: + src = self.norm_attn(src) + ## Multi-Head attention + if self.res_attention: + src2, attn, scores = self.self_attn(src, src, src, prev) + else: + src2, attn = self.self_attn(src, src, src) + if self.store_attn: + self.attn = attn + ## Add & Norm + src = src + self.dropout_attn( + src2 + ) # Add: residual connection with residual dropout + if not self.pre_norm: + src = self.norm_attn(src) + + # Feed-forward sublayer + if self.pre_norm: + src = self.norm_ffn(src) + ## Position-wise Feed-Forward + src2 = self.ff(src) + ## Add & Norm + src = src + self.dropout_ffn( + src2 + ) # Add: residual connection with residual dropout + if not self.pre_norm: + src = self.norm_ffn(src) + + if self.res_attention: + return src, scores + else: + return src + + +class _TSTiEncoder(nn.Module): # i means channel-independent + def __init__( + self, + c_in, + patch_num, + patch_len, + n_layers=3, + d_model=128, + n_heads=16, + d_k=None, + d_v=None, + d_ff=256, + norm="BatchNorm", + attn_dropout=0.0, + dropout=0.0, + act="gelu", + store_attn=False, + res_attention=True, + pre_norm=False, + ): + + super().__init__() + + self.patch_num = patch_num + self.patch_len = patch_len + + # Input encoding + q_len = patch_num + self.W_P = nn.Linear( + patch_len, d_model + ) # Eq 1: projection of feature vectors onto a d-dim vector space + self.seq_len = q_len + + # Positional encoding + W_pos = torch.empty((q_len, d_model)) + nn.init.uniform_(W_pos, -0.02, 0.02) + self.W_pos = nn.Parameter(W_pos) + + # Residual dropout + self.dropout = nn.Dropout(dropout) + + # Encoder + self.layers = nn.ModuleList( + [ + _TSTiEncoderLayer( + q_len, + d_model, + n_heads=n_heads, + d_k=d_k, + d_v=d_v, + d_ff=d_ff, + norm=norm, + attn_dropout=attn_dropout, + dropout=dropout, + activation=act, + res_attention=res_attention, + pre_norm=pre_norm, + store_attn=store_attn, + ) + for i in range(n_layers) + ] + ) + self.res_attention = res_attention + + def forward(self, x: Tensor): + """ + Args: + x: [bs x nvars x patch_len x patch_num] + """ + + n_vars = x.shape[1] + # Input encoding + x = x.permute(0, 1, 3, 2) # x: [bs x nvars x patch_num x patch_len] + x = self.W_P(x) # x: [bs x nvars x patch_num x d_model] + + x = torch.reshape( + x, (x.shape[0] * x.shape[1], x.shape[2], x.shape[3]) + ) # x: [bs * nvars x 
patch_num x d_model] + x = self.dropout(x + self.W_pos) # x: [bs * nvars x patch_num x d_model] + + # Encoder + if self.res_attention: + scores = None + for mod in self.layers: + x, scores = mod(x, prev=scores) + else: + for mod in self.layers: + x = mod(x) + x = torch.reshape( + x, (-1, n_vars, x.shape[-2], x.shape[-1]) + ) # x: [bs x nvars x patch_num x d_model] + x = x.permute(0, 1, 3, 2) # x: [bs x nvars x d_model x patch_num] + + return x + + +class _PatchTST_backbone(nn.Module): + def __init__( + self, + c_in, + seq_len, + pred_dim, + patch_len, + stride, + n_layers=3, + d_model=128, + n_heads=16, + d_k=None, + d_v=None, + d_ff=256, + norm="BatchNorm", + attn_dropout=0.0, + dropout=0.0, + act="gelu", + res_attention=True, + pre_norm=False, + store_attn=False, + padding_patch=True, + individual=False, + revin=True, + affine=True, + subtract_last=False, + ): + + super().__init__() + + self.revin = revin + self.revin_layer = RevIN(c_in, affine=affine, subtract_last=subtract_last) + + self.patch_len = patch_len + self.stride = stride + self.padding_patch = padding_patch + patch_num = int((seq_len - patch_len) / stride + 1) + 1 + self.patch_num = patch_num + self.padding_patch_layer = nn.ReplicationPad1d((stride, 0)) + + self.unfold = nn.Unfold(kernel_size=(1, patch_len), stride=stride) + self.patch_len = patch_len + + self.backbone = _TSTiEncoder( + c_in, + patch_num=patch_num, + patch_len=patch_len, + n_layers=n_layers, + d_model=d_model, + n_heads=n_heads, + d_k=d_k, + d_v=d_v, + d_ff=d_ff, + attn_dropout=attn_dropout, + dropout=dropout, + act=act, + res_attention=res_attention, + pre_norm=pre_norm, + store_attn=store_attn, + ) + + # Head + self.head_nf = d_model * patch_num + self.n_vars = c_in + self.individual = individual + self.head = Flatten_Head(self.individual, self.n_vars, self.head_nf, pred_dim) + + def forward(self, z: Tensor): + """ + Args: + z: [bs x c_in x seq_len] + """ + + if self.revin: + z = self.revin_layer(z, torch.tensor(True, dtype=torch.bool)) + + z = self.padding_patch_layer(z) + b, c, s = z.size() + z = z.reshape(-1, 1, 1, s) + z = self.unfold(z) + z = z.permute(0, 2, 1).reshape(b, c, -1, self.patch_len).permute(0, 1, 3, 2) + + z = self.backbone(z) + z = self.head(z) + + if self.revin: + z = self.revin_layer(z, torch.tensor(False, dtype=torch.bool)) + return z + + +class PatchTST(nn.Module, PyTorchModelHubMixin): + def __init__( + self, + c_in, + c_out, + seq_len, + pred_dim=None, + n_layers=2, + n_heads=8, + d_model=512, + d_ff=2048, + dropout=0.05, + attn_dropout=0.0, + patch_len=16, + stride=8, + padding_patch=True, + revin=True, + affine=False, + individual=False, + subtract_last=False, + decomposition=False, + kernel_size=25, + activation="gelu", + norm="BatchNorm", + pre_norm=False, + res_attention=True, + store_attn=False, + classification=False, + ): + + super().__init__() + + if pred_dim is None: + pred_dim = seq_len + + self.decomposition = decomposition + if self.decomposition: + self.decomp_module = SeriesDecomposition(kernel_size) + self.model_trend = _PatchTST_backbone( + c_in=c_in, + seq_len=seq_len, + pred_dim=pred_dim, + patch_len=patch_len, + stride=stride, + n_layers=n_layers, + d_model=d_model, + n_heads=n_heads, + d_ff=d_ff, + norm=norm, + attn_dropout=attn_dropout, + dropout=dropout, + act=activation, + res_attention=res_attention, + pre_norm=pre_norm, + store_attn=store_attn, + padding_patch=padding_patch, + individual=individual, + revin=revin, + affine=affine, + subtract_last=subtract_last, + ) + self.model_res = _PatchTST_backbone( + 
c_in=c_in, + seq_len=seq_len, + pred_dim=pred_dim, + patch_len=patch_len, + stride=stride, + n_layers=n_layers, + d_model=d_model, + n_heads=n_heads, + d_ff=d_ff, + norm=norm, + attn_dropout=attn_dropout, + dropout=dropout, + act=activation, + res_attention=res_attention, + pre_norm=pre_norm, + store_attn=store_attn, + padding_patch=padding_patch, + individual=individual, + revin=revin, + affine=affine, + subtract_last=subtract_last, + ) + self.patch_num = self.model_trend.patch_num + else: + self.model = _PatchTST_backbone( + c_in=c_in, + seq_len=seq_len, + pred_dim=pred_dim, + patch_len=patch_len, + stride=stride, + n_layers=n_layers, + d_model=d_model, + n_heads=n_heads, + d_ff=d_ff, + norm=norm, + attn_dropout=attn_dropout, + dropout=dropout, + act=activation, + res_attention=res_attention, + pre_norm=pre_norm, + store_attn=store_attn, + padding_patch=padding_patch, + individual=individual, + revin=revin, + affine=affine, + subtract_last=subtract_last, + ) + self.patch_num = self.model.patch_num + self.classification = classification + + def forward(self, x): + if self.decomposition: + res_init, trend_init = self.decomp_module(x) + res = self.model_res(res_init) + trend = self.model_trend(trend_init) + x = res + trend + else: + x = self.model(x) + + if self.classification: + x = x.squeeze(-2) + return x \ No newline at end of file diff --git a/experiments/models/ecg/resnet_plus.py b/experiments/models/ecg/resnet_plus.py new file mode 100644 index 0000000..4b804d8 --- /dev/null +++ b/experiments/models/ecg/resnet_plus.py @@ -0,0 +1,233 @@ +from collections import OrderedDict + +import torch +from torch import nn, Tensor +from torch.nn import functional as F + +from huggingface_hub import PyTorchModelHubMixin + + +class Pad1d(nn.ConstantPad1d): + def __init__(self, padding, value=0.0): + super().__init__(padding, value) + + +def same_padding1d(seq_len, ks, stride=1, dilation=1): + "Same padding formula as used in Tensorflow" + p = (seq_len - 1) * stride + (ks - 1) * dilation + 1 - seq_len + return p // 2, p - p // 2 + + +class SameConv1d(nn.Module): + def __init__( + self, + ni: int, + nf: int, + ks: int = 3, + stride: int = 1, + dilation: int = 1, + **kwargs, + ): + super.__init__(self, SameConv1d) + self.ks, self.stride, self.dilation = ks, stride, dilation + self.conv1d_same = nn.Conv1d( + ni, nf, ks, stride=stride, dilation=dilation, **kwargs + ) + self.weight = self.conv1d_same.weight + self.bias = self.conv1d_same.bias + self.pad = Pad1d + + def forward(self, x: Tensor) -> Tensor: + self.padding = same_padding1d( + x.shape[-1], self.ks, dilation=self.dilation + ) # stride=self.stride not used in padding calculation! 
+ return self.conv1d_same(self.pad(self.padding)(x)) + + +def normal_(tensor: Tensor, mean: float = 0.0, std: float = 1.0) -> Tensor: + if torch.overrides.has_torch_function_variadic(tensor): + return torch.overrides.handle_torch_function( + normal_, (tensor,), tensor=tensor, mean=mean, std=std + ) + with torch.no_grad(): + return tensor.normal_(mean, std) + + +def init_linear(m: nn.Module, act_func=None, init="auto", bias_std=0.01): + if getattr(m, "bias", None) is not None and bias_std is not None: + if bias_std != 0: + normal_(m.bias, 0, bias_std) + else: + m.bias.data.zero_() + + if init == "auto" and act_func in (F.relu_, F.leaky_relu_): + init = torch.nn.init.kaiming_uniform_ + + if callable(init): + init(m.weight) + + +def Conv1d( + ni: int, + nf: int, + kernel_size: int = None, + stride: int = 1, + padding="same", + dilation: int = 1, + init="auto", + bias_std: float = 0.01, + **kwargs, +): + if kernel_size % 2 == 1: + conv = nn.Conv1d( + ni, + nf, + kernel_size, + stride=stride, + padding=kernel_size // 2 * dilation, + dilation=dilation, + **kwargs, + ) + else: + conv = SameConv1d( + ni, nf, kernel_size, stride=stride, dilation=dilation, **kwargs + ) + + init_linear(conv, None, init=init, bias_std=bias_std) + return conv + + +class ConvBlock(nn.Sequential): + def __init__( + self, + ni: int, + nf: int, + kernel_size: int = None, + stride: int = 1, + padding="same", + bias=None, + bias_std: float = 0.01, + bn_1st: bool = True, + act=nn.ReLU, + act_kwargs={}, + init="auto", + dropout=0.0, + xtra=None, + **kwargs, + ): + layers = [] + + conv = Conv1d( + ni, nf, kernel_size, bias=bias, stride=stride, padding=padding, **kwargs + ) + act = None if act is None else act(**act_kwargs) + init_linear(conv, act, init=init, bias_std=bias_std) + + layers += [conv] + act_bn = [] + if act is not None: + act_bn.append(act) + + act_bn.append(self._get_norm(nf)) + + if bn_1st: + act_bn.reverse() + + if dropout: + layers += [nn.Dropout(dropout)] + layers += act_bn + if xtra: + layers.append(xtra) + super().__init__(*layers) + + def _get_norm(self, nf, **kwargs) -> nn.Module: + bn = nn.BatchNorm1d(nf, **kwargs) + if bn.affine: + bn.bias.data.fill_(1e-3) + bn.weight.data.fill_(1.0) + return bn + + +class Flatten(nn.Module): + def __init__(self): + super.__init__(self, Flatten) + + def forward(self, x: Tensor) -> Tensor: + return x.view(x.size(0), -1) + + +class ResBlockPlus(nn.Module): + def __init__( + self, + ni, + nf, + ks=[7, 5, 3], + bn_1st=True, + act=nn.ReLU, + act_kwargs={}, + ): + super(ResBlockPlus, self).__init__() + self.convblock1 = ConvBlock( + ni, nf, ks[0], bn_1st=bn_1st, act=act, act_kwargs=act_kwargs + ) + self.convblock2 = ConvBlock( + nf, nf, ks[1], bn_1st=bn_1st, act=act, act_kwargs=act_kwargs + ) + self.convblock3 = ConvBlock(nf, nf, ks[2], act=None) + + self.shortcut = ConvBlock(ni, nf, 1, act=None) + self.act = act(**act_kwargs) + + self._init_cnn(self) + + def _init_cnn(self, m): + if getattr(self, "bias", None) is not None: + nn.init.constant_(self.bias, 0) + if isinstance(self, (nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.Linear)): + nn.init.kaiming_normal_(self.weight) + for l in m.children(): + self._init_cnn(l) + + def forward(self, x): + res = x + x = self.convblock1(x) + x = self.convblock2(x) + x = self.convblock3(x) + + x = x + self.shortcut(res) + x = self.act(x) + return x + + +class AdaptiveConcatPool1d(nn.Module): + def __init__(self, size=None): + super.__init__(self, AdaptiveConcatPool1d) + self.size = size or 1 + self.ap = nn.AdaptiveAvgPool1d(self.size) + self.mp = 
nn.AdaptiveMaxPool1d(self.size) + + def forward(self, x): + return torch.cat([self.mp(x), self.ap(x)], 1) + + +class GAP1d(nn.Module): + def __init__(self, output_size=1): + super(GAP1d, self).__init__() + self.gap = nn.AdaptiveAvgPool1d(output_size) + + def forward(self, x: Tensor) -> Tensor: + x = self.gap(x) + x = x.reshape(x.shape[0], -1) + return x + + +class ResNetPlus(nn.Sequential, PyTorchModelHubMixin): + def __init__(self, c_in: int, c_out: int, nf: int = 64, **kwargs): + + resblock1 = ResBlockPlus(c_in, nf, **kwargs) + resblock2 = ResBlockPlus(nf, nf * 2, **kwargs) + resblock3 = ResBlockPlus(nf * 2, nf * 2, **kwargs) + backbone = nn.Sequential(resblock1, resblock2, resblock3) + + head = nn.Sequential(GAP1d(1), nn.Linear(nf * 2, c_out)) + super().__init__(OrderedDict([("backbone", backbone), ("head", head)])) diff --git a/experiments/scripts/analyze_ecg_hpo.py b/experiments/scripts/analyze_ecg_hpo.py new file mode 100644 index 0000000..5322b36 --- /dev/null +++ b/experiments/scripts/analyze_ecg_hpo.py @@ -0,0 +1,368 @@ +import os +import gc +from argparse import ArgumentParser, Namespace +from typing import Union, Type, Sequence, Optional, List + +import torch +from torch import Tensor, nn +from torch.utils.data import Dataset, DataLoader + +from experiments.models import PatchTST, ResNetPlus +from experiments.utils import ( + get_ecg_dataset_from_hf, + get_ecg_patchtst_from_hf, + get_ecg_resnet_from_hf, +) + +from captum import attr as captum_exp + +from pnpxai import Experiment +from pnpxai.core.modality import TimeSeriesModality +from pnpxai import explainers as pnp_exp +from pnpxai.evaluator.metrics import Metric, AbPC, Complexity, Sensitivity + +MODEL_GENERATORS = { + "patchtst": get_ecg_patchtst_from_hf, + "resnet_plus": get_ecg_resnet_from_hf, +} + +PNPXAI_EXPLAINER_TYPES = { + pnp_exp.Gradient: "Gradient", + pnp_exp.GradientXInput: "GradXInput", + pnp_exp.IntegratedGradients: "IntegratedGradients", + pnp_exp.KernelShap: "KernelSHAP", + pnp_exp.Lime: "LIME", + pnp_exp.LRPEpsilonAlpha2Beta1: "LRP_EA2B1", + pnp_exp.LRPEpsilonGammaBox: "LRP_EGB", + pnp_exp.LRPEpsilonPlus: "LRP_E", + pnp_exp.LRPUniformEpsilon: "LRP_UniformE", + pnp_exp.SmoothGrad: "SmoothGrad", + pnp_exp.VarGrad: "VarGrad", +} + + +CAPTUM_EXPLAINER_TYPES = { + captum_exp.InputXGradient: "GradXInput", + captum_exp.IntegratedGradients: "IntegratedGradients", + captum_exp.KernelShap: "KernelSHAP", + captum_exp.Lime: "LIME", +} + + +class Composite(Metric): + def __init__(self, metrics: Sequence[Metric], agg_func: callable): + self.metrics = metrics + self.agg_func = agg_func + + def set_explainer(self, explainer: pnp_exp.Explainer) -> "Composite": + metrics = self.metrics + self.metrics = [] + + clone = self.copy() + + self.metrics = metrics + clone.metrics = [metric.set_explainer(explainer) for metric in metrics] + + return clone + + def evaluate( + self, + inputs: Tensor, + targets: Tensor, + attributions: Optional[Tensor] = None, + **kwargs, + ) -> Tensor: + return self.agg_func( + *[ + metric.evaluate( + inputs=inputs, targets=targets, attributions=attributions, **kwargs + ) + for metric in self.metrics + ] + ) + + +class CaptumExplainerWrapper: + def __init__(self, explainer_type: Type[captum_exp.Attribution], model: nn.Module): + self.model = model + self.explainer: captum_exp.Attribution = explainer_type(self.model) + + def attribute(self, inputs: Tensor, targets: Tensor) -> Tensor: + return self.explainer.attribute(inputs=inputs, target=targets) + + def __call__(self, *args, **kwargs) -> Tensor: + return 
self.attribute(*args, **kwargs) + + +def metric_to_str(metric: Metric) -> str: + formatter_map = { + AbPC: lambda _: "AbPC", + Complexity: lambda _: "Complexity", + Sensitivity: lambda _: "Sensitivity", + Composite: lambda metric: f"Composite({','.join([str(metric.__class__.__name__) for metric in metric.metrics])})", + } + metric_type = metric.__class__ + if metric_type not in formatter_map: + raise Exception(f"Metric: {metric_type} is not supported.") + + return formatter_map[metric_type](metric) + + +def get_composite_agg_func(params): + def func(*args): + device = args[0].device + total_val = 0 + for arg, param in zip(args, params): + total_val = total_val + arg.to(device) * param + return total_val + + return func + + +def get_explainers( + explainer_types: Sequence[Type[pnp_exp.Explainer]], + model: nn.Module, + modality: TimeSeriesModality, +): + default_kwargs = { + "feature_mask_fn": modality.get_default_feature_mask_fn(), + "baseline_fn": modality.get_default_baseline_fn(), + } + explainers = [] + for explainer_type in explainer_types: + explainer = explainer_type(model=model) + for k, v in default_kwargs.items(): + if hasattr(explainer, k): + explainer = explainer.set_kwargs(**{k: v}) + explainers.append(explainer) + + return explainers + + +def get_metrics( + metric_types: Sequence[Type[Metric]], + model: nn.Module, + modality: TimeSeriesModality, + agg_dim: int, +): + default_kwargs = { + "baseline_fn": modality.get_default_baseline_fn(), + "feature_mask_fn": modality.get_default_feature_mask_fn(), + "channel_dim": modality.channel_dim, + "mask_agg_dim": agg_dim, + } + + metrics = [] + for metric_type in metric_types: + metric = metric_type(model=model) + for k, v in default_kwargs.items(): + if hasattr(metric, k): + metric = metric.set_kwargs(**{k: v}) + metrics.append(metric) + + return metrics + + +def collect_exp_metrics( + model: nn.Module, modality: TimeSeriesModality, agg_dim: int +) -> List[Metric]: + metrics = [] + metrics = get_metrics([AbPC, Complexity, Sensitivity], model, modality, agg_dim) + metrics += [ + Composite( + [metrics[0], metrics[1]], get_composite_agg_func([0.8, -0.2]) + ), # AbPC, Sensitivity + Composite( + [metrics[0], metrics[2]], get_composite_agg_func([0.8, -0.2]) + ), # AbPC, Sensitivity + Composite( + [metrics[0], metrics[1], metrics[2]], + get_composite_agg_func([0.6, -0.2, -0.2]), + ), # AbPC, Complexity, Sensitivity + ] + return metrics + + +def log_data( + filename: str, exp_name: str, metric: str, explainer: str, best_value: float +): + os.makedirs(os.path.dirname(filename), exist_ok=True) + with open(filename, "a+") as file: + file.seek(0, 0) + if len(file.read(1)) == 0: + file.write("experiment,metric,explainer,value\n") + file.seek(2, 0) + file.write(f"{exp_name},{metric},{explainer},{best_value:.4f}\n") + + +def analyze_pnp( + model: nn.Module, + dataset: Dataset, + modality: TimeSeriesModality, + explainer_map: dict, + metrics: Sequence[Metric], + out_filename: str, + device: torch.device, +): + all_data_ids = list(range(len(dataset))) + loader = DataLoader(dataset, batch_size=1) + + explainers = get_explainers(explainer_map.keys(), model, modality) + + expr = Experiment( + model=model, + data=loader, + modality=modality, + explainers=explainers, + postprocessors=modality.get_default_postprocessors(), + metrics=metrics, + input_extractor=lambda batch: batch[0].to(device), + label_extractor=lambda batch: batch[-1].to(device), + target_extractor=lambda outputs: outputs.argmax(-1).to(device), + target_labels=False, + ) + + 
expr.predict_batch(all_data_ids) + + optimization_directions = { + Complexity: "minimize", + AbPC: "maximize", + Composite: "maximize", + Sensitivity: "minimize", + } + + for metric, metric_id in zip(*expr.manager.get_metrics()): + for explainer, explainer_id in zip(*expr.manager.get_explainers()): + best_value = 0 + n_processed = 0 + + for idx, data_id in enumerate(all_data_ids): + try: + optimized = expr.optimize( + data_ids=[data_id], + explainer_id=explainer_id, + metric_id=metric_id, + direction=optimization_directions[metric.__class__], + sampler="tpe", # Literal['tpe','random'] + n_trials=20, + seed=42, # seed for sampler: by default, None + ) + cur_best_value = optimized.study.best_trial.value + best_value += cur_best_value + print( + f"[{idx + 1}] {str(metric)} | {explainer.__class__.__name__} = {cur_best_value}" + ) # get the optimized value + + del optimized + n_processed += 1 + except Exception as e: + print( + f"[Error] Metric: {metric.__class__.__name__}; Explainer: {explainer.__class__.__name__} with error:\n{e}" + ) + gc.collect() + torch.cuda.empty_cache() + + best_value /= n_processed + print(f"Metric: {str(metric)}; Explainer: {explainer.__class__.__name__};") + print("Best/value:", best_value) # get the optimized value + + log_data( + out_filename, + "PnP XAI", + metric_to_str(metric), + explainer_map[explainer.__class__], + best_value, + ) + + torch.cuda.empty_cache() + gc.collect() + + +def analyze_captum( + model: nn.Module, + dataset: Dataset, + explainer_map: dict, + metrics: Sequence[Metric], + out_filename: str, + device: torch.device, +): + explainer_wrappers = [ + CaptumExplainerWrapper(exp_type, model) for exp_type in explainer_map.keys() + ] + inputs, target = [tensor.to(device) for tensor in dataset.tensors] + + for metric in metrics: + for wrapper in explainer_wrappers: + try: + attributions = wrapper.attribute(inputs, target) + metric = metric.set_explainer(wrapper) + evals = metric.evaluate(inputs, target, attributions) + evals = (sum(evals) / len(evals)).item() + + print( + f"Metric: {str(metric)}; Explainer: {wrapper.__class__.__name__};" + ) + print("Best/value:", evals) + + torch.cuda.empty_cache() + log_data( + out_filename, + "Captum", + metric_to_str(metric), + explainer_map[wrapper.explainer.__class__], + evals, + ) + except Exception as e: + print( + f"[Error] Metric: {metric.__class__.__name__}; Explainer: {wrapper.__class__.__name__}. 
{e}" + ) + raise e + + +def analyze(args: Namespace): + out_filename = args.out_file + dataset = get_ecg_dataset_from_hf(repo_id="enver1323/ucr-twoleadecg") + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + model = MODEL_GENERATORS[args.model]().to(device) + + seq_dim = -1 + agg_dim = -2 + modality = TimeSeriesModality(seq_dim) + metrics = collect_exp_metrics(model, modality, agg_dim) + + analyze_pnp( + model, dataset, modality, PNPXAI_EXPLAINER_TYPES, metrics, out_filename, device + ) + analyze_captum( + model, dataset, CAPTUM_EXPLAINER_TYPES, metrics, out_filename, device + ) + + +def main(): + """Main execution function.""" + # arguments + parser = ArgumentParser( + description="Compare explanations by PnPXAI and Captum on ECG dataset" + ) + parser.add_argument( + "--model", + type=str, + choices=MODEL_GENERATORS.keys(), + required=True, + ) + parser.add_argument( + "--out_file", + type=str, + default="results/hpo_analysis_ecg/explanations_summary.csv", + help="Filepath for results' collection", + ) + + args = parser.parse_args() + + analyze(args) + print("\nEvaluation finished.") + + +if __name__ == "__main__": + main() diff --git a/experiments/utils/__init__.py b/experiments/utils/__init__.py index 2570e37..75a00da 100644 --- a/experiments/utils/__init__.py +++ b/experiments/utils/__init__.py @@ -23,6 +23,7 @@ get_livertumor_dataset_from_hf, get_imagenet_sample_from_hf, get_aki_dataset_from_hf, + get_ecg_dataset_from_hf, ) from .models import ( @@ -38,6 +39,8 @@ get_vilt_processor, vilt_collate_fn, get_aki_model_from_hf, + get_ecg_resnet_from_hf, + get_ecg_patchtst_from_hf ) __all__ = [ diff --git a/experiments/utils/datasets.py b/experiments/utils/datasets.py index 78c24ac..e55373f 100644 --- a/experiments/utils/datasets.py +++ b/experiments/utils/datasets.py @@ -11,9 +11,10 @@ import numpy as np import pandas as pd -from sklearn.preprocessing import StandardScaler +from sklearn.preprocessing import StandardScaler, LabelEncoder -from torch.utils.data import Dataset, Subset, DataLoader +import torch +from torch.utils.data import Dataset, Subset, DataLoader, TensorDataset from torchvision import transforms from huggingface_hub import hf_hub_download from datasets import load_dataset @@ -419,3 +420,17 @@ def get_aki_dataset_from_hf( dataset = AKIDataset(x_data, y_data) return dataset + + +def get_ecg_dataset_from_hf(repo_id: str = "enver1323/ucr-twoleadecg") -> TensorDataset: + data = load_dataset(repo_id)["test"].with_format("numpy") + x_data = np.stack(data['segment']) + y_data = data['label'] + + encoder = LabelEncoder() + y_data = encoder.fit_transform(y_data) + + return TensorDataset( + torch.from_numpy(x_data), + torch.from_numpy(y_data) + ) \ No newline at end of file diff --git a/experiments/utils/models.py b/experiments/utils/models.py index 7d86a29..31e597b 100644 --- a/experiments/utils/models.py +++ b/experiments/utils/models.py @@ -4,7 +4,7 @@ from transformers import BertTokenizer, BertForSequenceClassification from transformers import ViltForQuestionAnswering, ViltProcessor -from experiments.models import ResNet50LiverTumor, AKIClassifier +from experiments.models import ResNet50LiverTumor, AKIClassifier, ResNetPlus, PatchTST def get_torchvision_model(model_name): @@ -134,3 +134,9 @@ def vilt_collate_fn(batch, processor=None, label2id=None): def get_aki_model_from_hf(repo_id: str = "enver1323/aki-classifier") -> AKIClassifier: return AKIClassifier.from_pretrained(repo_id) + +def get_ecg_resnet_from_hf(repo_id: str = 
"enver1323/resnetplus-classification-ecg") -> ResNetPlus: + return ResNetPlus.from_pretrained(repo_id) + +def get_ecg_patchtst_from_hf(repo_id: str = "enver1323/patchtst-classification-ecg") -> PatchTST: + return PatchTST.from_pretrained(repo_id) From cc218dc7fce350b7f749f319799274c03f7d4c27 Mon Sep 17 00:00:00 2001 From: Enver Menadjiev Date: Fri, 21 Nov 2025 13:23:40 +0900 Subject: [PATCH 10/20] feat: add mimiciii preprocessing --- README.md | 62 ++ data/mimiciii/cleanup.py | 197 +++++ data/mimiciii/parse_db.py | 1020 ++++++++++++++++++++++++ data/mimiciii/preprocess.py | 410 ++++++++++ experiments/scripts/analyze_aki_hpo.py | 10 +- experiments/utils/datasets.py | 4 +- 6 files changed, 1699 insertions(+), 4 deletions(-) create mode 100644 data/mimiciii/cleanup.py create mode 100644 data/mimiciii/parse_db.py create mode 100644 data/mimiciii/preprocess.py diff --git a/README.md b/README.md index a02e0ef..0e8c3aa 100644 --- a/README.md +++ b/README.md @@ -201,6 +201,68 @@ python -m experiments.scripts.analyze_livertumor_hpo \ Results will be saved under the `results/hpo_analysis_livertumor/` directory, organized by data instance ID. +--- + +### Experiment 4: Acute Kidney Injury (AKI) Explanation + +This experiment analyzes the effect of HPO (optimizing for AbPC) on explanations for medical data for acute kidney injury (AKI) detection, evaluating the change in ground truth agreement (Relevance Mass/Rank Accuracy). + +#### Data and Model + + * **Data (MIMIC III):** The **MIMIC III dataset** used in this experiment is hosted on Hugging Face Hub: [➡️ enver1323/mimic3-aki-detection](https://huggingface.co/datasets/enver1323/mimic3-aki-detection). This dataset contains patient data derived from the original [MIMIC III dataset](https://doi.org/10.13026/C2XW26). The script **automatically downloads** the necessary files when first executed. For more details on the data loading process, refer to the `get_aki_dataset_from_hf` function within [`experiments/utils/datasets.py`](./experiments/utils/datasets.py) and the [➡️ enver1323/mimic3-aki-detection](https://huggingface.co/datasets/enver1323/mimic3-aki-detection). + + * **Model (AKI Classifier):** The pre-trained **Linear model** adapted for this task is hosted on Hugging Face Hub: [➡️ enver1323/aki-classifier](https://huggingface.co/enver1323/aki-classifier). Similar to the dataset, the script **automatically downloads** the model weights. The model architecture is defined in [`experiments/models/aki.py`](./experiments/models/aki.py). For more details on model loading, refer to the `get_aki_model_from_hf` function within [`experiments/utils/models.py`](./experiments/utils/models.py). + +#### Usage + +```bash +python -m experiments.scripts.analyze_aki_hpo \ + --n_trials 100 \ + --analyze \ + --visualize +``` + +#### Arguments + + * `--n_trials `: Number of trials for hyperparameter optimization (HPO). Defaults to `20`. + * `--analyze`: Runs the HPO process and saves the top-K columns as well as attributions (`.json` and `.npy` files correspondingly for default run, and optimized run) to `results/hpo_analysis_aki/topk//`. + * `--visualize`: Loads the previously saved results and generates a visualization PDF comparing default vs. optimized attributions and metrics. Saves the figure to `results/hpo_analysis_aki/explanation_summary.pdf`. Requires results to be saved first (using `--analyze`). + +#### Output + +Results will be saved under the `results/hpo_analysis_aki/` directory, organized by explainer name. 
+
+---
+
+### Experiment 5: ECG Explanation
+
+This experiment analyzes the effects of HPO (optimizing for multiple metrics) on explanations for an ECG time series dataset, evaluating the change in metric values.
+
+#### Data and Model
+
+  * **Data (ECG):** The **ECG dataset** used in this experiment is hosted on Hugging Face Hub: [➡️ enver1323/ucr-twoleadecg](https://huggingface.co/datasets/enver1323/ucr-twoleadecg). This dataset contains time series ECG segments derived from the original [UCR dataset](https://doi.org/10.48550/arXiv.1810.07758). The script **automatically downloads** the necessary files when first executed. For more details on the data loading process, refer to the `get_ecg_dataset_from_hf` function within [`experiments/utils/datasets.py`](./experiments/utils/datasets.py) (a minimal loading sketch is shown at the end of this section).
+
+  * **Model (ResNetPlus):** The pre-trained **ResNetPlus model** adapted for this task is hosted on Hugging Face Hub: [➡️ enver1323/resnetplus-classification-ecg](https://huggingface.co/enver1323/resnetplus-classification-ecg). Similar to the dataset, the script **automatically downloads** the model weights. The model architecture is defined in [`experiments/models/ecg/resnet_plus.py`](./experiments/models/ecg/resnet_plus.py). For more details on model loading, refer to the `get_ecg_resnet_from_hf` function within [`experiments/utils/models.py`](./experiments/utils/models.py).
+
+  * **Model (PatchTST):** The pre-trained **PatchTST model** adapted for this task is hosted on Hugging Face Hub: [➡️ enver1323/patchtst-classification-ecg](https://huggingface.co/enver1323/patchtst-classification-ecg). Similar to the dataset, the script **automatically downloads** the model weights. The model architecture is defined in [`experiments/models/ecg/patchtst.py`](./experiments/models/ecg/patchtst.py). For more details on model loading, refer to the `get_ecg_patchtst_from_hf` function within [`experiments/utils/models.py`](./experiments/utils/models.py).
+
+#### Usage
+
+```bash
+python -m experiments.scripts.analyze_ecg_hpo \
+    --model resnet_plus \
+    --out_file results/hpo_analysis_ecg/explanations_summary.csv
+```
+
+#### Arguments
+
+  * `--model`: The model to analyze; one of `resnet_plus` or `patchtst`.
+  * `--out_file`: Path of the output CSV file that stores the explanation summary. Defaults to `results/hpo_analysis_ecg/explanations_summary.csv`.
+
+#### Output
+
+Results will be saved to the file path given by the `--out_file` argument.
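+
+As with the AKI experiment, the helpers above can be used directly for a quick sanity check. The snippet below is a minimal sketch rather than part of `analyze_ecg_hpo.py`; it mirrors how that script unpacks `dataset.tensors`, while the ResNetPlus choice and the `.float()` cast are illustrative assumptions.
+
+```python
+# Minimal sketch: load the ECG test split and the ResNetPlus checkpoint via the
+# repository helpers and classify one stored segment. Segments are passed to the
+# model in the same layout in which they are stored, as analyze_ecg_hpo.py does.
+import torch
+
+from experiments.utils import get_ecg_dataset_from_hf, get_ecg_resnet_from_hf
+
+dataset = get_ecg_dataset_from_hf()        # TensorDataset of (segments, labels)
+model = get_ecg_resnet_from_hf().eval()    # enver1323/resnetplus-classification-ecg
+
+segments, labels = dataset.tensors
+inputs = segments[:1].float()              # one segment; cast in case the stored dtype differs
+
+with torch.no_grad():
+    pred = model(inputs).argmax(-1)
+print(pred.item(), labels[0].item())
+```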
+ ## Citation diff --git a/data/mimiciii/cleanup.py b/data/mimiciii/cleanup.py new file mode 100644 index 0000000..34d6824 --- /dev/null +++ b/data/mimiciii/cleanup.py @@ -0,0 +1,197 @@ +from argparse import ArgumentParser +import pandas as pd +import os + + +def code_ethnicity(ethinicity: str) -> int: + if ethinicity in { + "UNKNOWN/NOT SPECIFIED", + "OTHER", + "PATIENT DECLINED TO ANSWER", + "MULTI RACE ETHNICITY", + "UNABLE TO OBTAIN", + }: + return -1 + if ethinicity in { + "WHITE", + "WHITE - RUSSIAN", + "WHITE - EASTERN EUROPEAN", + "WHITE - OTHER EUROPEAN", + "WHITE - BRAZILIAN", + }: + return 0 + if ethinicity in { + "BLACK/AFRICAN AMERICAN", + "BLACK/AFRICAN", + "BLACK/HAITIAN", + "BLACK/CAPE VERDEAN", + }: + return 1 + if ethinicity in { + "ASIAN", + "ASIAN - ASIAN INDIAN", + "ASIAN - VIETNAMESE", + "ASIAN - CHINESE", + "ASIAN - FILIPINO", + "ASIAN - CAMBODIAN", + "ASIAN - THAI", + "ASIAN - OTHER", + "ASIAN - KOREAN", + "ASIAN - JAPANESE", + }: + return 2 + if ethinicity in { + "HISPANIC OR LATINO", + "HISPANIC/LATINO - GUATEMALAN", + "HISPANIC/LATINO - PUERTO RICAN", + "HISPANIC/LATINO - DOMINICAN", + "HISPANIC/LATINO - SALVADORAN", + "HISPANIC/LATINO - COLOMBIAN", + "PORTUGUESE", + "HISPANIC/LATINO - CENTRAL AMERICAN (OTHER)", + "HISPANIC/LATINO - HONDURAN", + "HISPANIC/LATINO - CUBAN", + "HISPANIC/LATINO - MEXICAN", + }: + return 3 + if ethinicity in { + "AMERICAN INDIAN/ALASKA NATIVE", + "AMERICAN INDIAN/ALASKA NATIVE FEDERALLY RECOGNIZED TRIBE", + }: + return 4 + if ethinicity in {"CARIBBEAN ISLAND", "NATIVE HAWAIIAN OR OTHER PACIFIC ISLANDER"}: + return 5 + if ethinicity == "SOUTH AMERICAN": + return 6 + if ethinicity == "MIDDLE EASTERN": + return 7 + + raise NotImplementedError() + + +def code_system(system: str) -> int: + if system == "carevue": + return 0 + + if system == "metavision": + return 1 + + raise NotImplementedError() + + +def code_gender(gender: str) -> int: + return int(gender == "M") + + +def cleanup_data(filename: str) -> pd.DataFrame: + # read the data from the CSV + df = pd.read_csv(open(filename, "r"), delimiter=",") + df.columns = map(str.upper, df.columns) + print(df.shape) + + # exclude CKD and AKI on admission patients + df = df[~(df["AKI"] == 2)] + df = df[~(df["AKI"] == 3)] + df = df[~(df["AKI"] == 4)] + + print(df.groupby("AKI")["ICUSTAY_ID"].nunique()) + + # Consider only adults + df = df[~(df["AGE"] < 18)] + + df["ETHNICITY"] = df["ETHNICITY"].apply(lambda x: code_ethnicity(x)) + df["GENDER"] = df["GENDER"].apply(lambda x: code_gender(x)) + + print(df.groupby("ETHNICITY")["ICUSTAY_ID"].nunique()) + + df = df.rename( + columns={ + "HADM_ID_X": "HADM_ID", + "GLUCOSE_MIN_X": "GLUCOSE_MIN", + "GLUCOSE_MAX_X": "GLUCOSE_MAX", + "SUBJECT_ID_Y": "SUBJECT_ID", + "SUBJECT_ID_X.1": "SUBJECT_ID", + "DBSOURCE_Y": "DBSOURCE", + } + ) + df = df.fillna(0) + + df = df.drop(df.columns[1], axis=1) + + print(df.groupby("AKI")["ICUSTAY_ID"].nunique()) + print(df.groupby("AKI_STAGE_7DAY")["ICUSTAY_ID"].nunique()) + + print( + "Non AKI Patients : {}".format( + df.loc[df["AKI_STAGE_7DAY"] == 0]["ICUSTAY_ID"].count() + ) + ) + print( + "AKI patients STAGE 1: {}".format( + df.loc[df["AKI_STAGE_7DAY"] == 1]["ICUSTAY_ID"].count() + ) + ) + print( + "AKI Patients STAGE 2: {}".format( + df.loc[df["AKI_STAGE_7DAY"] == 2]["ICUSTAY_ID"].count() + ) + ) + print( + "AKI Patients STAGE 3: {}".format( + df.loc[df["AKI_STAGE_7DAY"] == 3]["ICUSTAY_ID"].count() + ) + ) + print("NAN patients: {}".format(df["AKI"].isna().sum())) + + df = df.drop( + [ + "ADMITTIME", + "DISCHTIME", + 
"OUTTIME", + "INTIME", + "DOB", + "CHARTTIME_CREAT", + "UNNAMED: 0", + "AKI_STAGE_CREAT", + "AKI_7DAY", + "GLUCOSE_MAX_Y", + "GLUCOSE_MIN_Y", + "DBSOURCE_X", + ], + axis=1, + ) + + if isinstance(df["DBSOURCE"], pd.DataFrame): + df["DBSOURCE_NEW"] = df["DBSOURCE"].iloc[:, 0] + df = df.drop(["DBSOURCE"], axis=1) + df = df.rename(columns={"DBSOURCE_NEW": "DBSOURCE"}) + + df = df[~(df["DBSOURCE"] == "both")] + + df["DBSOURCE"] = df["DBSOURCE"].apply(lambda x: code_system(x)) + + return df + + +def main(): + parser = ArgumentParser() + parser.add_argument( + "--data_path", + type=str, + default="./formatted/INFO_DATASET_7days_creatinine.csv", + help='Path to formatted MIMIC III data from "preprocess.py"', + ) + parser.add_argument( + "--formatted_path", + type=str, + default="./formatted", + help="Output path to store cleaned data", + ) + args = parser.parse_args() + + df = cleanup_data(args.data_path) + df.to_csv(os.path.join(args.formatted_path, "data.csv")) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/data/mimiciii/parse_db.py b/data/mimiciii/parse_db.py new file mode 100644 index 0000000..81da442 --- /dev/null +++ b/data/mimiciii/parse_db.py @@ -0,0 +1,1020 @@ +import os +import argparse + +import pandas as pd + +import psycopg2 + + +def urine_output(cursor): + + view = "DROP MATERIALIZED VIEW IF EXISTS urineoutput CASCADE; \ + CREATE MATERIALIZED VIEW urineoutput as \ + select oe.icustay_id, oe.charttime \ + , SUM( \ + case when oe.itemid = 227488 then -1*value \ + else value end \ + ) as value \ + from mimiciii.outputevents oe \ + where oe.itemid in \ + ( \ + 40055, \ + 43175, \ + 40069, \ + 40094, \ + 40715, \ + 40473, \ + 40085, \ + 40057, \ + 40056, \ + 40405, \ + 40428, \ + 40086, \ + 40096, \ + 40651, \ + 226559, \ + 226560, \ + 226561, \ + 226584, \ + 226563, \ + 226564, \ + 226565, \ + 226567, \ + 226557, \ + 226558, \ + 227488, \ + 227489 \ + ) \ + and oe.value < 5000 \ + and oe.icustay_id is not null \ + group by icustay_id, charttime;" + + cursor.execute(view) + + +def creatinine(cursor): + + view = "DROP MATERIALIZED VIEW IF EXISTS kdigo_creat CASCADE; \ + CREATE MATERIALIZED VIEW kdigo_creat as \ + with cr as \ + ( \ + select \ + ie.icustay_id \ + , ie.intime, ie.outtime \ + , le.valuenum as creat \ + , le.charttime \ + , ie.DBSOURCE \ + from mimiciii.icustays ie \ + left join mimiciii.labevents le \ + on ie.subject_id = le.subject_id \ + and le.ITEMID = 50912 \ + and le.VALUENUM is not null \ + and le.CHARTTIME between (ie.intime - interval '7' day) and (ie.intime + interval '7' day) \ + ) \ + SELECT \ + cr.icustay_id \ + , cr.charttime \ + , cr.creat \ + , MIN(cr48.creat) AS creat_low_past_48hr \ + , MIN(cr7.creat) AS creat_low_past_7day \ + FROM cr \ + LEFT JOIN cr cr48 \ + ON cr.icustay_id = cr48.icustay_id \ + AND cr48.charttime < cr.charttime \ + AND cr48.charttime >= (cr.charttime - INTERVAL '48' HOUR) \ + LEFT JOIN cr cr7 \ + ON cr.icustay_id = cr7.icustay_id \ + AND cr7.charttime < cr.charttime \ + AND cr7.charttime >= (cr.charttime - INTERVAL '7' DAY) \ + GROUP BY cr.icustay_id, cr.charttime, cr.creat \ + ORDER BY cr.icustay_id, cr.charttime, cr.creat;" + + cursor.execute(view) + + +def echo_data(cursor): + + # -- This code extracts structured data from echocardiographies + # -- You can join it to the text notes using ROW_ID + # -- Just note that ROW_ID will differ across versions of MIMIC-III. 
+ + view = "DROP MATERIALIZED VIEW IF EXISTS echodata CASCADE; \ + CREATE MATERIALIZED VIEW echodata AS \ + select ROW_ID \ + , subject_id, hadm_id \ + , chartdate \ + , cast(to_timestamp( (to_char( chartdate, 'DD-MM-YYYY' ) || substring(ne.text, 'Date/Time: [\[\]0-9*-]+ at ([0-9:]+)')), \ + 'DD-MM-YYYYHH24:MI') as timestamp without time zone) \ + as charttime \ + , substring(ne.text, 'Indication: (.*?)\n') as Indication \ + , case \ + when substring(ne.text, 'Height: \(in\) (.*?)\n') like '%*%' \ + then null \ + else cast(substring(ne.text, 'Height: \(in\) (.*?)\n') as numeric) \ + end as Height \ + , case \ + when substring(ne.text, 'Weight \(lb\): (.*?)\n') like '%*%' \ + then null \ + else cast(substring(ne.text, 'Weight \(lb\): (.*?)\n') as numeric) \ + end as Weight \ + , case \ + when substring(ne.text, 'BSA \(m2\): (.*?) m2\n') like '%*%' \ + then null \ + else cast(substring(ne.text, 'BSA \(m2\): (.*?) m2\n') as numeric) \ + end as BSA \ + , substring(ne.text, 'BP \(mm Hg\): (.*?)\n') as BP \ + , case \ + when substring(ne.text, 'BP \(mm Hg\): ([0-9]+)/[0-9]+?\n') like '%*%' \ + then null \ + else cast(substring(ne.text, 'BP \(mm Hg\): ([0-9]+)/[0-9]+?\n') as numeric) \ + end as BPSys \ + , case \ + when substring(ne.text, 'BP \(mm Hg\): [0-9]+/([0-9]+?)\n') like '%*%' \ + then null \ + else cast(substring(ne.text, 'BP \(mm Hg\): [0-9]+/([0-9]+?)\n') as numeric) \ + end as BPDias \ + , case \ + when substring(ne.text, 'HR \(bpm\): ([0-9]+?)\n') like '%*%' \ + then null \ + else cast(substring(ne.text, 'HR \(bpm\): ([0-9]+?)\n') as numeric) \ + end as HR \ + , substring(ne.text, 'Status: (.*?)\n') as Status \ + , substring(ne.text, 'Test: (.*?)\n') as Test \ + , substring(ne.text, 'Doppler: (.*?)\n') as Doppler \ + , substring(ne.text, 'Contrast: (.*?)\n') as Contrast \ + , substring(ne.text, 'Technical Quality: (.*?)\n') as TechnicalQuality \ + from mimiciii.noteevents ne \ + where category = 'Echo';" + + cursor.execute(view) + + +def weight_duration(cursor): + + # -- This query extracts weights for ICU patients with start/stop times + # -- if only an admission weight is given, then this is assigned from intime to outtime + + view = " DROP MATERIALIZED VIEW IF EXISTS weightdurations CASCADE; \ + CREATE MATERIALIZED VIEW weightdurations as \ + WITH wt_neonate AS \ + (\ + SELECT c.icustay_id, c.charttime \ + , MAX(CASE WHEN c.itemid = 3580 THEN c.valuenum END) as wt_kg \ + , MAX(CASE WHEN c.itemid = 3581 THEN c.valuenum END) as wt_lb \ + , MAX(CASE WHEN c.itemid = 3582 THEN c.valuenum END) as wt_oz \ + FROM mimiciii.chartevents c \ + WHERE c.itemid in (3580, 3581, 3582) \ + AND c.icustay_id IS NOT NULL \ + AND c.error IS DISTINCT FROM 1 \ + AND c.valuenum > 0 \ + GROUP BY c.icustay_id, c.charttime \ + ) \ + , birth_wt AS \ + ( \ + SELECT c.icustay_id, c.charttime \ + , MAX( \ + CASE \ + WHEN c.itemid = 4183 THEN \ + CASE \ + WHEN c.value ~ '[^0-9\.]' THEN NULL \ + WHEN CAST(c.value AS NUMERIC) > 100 THEN CAST(c.value AS NUMERIC)/1000 \ + WHEN CAST(c.value AS NUMERIC) < 10 THEN CAST(c.value AS NUMERIC) \ + ELSE NULL END \ + WHEN c.itemid = 3723 AND c.valuenum < 10 THEN c.valuenum \ + ELSE NULL END) as wt_kg \ + FROM mimiciii.chartevents c \ + WHERE c.itemid in (3723, 4183) \ + AND c.icustay_id IS NOT NULL \ + AND c.error IS DISTINCT FROM 1 \ + GROUP BY c.icustay_id, c.charttime \ + ) \ + , wt_stg as \ + ( \ + SELECT \ + c.icustay_id \ + , c.charttime \ + , case when c.itemid in (762,226512) then 'admit' \ + else 'daily' end as weight_type \ + , c.valuenum as weight \ + FROM 
mimiciii.chartevents c \ + WHERE c.valuenum IS NOT NULL \ + AND c.itemid in \ + ( \ + 762,226512 \ + , 763,224639 \ + ) \ + AND c.icustay_id IS NOT NULL \ + AND c.valuenum > 0 \ + AND c.error IS DISTINCT FROM 1 \ + UNION ALL \ + SELECT \ + n.icustay_id \ + , n.charttime \ + , 'daily' AS weight_type \ + , CASE \ + WHEN wt_kg IS NOT NULL THEN wt_kg \ + WHEN wt_lb IS NOT NULL THEN wt_lb*0.45359237 + wt_oz*0.0283495231 \ + ELSE NULL END AS weight \ + FROM wt_neonate n \ + UNION ALL \ + SELECT \ + b.icustay_id \ + , b.charttime \ + , 'admit' AS weight_type \ + , wt_kg as weight \ + FROM birth_wt b \ + ) \ + , wt_stg1 as \ + ( \ + select \ + icustay_id \ + , charttime \ + , weight_type \ + , weight \ + , ROW_NUMBER() OVER (partition by icustay_id, weight_type order by charttime) as rn \ + from wt_stg \ + WHERE weight IS NOT NULL \ + ) \ + , wt_stg2 AS \ + ( \ + SELECT \ + wt_stg1.icustay_id \ + , ie.intime, ie.outtime \ + , ie.DBSOURCE \ + , case when wt_stg1.weight_type = 'admit' and wt_stg1.rn = 1 \ + then ie.intime - interval '2' hour \ + else wt_stg1.charttime end as starttime \ + , wt_stg1.weight \ + from wt_stg1 \ + INNER JOIN mimiciii.icustays ie \ + on ie.icustay_id = wt_stg1.icustay_id \ + ) \ + , wt_stg3 as \ + ( \ + select \ + icustay_id \ + , intime, outtime \ + , starttime \ + , DBSOURCE \ + , coalesce( \ + LEAD(starttime) OVER (PARTITION BY icustay_id ORDER BY starttime), \ + outtime + interval '2' hour \ + ) as endtime \ + , weight \ + from wt_stg2 \ + ) \ + , wt1 as \ + ( \ + select \ + icustay_id \ + , starttime \ + , DBSOURCE \ + , coalesce(endtime, \ + LEAD(starttime) OVER (partition by icustay_id order by starttime), \ + outtime + interval '2' hour) \ + as endtime \ + , weight \ + from wt_stg3 \ + ) \ + , wt_fix as \ + ( \ + select ie.icustay_id \ + , ie.intime - interval '2' hour as starttime \ + , wt.starttime as endtime \ + , wt.weight \ + from mimiciii.icustays ie \ + inner join \ + ( \ + SELECT wt1.icustay_id, wt1.starttime, wt1.weight , wt1.DBSOURCE \ + , ROW_NUMBER() OVER (PARTITION BY wt1.icustay_id ORDER BY wt1.starttime) as rn \ + FROM wt1 \ + ) wt \ + ON ie.icustay_id = wt.icustay_id \ + AND wt.rn = 1 \ + and ie.intime < wt.starttime \ + ) \ + , wt2 as \ + ( \ + select \ + wt1.icustay_id \ + , wt1.starttime \ + , wt1.endtime \ + , wt1.weight \ + from wt1 \ + UNION \ + SELECT \ + wt_fix.icustay_id \ + , wt_fix.starttime \ + , wt_fix.endtime \ + , wt_fix.weight \ + from wt_fix \ + ) \ + , echo_lag as \ + ( \ + select \ + ie.icustay_id \ + , ie.intime, ie.outtime \ + , 0.453592*ec.weight as weight_echo \ + , ROW_NUMBER() OVER (PARTITION BY ie.icustay_id ORDER BY ec.charttime) as rn \ + , ec.charttime as starttime \ + , LEAD(ec.charttime) OVER (PARTITION BY ie.icustay_id ORDER BY ec.charttime) as endtime \ + from mimiciii.icustays ie \ + inner join echodata ec \ + on ie.hadm_id = ec.hadm_id \ + where ec.weight is not null \ + ) \ + , echo_final as \ + ( \ + select \ + el.icustay_id \ + , el.starttime \ + , coalesce(el.endtime, el.outtime + interval '2' hour) as endtime \ + , weight_echo \ + from echo_lag el \ + UNION \ + select \ + el.icustay_id \ + , el.intime - interval '2' hour as starttime \ + , el.starttime as endtime \ + , el.weight_echo \ + from echo_lag el \ + where el.rn = 1 \ + and el.starttime > el.intime - interval '2' hour \ + ) \ + select \ + wt2.icustay_id, wt2.starttime, wt2.endtime, wt2.weight\ + from wt2 \ + UNION \ + select \ + ef.icustay_id, ef.starttime, ef.endtime, ef.weight_echo as weight \ + from echo_final ef \ + where ef.icustay_id not in 
(select distinct icustay_id from wt2) \ + order by icustay_id, starttime, endtime;" + + cursor.execute(view) + + +def urine_kidigo(cursor): + + # -- we have joined each row to all rows preceding within 24 hours \ + # -- we can now sum these rows to get total UO over the last 24 hours \ + # -- we can use case statements to restrict it to only the last 6/12 hours \ + # -- therefore we have three sums: \ + # -- 1) over a 6 hour period \ + # -- 2) over a 12 hour period \ + # -- 3) over a 24 hour period \ + # -- note that we assume data charted at charttime corresponds to 1 hour of UO \ + # -- therefore we use '5' and '11' to restrict the period, rather than 6/12 \ + # -- this assumption may overestimate UO rate when documentation is done less than hourly \ + # -- 6 hours \ + + view = " DROP MATERIALIZED VIEW IF EXISTS kdigo_uo CASCADE; \ + CREATE MATERIALIZED VIEW kdigo_uo AS \ + with ur_stg as \ + ( \ + select io.icustay_id, io.charttime \ + , sum(case when io.charttime <= iosum.charttime + interval '5' hour \ + then iosum.VALUE \ + else null end) as UrineOutput_6hr \ + , sum(case when io.charttime <= iosum.charttime + interval '11' hour \ + then iosum.VALUE \ + else null end) as UrineOutput_12hr \ + , sum(iosum.VALUE) as UrineOutput_24hr \ + , ROUND(CAST(EXTRACT(EPOCH FROM \ + io.charttime - \ + MIN(case when io.charttime <= iosum.charttime + interval '5' hour \ + then iosum.charttime \ + else null end) \ + )/3600.0 AS NUMERIC), 4) AS uo_tm_6hr \ + , ROUND(CAST(EXTRACT(EPOCH FROM \ + io.charttime - \ + MIN(case when io.charttime <= iosum.charttime + interval '11' hour \ + then iosum.charttime \ + else null end) \ + )/3600.0 AS NUMERIC), 4) AS uo_tm_12hr \ + , ROUND(CAST(EXTRACT(EPOCH FROM \ + io.charttime - MIN(iosum.charttime) \ + )/3600.0 AS NUMERIC), 4) AS uo_tm_24hr \ + from urineoutput io \ + left join urineoutput iosum \ + on io.icustay_id = iosum.icustay_id \ + and io.charttime >= iosum.charttime \ + and io.charttime <= (iosum.charttime + interval '23' hour) \ + group by io.icustay_id, io.charttime \ + ) \ + select \ + ur.icustay_id \ + , ur.charttime \ + , wd.weight \ + , ur.UrineOutput_6hr \ + , ur.UrineOutput_12hr \ + , ur.UrineOutput_24hr \ + , ROUND((ur.UrineOutput_6hr/wd.weight/(uo_tm_6hr+1))::NUMERIC, 4) AS uo_rt_6hr \ + , ROUND((ur.UrineOutput_12hr/wd.weight/(uo_tm_12hr+1))::NUMERIC, 4) AS uo_rt_12hr \ + , ROUND((ur.UrineOutput_24hr/wd.weight/(uo_tm_24hr+1))::NUMERIC, 4) AS uo_rt_24hr \ + , uo_tm_6hr \ + , uo_tm_12hr \ + , uo_tm_24hr \ + from ur_stg ur \ + left join weightdurations wd \ + on ur.icustay_id = wd.icustay_id \ + and ur.charttime >= wd.starttime \ + and ur.charttime < wd.endtime \ + order by icustay_id, charttime; " + + cursor.execute(view) + + +def kidigo_7_days_creatinine(cursor): + + # -- This query checks if the patient had AKI during the first 7 days of their ICU + # -- stay according to the KDIGO guideline. 
+ # -- https://kdigo.org/wp-content/uploads/2016/10/KDIGO-2012-AKI-Guideline-English.pdf + + view = "DROP MATERIALIZED VIEW IF EXISTS kdigo_7_days_creatinine; \ + CREATE MATERIALIZED VIEW kdigo_7_days_creatinine AS \ + WITH cr_aki AS \ + ( \ + SELECT \ + k.icustay_id \ + , k.DBSOURCE \ + , k.charttime \ + , k.creat \ + , k.aki_stage_creat \ + , ROW_NUMBER() OVER (PARTITION BY k.icustay_id ORDER BY k.aki_stage_creat DESC, k.creat DESC) AS rn \ + FROM mimiciii.icustays ie \ + INNER JOIN kdigo_stages_creatinine k \ + ON ie.icustay_id = k.icustay_id \ + WHERE k.charttime > (ie.intime - interval '6' hour) \ + AND k.charttime <= (ie.intime + interval '7' day) \ + AND k.aki_stage_creat IS NOT NULL \ + ) \ + select \ + ie.icustay_id \ + , ie.DBSOURCE \ + , cr.charttime as charttime_creat \ + , cr.creat \ + , cr.aki_stage_creat \ + , cr.aki_stage_creat AS aki_stage_7day \ + , CASE WHEN (cr.aki_stage_creat > 0) THEN 1 ELSE 0 END AS aki_7day \ + FROM mimiciii.icustays ie \ + LEFT JOIN cr_aki cr \ + ON ie.icustay_id = cr.icustay_id \ + AND cr.rn = 1 \ + order by ie.icustay_id; " + + cursor.execute(view) + + +def kidigo_stages_creatinine(cursor): + + # -- This query checks if the patient had AKI according to KDIGO. + # -- AKI is calculated every time a creatinine or urine output measurement occurs. + # -- Baseline creatinine is defined as the lowest creatinine in the past 7 days. + + view = " DROP MATERIALIZED VIEW IF EXISTS kdigo_stages_creatinine CASCADE; \ + CREATE MATERIALIZED VIEW kdigo_stages_creatinine AS \ + with cr_stg AS \ + ( \ + SELECT \ + cr.icustay_id \ + , cr.charttime \ + , cr.creat \ + , case \ + when cr.creat >= (cr.creat_low_past_7day*3.0) then 3 \ + when cr.creat >= 4 \ + and (cr.creat_low_past_48hr <= 3.7 OR cr.creat >= (1.5*cr.creat_low_past_7day)) \ + then 3 \ + when cr.creat >= (cr.creat_low_past_7day*2.0) then 2 \ + when cr.creat >= (cr.creat_low_past_48hr+0.3) then 1 \ + when cr.creat >= (cr.creat_low_past_7day*1.5) then 1 \ + else 0 end as aki_stage_creat \ + FROM kdigo_creat cr \ + ) \ + , tm_stg AS \ + ( \ + SELECT \ + icustay_id, charttime \ + FROM cr_stg \ + ) \ + select \ + ie.icustay_id \ + , ie.DBSOURCE \ + , tm.charttime \ + , cr.creat \ + , cr.aki_stage_creat \ + , cr.aki_stage_creat AS aki_stage \ + FROM mimiciii.icustays ie \ + LEFT JOIN tm_stg tm \ + ON ie.icustay_id = tm.icustay_id \ + LEFT JOIN cr_stg cr \ + ON ie.icustay_id = cr.icustay_id \ + AND tm.charttime = cr.charttime \ + order by ie.icustay_id, tm.charttime; " + + cursor.execute(view) + + +def get_labevents(cursor): + + # -- This query pivots lab values taken during the 7 first days of a patient's stay + # -- Have already confirmed that the unit of measurement is always the same: null or the correct unit + + # -- Extract all bicarbonate, blood urea nitrogen (BUN), calcium, chloride, creatinine, + # hemoglobin, international normalized ratio (INR), platelet, potassium, prothrombin time (PT), + # partial throm- boplastin time (PTT), and white blood count (WBC) values from labevents around patient's ICU stay + + view = "DROP MATERIALIZED VIEW IF EXISTS labstay CASCADE; \ + CREATE materialized VIEW labstay AS \ + SELECT \ + pvt.subject_id, pvt.hadm_id, pvt.icustay_id , pvt.DBSOURCE \ + , min(CASE WHEN label = 'ANION GAP' THEN valuenum ELSE null END) as ANIONGAP_min \ + , max(CASE WHEN label = 'ANION GAP' THEN valuenum ELSE null END) as ANIONGAP_max \ + , min(CASE WHEN label = 'ALBUMIN' THEN valuenum ELSE null END) as ALBUMIN_min \ + , max(CASE WHEN label = 'ALBUMIN' THEN valuenum ELSE null END) as 
ALBUMIN_max \ + , min(CASE WHEN label = 'BANDS' THEN valuenum ELSE null END) as BANDS_min \ + , max(CASE WHEN label = 'BANDS' THEN valuenum ELSE null END) as BANDS_max \ + , min(CASE WHEN label = 'BICARBONATE' THEN valuenum ELSE null END) as BICARBONATE_min \ + , max(CASE WHEN label = 'BICARBONATE' THEN valuenum ELSE null END) as BICARBONATE_max \ + , min(CASE WHEN label = 'BILIRUBIN' THEN valuenum ELSE null END) as BILIRUBIN_min \ + , max(CASE WHEN label = 'BILIRUBIN' THEN valuenum ELSE null END) as BILIRUBIN_max \ + , min(CASE WHEN label = 'CREATININE' THEN valuenum ELSE null END) as CREATININE_min \ + , max(CASE WHEN label = 'CREATININE' THEN valuenum ELSE null END) as CREATININE_max \ + , min(CASE WHEN label = 'CHLORIDE' THEN valuenum ELSE null END) as CHLORIDE_min \ + , max(CASE WHEN label = 'CHLORIDE' THEN valuenum ELSE null END) as CHLORIDE_max \ + , min(CASE WHEN label = 'GLUCOSE' THEN valuenum ELSE null END) as GLUCOSE_min \ + , max(CASE WHEN label = 'GLUCOSE' THEN valuenum ELSE null END) as GLUCOSE_max \ + , min(CASE WHEN label = 'HEMATOCRIT' THEN valuenum ELSE null END) as HEMATOCRIT_min \ + , max(CASE WHEN label = 'HEMATOCRIT' THEN valuenum ELSE null END) as HEMATOCRIT_max \ + , min(CASE WHEN label = 'HEMOGLOBIN' THEN valuenum ELSE null END) as HEMOGLOBIN_min \ + , max(CASE WHEN label = 'HEMOGLOBIN' THEN valuenum ELSE null END) as HEMOGLOBIN_max \ + , min(CASE WHEN label = 'LACTATE' THEN valuenum ELSE null END) as LACTATE_min \ + , max(CASE WHEN label = 'LACTATE' THEN valuenum ELSE null END) as LACTATE_max \ + , min(CASE WHEN label = 'PLATELET' THEN valuenum ELSE null END) as PLATELET_min \ + , max(CASE WHEN label = 'PLATELET' THEN valuenum ELSE null END) as PLATELET_max \ + , min(CASE WHEN label = 'POTASSIUM' THEN valuenum ELSE null END) as POTASSIUM_min \ + , max(CASE WHEN label = 'POTASSIUM' THEN valuenum ELSE null END) as POTASSIUM_max \ + , min(CASE WHEN label = 'PTT' THEN valuenum ELSE null END) as PTT_min \ + , max(CASE WHEN label = 'PTT' THEN valuenum ELSE null END) as PTT_max \ + , min(CASE WHEN label = 'INR' THEN valuenum ELSE null END) as INR_min \ + , max(CASE WHEN label = 'INR' THEN valuenum ELSE null END) as INR_max \ + , min(CASE WHEN label = 'PT' THEN valuenum ELSE null END) as PT_min \ + , max(CASE WHEN label = 'PT' THEN valuenum ELSE null END) as PT_max \ + , min(CASE WHEN label = 'SODIUM' THEN valuenum ELSE null END) as SODIUM_min \ + , max(CASE WHEN label = 'SODIUM' THEN valuenum ELSE null end) as SODIUM_max \ + , min(CASE WHEN label = 'BUN' THEN valuenum ELSE null end) as BUN_min \ + , max(CASE WHEN label = 'BUN' THEN valuenum ELSE null end) as BUN_max \ + , min(CASE WHEN label = 'WBC' THEN valuenum ELSE null end) as WBC_min \ + , max(CASE WHEN label = 'WBC' THEN valuenum ELSE null end) as WBC_max \ + FROM \ + ( SELECT ie.subject_id, ie.hadm_id, ie.icustay_id ,ie.DBSOURCE \ + , CASE \ + WHEN itemid = 50868 THEN 'ANION GAP' \ + WHEN itemid = 50862 THEN 'ALBUMIN' \ + WHEN itemid = 51144 THEN 'BANDS' \ + WHEN itemid = 50882 THEN 'BICARBONATE' \ + WHEN itemid = 50885 THEN 'BILIRUBIN' \ + WHEN itemid = 50912 THEN 'CREATININE' \ + WHEN itemid = 50806 THEN 'CHLORIDE' \ + WHEN itemid = 50902 THEN 'CHLORIDE' \ + WHEN itemid = 50809 THEN 'GLUCOSE' \ + WHEN itemid = 50931 THEN 'GLUCOSE' \ + WHEN itemid = 50810 THEN 'HEMATOCRIT' \ + WHEN itemid = 51221 THEN 'HEMATOCRIT' \ + WHEN itemid = 50811 THEN 'HEMOGLOBIN' \ + WHEN itemid = 51222 THEN 'HEMOGLOBIN' \ + WHEN itemid = 50813 THEN 'LACTATE' \ + WHEN itemid = 51265 THEN 'PLATELET' \ + WHEN itemid = 50822 THEN 
'POTASSIUM' \ + WHEN itemid = 50971 THEN 'POTASSIUM' \ + WHEN itemid = 51275 THEN 'PTT' \ + WHEN itemid = 51237 THEN 'INR' \ + WHEN itemid = 51274 THEN 'PT' \ + WHEN itemid = 50824 THEN 'SODIUM' \ + WHEN itemid = 50983 THEN 'SODIUM' \ + WHEN itemid = 51006 THEN 'BUN' \ + WHEN itemid = 51300 THEN 'WBC' \ + WHEN itemid = 51301 THEN 'WBC' \ + ELSE null \ + END AS label \ + , CASE \ + WHEN itemid = 50862 and valuenum > 10 THEN null \ + WHEN itemid = 50868 and valuenum > 10000 THEN null \ + WHEN itemid = 51144 and valuenum < 0 THEN null \ + WHEN itemid = 51144 and valuenum > 100 THEN null \ + WHEN itemid = 50882 and valuenum > 10000 THEN null \ + WHEN itemid = 50885 and valuenum > 150 THEN null \ + WHEN itemid = 50806 and valuenum > 10000 THEN null \ + WHEN itemid = 50902 and valuenum > 10000 THEN null \ + WHEN itemid = 50912 and valuenum > 150 THEN null \ + WHEN itemid = 50809 and valuenum > 10000 THEN null \ + WHEN itemid = 50931 and valuenum > 10000 THEN null \ + WHEN itemid = 50810 and valuenum > 100 THEN null \ + WHEN itemid = 51221 and valuenum > 100 THEN null \ + WHEN itemid = 50811 and valuenum > 50 THEN null \ + WHEN itemid = 51222 and valuenum > 50 THEN null \ + WHEN itemid = 50813 and valuenum > 50 THEN null \ + WHEN itemid = 51265 and valuenum > 10000 THEN null \ + WHEN itemid = 50822 and valuenum > 30 THEN null \ + WHEN itemid = 50971 and valuenum > 30 THEN null \ + WHEN itemid = 51275 and valuenum > 150 THEN null \ + WHEN itemid = 51237 and valuenum > 50 THEN null \ + WHEN itemid = 51274 and valuenum > 150 THEN null \ + WHEN itemid = 50824 and valuenum > 200 THEN null \ + WHEN itemid = 50983 and valuenum > 200 THEN null \ + WHEN itemid = 51006 and valuenum > 300 THEN null \ + WHEN itemid = 51300 and valuenum > 1000 THEN null \ + WHEN itemid = 51301 and valuenum > 1000 THEN null \ + ELSE le.valuenum \ + END AS valuenum \ + FROM mimiciii.icustays ie \ + LEFT JOIN mimiciii.labevents le \ + ON le.subject_id = ie.subject_id AND le.hadm_id = ie.hadm_id \ + AND le.CHARTTIME between (ie.intime - interval '6' hour) and (ie.intime + interval '7' day)\ + AND le.ITEMID in \ + ( \ + 50868, \ + 50862, \ + 51144, \ + 50882, \ + 50885, \ + 50912, \ + 50902, \ + 50806, \ + 50931, \ + 50809, \ + 51221, \ + 50810, \ + 51222, \ + 50811, \ + 50813, \ + 51265, \ + 50971, \ + 50822, \ + 51275, \ + 51237, \ + 51274, \ + 50983, \ + 50824, \ + 51006, \ + 51301, \ + 51300 \ + ) \ + AND valuenum IS NOT null AND valuenum > 0 \ + ) pvt \ + GROUP BY pvt.subject_id, pvt.hadm_id, pvt.icustay_id , pvt.DBSOURCE \ + ORDER BY pvt.subject_id, pvt.hadm_id, pvt.icustay_id, pvt.DBSOURCE;" + + cursor.execute(view) + + +def get_vitals_chart(cursor): + + # -- This query pivots the vital signs during the first 7 days of a patient's stay + # -- Vital signs include heart rate, blood pressure, respiration rate, and temperature + + view = "DROP MATERIALIZED VIEW IF EXISTS vitalsfirstday CASCADE; \ + create materialized view vitalsfirstday as \ + SELECT pvt.subject_id, pvt.hadm_id, pvt.icustay_id, pvt.DBSOURCE \ + , min(case when VitalID = 1 then valuenum else null end) as HeartRate_Min \ + , max(case when VitalID = 1 then valuenum else null end) as HeartRate_Max \ + , avg(case when VitalID = 1 then valuenum else null end) as HeartRate_Mean \ + , min(case when VitalID = 2 then valuenum else null end) as SysBP_Min \ + , max(case when VitalID = 2 then valuenum else null end) as SysBP_Max \ + , avg(case when VitalID = 2 then valuenum else null end) as SysBP_Mean \ + , min(case when VitalID = 3 then valuenum else null end) as 
DiasBP_Min \ + , max(case when VitalID = 3 then valuenum else null end) as DiasBP_Max \ + , avg(case when VitalID = 3 then valuenum else null end) as DiasBP_Mean \ + , min(case when VitalID = 4 then valuenum else null end) as MeanBP_Min \ + , max(case when VitalID = 4 then valuenum else null end) as MeanBP_Max \ + , avg(case when VitalID = 4 then valuenum else null end) as MeanBP_Mean \ + , min(case when VitalID = 5 then valuenum else null end) as RespRate_Min \ + , max(case when VitalID = 5 then valuenum else null end) as RespRate_Max \ + , avg(case when VitalID = 5 then valuenum else null end) as RespRate_Mean \ + , min(case when VitalID = 6 then valuenum else null end) as TempC_Min \ + , max(case when VitalID = 6 then valuenum else null end) as TempC_Max \ + , avg(case when VitalID = 6 then valuenum else null end) as TempC_Mean \ + , min(case when VitalID = 7 then valuenum else null end) as SpO2_Min \ + , max(case when VitalID = 7 then valuenum else null end) as SpO2_Max \ + , avg(case when VitalID = 7 then valuenum else null end) as SpO2_Mean \ + , min(case when VitalID = 8 then valuenum else null end) as Glucose_Min \ + , max(case when VitalID = 8 then valuenum else null end) as Glucose_Max \ + , avg(case when VitalID = 8 then valuenum else null end) as Glucose_Mean \ + FROM ( \ + select ie.subject_id, ie.hadm_id, ie.icustay_id, ie.DBSOURCE\ + , case \ + when itemid in (211,220045) and valuenum > 0 and valuenum < 300 then 1 \ + when itemid in (51,442,455,6701,220179,220050) and valuenum > 0 and valuenum < 400 then 2 \ + when itemid in (8368,8440,8441,8555,220180,220051) and valuenum > 0 and valuenum < 300 then 3 \ + when itemid in (456,52,6702,443,220052,220181,225312) and valuenum > 0 and valuenum < 300 then 4 \ + when itemid in (615,618,220210,224690) and valuenum > 0 and valuenum < 70 then 5 \ + when itemid in (223761,678) and valuenum > 70 and valuenum < 120 then 6 \ + when itemid in (223762,676) and valuenum > 10 and valuenum < 50 then 6 \ + when itemid in (646,220277) and valuenum > 0 and valuenum <= 100 then 7 \ + when itemid in (807,811,1529,3745,3744,225664,220621,226537) and valuenum > 0 then 8 \ + else null end as VitalID \ + , case when itemid in (223761,678) then (valuenum-32)/1.8 else valuenum end as valuenum \ + from mimiciii.icustays ie \ + left join mimiciii.chartevents ce \ + on ie.subject_id = ce.subject_id and ie.hadm_id = ce.hadm_id and ie.icustay_id = ce.icustay_id \ + and ce.charttime between ie.intime - interval '6' hour and ie.intime + interval '7' day \ + and ce.error IS DISTINCT FROM 1 \ + where ce.itemid in \ + ( \ + 211, \ + 220045, \ + 51, \ + 442, \ + 455, \ + 6701, \ + 220179, \ + 220050, \ + 8368, \ + 8440, \ + 8441, \ + 8555, \ + 220180, \ + 220051, \ + 456, \ + 52, \ + 6702, \ + 443, \ + 220052, \ + 220181, \ + 225312, \ + 618, \ + 615, \ + 220210, \ + 224690, \ + 646, 220277, \ + 807, \ + 811, \ + 1529, \ + 3745, \ + 3744, \ + 225664, \ + 220621, \ + 226537, \ + 223762, \ + 676, \ + 223761, \ + 678 \ + ) \ + ) pvt \ + group by pvt.subject_id, pvt.hadm_id, pvt.icustay_id, pvt.DBSOURCE \ + order by pvt.subject_id, pvt.hadm_id, pvt.icustay_id, pvt.DBSOURCE;" + + cursor.execute(view) + + +def get_comorbidities(cursor): + + view = "DROP MATERIALIZED VIEW IF EXISTS COMORBIDITIES CASCADE; \ + CREATE MATERIALIZED VIEW COMORBIDITIES AS \ + with icd as \ + ( \ + select hadm_id, seq_num, icd9_code \ + from mimiciii.diagnoses_icd \ + where seq_num != 1 \ + ) \ + , eliflg as \ + (\ + select hadm_id, seq_num, icd9_code\ + , CASE\ + when icd9_code in 
('39891','40201','40211','40291','40401','40403','40411','40413','40491','40493') then 1\ + when SUBSTRING(icd9_code FROM 1 for 4) in ('4254','4255','4257','4258','4259') then 1\ + when SUBSTRING(icd9_code FROM 1 for 3) in ('428') then 1\ + else 0 end as CHF\ + , CASE \ + when icd9_code in ('42613','42610','42612','99601','99604') then 1 \ + when SUBSTRING(icd9_code FROM 1 for 4) in ('4260','4267','4269','4270','4271','4272','4273','4274','4276','4278','4279','7850','V450','V533') then 1 \ + else 0 end as ARRHY \ + , CASE \ + when SUBSTRING(icd9_code FROM 1 for 4) in ('0932','7463','7464','7465','7466','V422','V433') then 1 \ + when SUBSTRING(icd9_code FROM 1 for 3) in ('394','395','396','397','424') then 1 \ + else 0 end as VALVE \ + , CASE \ + when SUBSTRING(icd9_code FROM 1 for 4) in ('0930','4373','4431','4432','4438','4439','4471','5571','5579','V434') then 1 \ + when SUBSTRING(icd9_code FROM 1 for 3) in ('440','441') then 1 \ + else 0 end as PERIVASC \ + , CASE \ + when SUBSTRING(icd9_code FROM 1 for 3) in ('401') then 1 \ + else 0 end as HTN \ + , CASE \ + when SUBSTRING(icd9_code FROM 1 for 3) in ('402','403','404','405') then 1 \ + else 0 end as HTNCX \ + , CASE \ + when SUBSTRING(icd9_code FROM 1 for 4) in ('2500','2501','2502','2503') then 1 \ + else 0 end as DM \ + , CASE \ + when SUBSTRING(icd9_code FROM 1 for 4) in ('2504','2505','2506','2507','2508','2509') then 1 \ + else 0 end as DMCX \ + , CASE \ + when SUBSTRING(icd9_code FROM 1 for 4) in ('2409','2461','2468') then 1 \ + when SUBSTRING(icd9_code FROM 1 for 3) in ('243','244') then 1 \ + else 0 end as HYPOTHY \ + , CASE \ + when icd9_code in ('40301','40311','40391','40402','40403','40412','40413','40492','40493') then 1 \ + when SUBSTRING(icd9_code FROM 1 for 4) in ('5880','V420','V451') then 1 \ + when SUBSTRING(icd9_code FROM 1 for 3) in ('585','586','V56') then 1 \ + else 0 end as RENLFAIL \ + , CASE \ + when icd9_code in ('07022','07023','07032','07033','07044','07054') then 1 \ + when SUBSTRING(icd9_code FROM 1 for 4) in ('0706','0709','4560','4561','4562','5722','5723','5724','5728','5733','5734','5738','5739','V427') then 1 \ + when SUBSTRING(icd9_code FROM 1 for 3) in ('570','571') then 1 \ + else 0 end as LIVER \ + , CASE \ + when icd9_code in ('72889','72930') then 1 \ + when SUBSTRING(icd9_code FROM 1 for 4) in ('7010','7100','7101','7102','7103','7104','7108','7109','7112','7193','7285') then 1 \ + when SUBSTRING(icd9_code FROM 1 for 3) in ('446','714','720','725') then 1 \ + else 0 end as ARTH \ + , CASE \ + when SUBSTRING(icd9_code FROM 1 for 4) in ('2871','2873','2874','2875') then 1 \ + when SUBSTRING(icd9_code FROM 1 for 3) in ('286') then 1 \ + else 0 end as COAG \ + , CASE \ + when SUBSTRING(icd9_code FROM 1 for 4) in ('2780') then 1 \ + else 0 end as OBESE \ + , CASE \ + when SUBSTRING(icd9_code FROM 1 for 4) in ('2536') then 1 \ + when SUBSTRING(icd9_code FROM 1 for 3) in ('276') then 1 \ + else 0 end as LYTES \ + , CASE \ + when SUBSTRING(icd9_code FROM 1 for 4) in ('2652','2911','2912','2913','2915','2918','2919','3030','3039','3050','3575','4255','5353','5710','5711','5712','5713','V113') then 1 \ + when SUBSTRING(icd9_code FROM 1 for 3) in ('980') then 1 \ + else 0 end as ALCOHOL \ + , CASE \ + when icd9_code in ('V6542') then 1 \ + when SUBSTRING(icd9_code FROM 1 for 4) in ('3052','3053','3054','3055','3056','3057','3058','3059') then 1 \ + when SUBSTRING(icd9_code FROM 1 for 3) in ('292','304') then 1 \ + else 0 end as DRUG \ + from icd \ + )\ + , eligrp as \ + ( \ + select hadm_id \ + , 
max(chf) as chf \
+    , max(arrhy) as arrhy \
+    , max(valve) as valve \
+    , max(perivasc) as perivasc \
+    , max(htn) as htn \
+    , max(htncx) as htncx \
+    , max(renlfail) as renlfail \
+    , max(dm) as dm \
+    , max(dmcx) as dmcx \
+    , max(hypothy) as hypothy \
+    , max(liver) as liver \
+    , max(arth) as arth \
+    , max(coag) as coag \
+    , max(obese) as obese \
+    , max(lytes) as lytes \
+    , max(alcohol) as alcohol \
+    , max(drug) as drug \
+    from eliflg \
+    group by hadm_id \
+    ) \
+    select adm.hadm_id \
+    , chf as CONGESTIVE_HEART_FAILURE \
+    , arrhy as CARDIAC_ARRHYTHMIAS \
+    , valve as VALVULAR_DISEASE \
+    , perivasc as PERIPHERAL_VASCULAR \
+    , renlfail as RENAL_FAILURE \
+    , case \
+    when htn = 1 then 1 \
+    when htncx = 1 then 1 \
+    else 0 end as HYPERTENSION \
+    , case \
+    when dmcx = 1 then 0 \
+    when dm = 1 then 1 \
+    else 0 end as DIABETES_UNCOMPLICATED \
+    , dmcx as DIABETES_COMPLICATED \
+    , hypothy as HYPOTHYROIDISM \
+    , liver as LIVER_DISEASE \
+    , obese as OBESITY \
+    , alcohol as ALCOHOL_ABUSE \
+    , drug as DRUG_ABUSE \
+    from mimiciii.admissions adm \
+    left join eligrp eli \
+    on adm.hadm_id = eli.hadm_id \
+    order by adm.hadm_id;"
+
+    cursor.execute(view)
+
+
+def count_icustays(cursor):
+    query = "select * from mimiciii.icustays"
+    cursor.execute(query)
+
+    rows = cursor.fetchall()
+    print(len(rows))
+
+
+def preprocess(db: str, host: str, user: str, password: str, output_path: str):
+    os.makedirs(output_path, exist_ok=True)
+
+    try:
+        conn = psycopg2.connect(host=host, user=user, password=password, database=db)
+        cursor = conn.cursor()
+    except Exception as error:
+        # Abort instead of continuing with an undefined cursor
+        print(error)
+        raise
+
+    print("connection succeeded")
+
+    urine_output(cursor)
+    print("view urine_output created")
+    echo_data(cursor)
+    print("view echo_data created")
+    weight_duration(cursor)
+    print("view weight_duration created")
+    urine_kidigo(cursor)
+    print("view urine_kidigo created")
+    creatinine(cursor)
+    print("view creatinine created")
+
+    kidigo_stages_creatinine(cursor)
+    print("view kidigo_stages_creatinine created")
+    kidigo_7_days_creatinine(cursor)
+    print("view kidigo_7_days_creatinine created")
+    query = "select * from kdigo_7_days_creatinine"
+    df = pd.read_sql_query(query, conn)
+
+    df.to_csv(
+        os.path.join(output_path, "AKI_KIDIGO_7D_SQL_CREATININE_DBSOURCE.csv"),
+        encoding="utf-8",
+        header=True,
+    )
+
+    get_labevents(cursor)
+
+    query = "select * from labstay"
+    df = pd.read_sql_query(query, conn)
+    df.to_csv(
+        os.path.join(output_path, "labstay_DBSOURCE.csv"),
+        encoding="utf-8",
+        header=True,
+    )
+
+    get_vitals_chart(cursor)
+
+    query = "select * from vitalsfirstday"
+    df = pd.read_sql_query(query, conn)
+    df.to_csv(
+        os.path.join(output_path, "chart_vitals_stay_DBSOURCE.csv"),
+        encoding="utf-8",
+        header=True,
+    )
+
+    get_comorbidities(cursor)
+
+    query = "select * from COMORBIDITIES"
+    df = pd.read_sql_query(query, conn)
+    df.to_csv(
+        os.path.join(output_path, "comorbidities_DBSOURCE.csv"),
+        encoding="utf-8",
+        header=True,
+    )
+
+    count_icustays(cursor)
+
+
+def main():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument(
+        "--host",
+        type=str,
+        default="localhost",
+        help="Database Host containing MIMIC III",
+    )
+    parser.add_argument(
+        "--db",
+        type=str,
+        default="mimic",
+        help="Database name containing MIMIC III data",
+    )
+    parser.add_argument(
+        "--user",
+        type=str,
+        default="postgres",
+        help="Database user",
+    )
+    parser.add_argument(
+        "--password",
+        type=str,
+        default="postgres",
+        help="Database user's password",
+    )
+    parser.add_argument(
+        "--out_path",
+        type=str,
+        default="./formatted",
+        help="Output path to save formatted MIMIC III",
+    )
+
+    args = parser.parse_args()
+
+    preprocess(args.db, args.host, args.user, args.password, args.out_path)
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/data/mimiciii/preprocess.py b/data/mimiciii/preprocess.py
new file mode 100644
index 0000000..14b4f71
--- /dev/null
+++ b/data/mimiciii/preprocess.py
@@ -0,0 +1,410 @@
+import os
+
+import argparse
+
+import numpy as np
+import pandas as pd
+
+
+class Reader:
+    def __init__(self, data_path):
+        self.data_path = data_path
+
+    def read_admissions_table(self):
+        df = pd.read_csv(
+            os.path.join(self.data_path, "ADMISSIONS.csv.gz"),
+            compression="gzip",
+            header=0,
+            index_col=0,
+        )
+        df.columns = map(str.upper, df.columns)
+        df.ADMITTIME = pd.to_datetime(df.ADMITTIME)
+        df.DISCHTIME = pd.to_datetime(df.DISCHTIME)
+        df = df[["SUBJECT_ID", "HADM_ID", "ADMITTIME", "DISCHTIME", "ETHNICITY"]]
+        return df
+
+    def read_icustay_table(self):
+        df = pd.read_csv(
+            os.path.join(self.data_path, "ICUSTAYS.csv.gz"),
+            compression="gzip",
+            header=0,
+            index_col=0,
+        )
+        df.columns = map(str.upper, df.columns)
+        df.INTIME = pd.to_datetime(df.INTIME)
+        df.OUTTIME = pd.to_datetime(df.OUTTIME)
+        df = df[["SUBJECT_ID", "HADM_ID", "ICUSTAY_ID", "INTIME", "OUTTIME", "LOS"]]
+        return df
+
+    def read_d_icd_diagnoses_table(self):
+        d_icd_diagnoses = pd.read_csv(
+            os.path.join(self.data_path, "D_ICD_DIAGNOSES.csv.gz"),
+            compression="gzip",
+            header=0,
+            index_col=0,
+        )
+        d_icd_diagnoses.columns = map(str.upper, d_icd_diagnoses.columns)
+        return d_icd_diagnoses
+
+    def read_d_items_table(self):
+        d_items = pd.read_csv(
+            os.path.join(self.data_path, "D_ITEMS.csv.gz"),
+            compression="gzip",
+            header=0,
+            index_col=0,
+        )
+        d_items.columns = map(str.upper, d_items.columns)
+        d_items = d_items[["ITEMID", "LABEL", "DBSOURCE", "PARAM_TYPE"]]
+        return d_items
+
+    def read_d_labitems_table(self):
+        d_labitems = pd.read_csv(
+            os.path.join(self.data_path, "D_LABITEMS.csv.gz"),
+            compression="gzip",
+            header=0,
+            index_col=0,
+        )
+        d_labitems.columns = map(str.upper, d_labitems.columns)
+        d_labitems = d_labitems[["ITEMID", "LABEL", "FLUID", "CATEGORY"]]
+        return d_labitems
+
+    def read_patients_table(self):
+        patients = pd.read_csv(
+            os.path.join(self.data_path, "PATIENTS.csv.gz"),
+            compression="gzip",
+            header=0,
+            index_col=0,
+        )
+        patients.columns = map(str.upper, patients.columns)
+        patients = patients[["SUBJECT_ID", "GENDER", "DOB"]]
+        return patients
+
+    def read_diagnoses_icd_table(self):
+        diagnoses_icd = pd.read_csv(
+            os.path.join(self.data_path, "DIAGNOSES_ICD.csv.gz"),
+            compression="gzip",
+            header=0,
+            index_col=0,
+        )
+        diagnoses_icd.columns = map(str.upper, diagnoses_icd.columns)
+        diagnoses_icd = diagnoses_icd[["SUBJECT_ID", "HADM_ID", "ICD9_CODE", "SEQ_NUM"]]
+        return diagnoses_icd
+
+
+cache = {}
+
+
+def get_info_admissions(reader: Reader, formatted_path: str):
+
+    df = reader.read_admissions_table()
+    df["STAYTIME"] = (
+        df["DISCHTIME"] - df["ADMITTIME"]
+    )  # stay time : discharge time - admission time
+    df["STAYTIME"] = df["STAYTIME"] / np.timedelta64(1, "h")
+
+    # formula to calculate the age of patients in MIMIC3
+
+    patients = reader.read_patients_table()
+    df = pd.merge(df, patients, how="left", on="SUBJECT_ID")
+    df["DOB"] = pd.to_datetime(df["DOB"])
+    df["ADMITTIME"] = pd.to_datetime(df["ADMITTIME"])
+    df["AGE"] = df["ADMITTIME"].dt.year - df["DOB"].dt.year
+
+    # Patients who are older than 89 years old at any time in
the database
+    # have had their date of birth shifted to obscure their age and comply with HIPAA.
+    # The date of birth was then set to exactly 300 years before their first admission.
+    df.loc[((df.AGE > 89) | (df.AGE < 0)), "AGE"] = 90
+
+    icustays = reader.read_icustay_table()
+
+    # merge on HADM_ID, which is unique and represents a single admission to the hospital,
+    # while SUBJECT_ID can repeat when a patient has had several hospital stays
+    df = pd.merge(df, icustays, how="right", on="HADM_ID")
+
+    # the elapsed time between the admission to the hospital and the transfer to the ICU
+    df["Time go ICU"] = (df["INTIME"] - df["ADMITTIME"]) / np.timedelta64(1, "h")
+
+    # the elapsed time in the ICU
+    df["Time in ICU"] = (df["OUTTIME"] - df["INTIME"]) / np.timedelta64(1, "h")
+
+    # the elapsed time between the admission to the ICU and the final discharge from the hospital
+    df["Time after go ICU"] = (df["DISCHTIME"] - df["INTIME"]) / np.timedelta64(1, "h")
+
+    # number of times the patient has been transferred to the ICU during one admission
+    df["Count times go ICU"] = df.groupby("HADM_ID")["ICUSTAY_ID"].transform("count")
+
+    with open(os.path.join(formatted_path, "ADMISSIONS.csv"), "w") as f:
+        df.to_csv(f, encoding="utf-8", header=True)
+
+
+def check_AKI_before(hadm_id, dataset_path: str):
+    key = "check_AKI_before"
+    global cache
+
+    if key not in cache:
+        diagnoses = pd.read_csv(
+            os.path.join(dataset_path, "DIAGNOSES_ICD.csv.gz"), compression="gzip"
+        )
+        diagnoses.columns = map(str.upper, diagnoses.columns)
+        diagnoses = diagnoses.loc[
+            diagnoses["ICD9_CODE"].isin(["5845", "5846", "5847", "5848"])
+        ]
+        cache[key] = diagnoses
+
+    diagnoses = cache[key]
+
+    if not diagnoses[diagnoses["HADM_ID"].isin(hadm_id)].empty:
+        return True
+
+    return False
+
+
+def check_CKD(hadm_id, dataset_path: str):
+    key = "check_CKD"
+    global cache
+
+    if key not in cache:
+        diagnoses = pd.read_csv(
+            os.path.join(dataset_path, "DIAGNOSES_ICD.csv.gz"), compression="gzip"
+        )
+        diagnoses.columns = map(str.upper, diagnoses.columns)
+        diagnoses = diagnoses.loc[
+            diagnoses["ICD9_CODE"].isin(["5851", "5852", "5853", "5854", "5855"])
+        ]
+        cache[key] = diagnoses
+
+    diagnoses = cache[key]
+
+    if not diagnoses[diagnoses["HADM_ID"].isin(hadm_id)].empty:
+        return True
+
+    return False
+
+
+def check_renal_failure(hadm_id, formatted_path: str):
+    key = "check_renal_failure"
+    global cache
+
+    if key not in cache:
+        diagnoses = pd.read_csv(
+            os.path.join(formatted_path, "comorbidities_DBSOURCE.csv")
+        )
+        diagnoses.columns = map(str.upper, diagnoses.columns)
+        diagnoses = diagnoses.loc[diagnoses["RENAL_FAILURE"] == 1]
+        cache[key] = diagnoses
+
+    diagnoses = cache[key]
+
+    if not diagnoses[diagnoses["HADM_ID"].isin(hadm_id)].empty:
+        return True
+
+    return False
+
+
+# MDRD study equation: eGFR = 186 * SCr^(-1.154) * age^(-0.203),
+# multiplied by 0.742 for female patients and by 1.21 for Black patients.
+def caculate_eGFR_MDRD_equation(cr, gender, eth, age):
+    temp = 186 * (cr ** (-1.154)) * (age ** (-0.203))
+    if gender == "F":
+        temp = temp * 0.742
+    if eth == "BLACK/AFRICAN AMERICAN":
+        temp = temp * 1.21
+    return temp
+
+
+def get_aki_patients_7days_creatinine(reader: Reader, formatted_path: str):
+    dataset_path = reader.data_path
+
+    df = pd.read_csv(os.path.join(formatted_path, "ADMISSIONS.csv"))
+    df = df.sort_values(by=["SUBJECT_ID_x", "HADM_ID", "ICUSTAY_ID"])
+
+    print("admissions info", df.shape)
+    print("number of unique subjects in admission: ", df["SUBJECT_ID_x"].nunique())
+    print("number of icustays info in admissions: ", df["ICUSTAY_ID"].nunique())
+
+    info_save = df.drop_duplicates(subset=["ICUSTAY_ID"])
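+    # Placeholder columns, carried into df_save by the merge below and filled per ICU stay:
+    # AKI starts at -1, is set from the 7-day KDIGO creatinine label (AKI_7DAY), and is then
+    # overridden with 2 (CKD), 3 (AKI diagnosed on admission) or 4 (renal-failure comorbidity);
+    # EGFR is computed with the MDRD equation above.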
info_save["AKI"] = -1 + info_save["EGFR"] = -1 + + print( + "the biggest number of ICU stays for a patient: ", + info_save["Count times go ICU"].max(), + ) + + c_aki_7d = pd.read_csv( + os.path.join(formatted_path, "AKI_KIDIGO_7D_SQL_CREATININE_DBSOURCE.csv") + ) + c_aki_7d.columns = map(str.upper, c_aki_7d.columns) + c_aki_7d = c_aki_7d.drop(columns=["UNNAMED: 0"]) + print("c_aki_7d infos") + print("Total icustays: ", c_aki_7d["ICUSTAY_ID"].nunique()) + print( + "NORMAL Patients in 7DAY: {}".format( + c_aki_7d.loc[c_aki_7d["AKI_STAGE_7DAY"] == 0]["ICUSTAY_ID"].count() + ) + ) + print( + "AKI patients STAGE 1 within 7DAY: {}".format( + c_aki_7d.loc[c_aki_7d["AKI_STAGE_7DAY"] == 1]["ICUSTAY_ID"].count() + ) + ) + print( + "AKI Patients STAGE 2 in 7DAY: {}".format( + c_aki_7d.loc[c_aki_7d["AKI_STAGE_7DAY"] == 2]["ICUSTAY_ID"].count() + ) + ) + print( + "AKI Patients STAGE 3 7DAY: {}".format( + c_aki_7d.loc[c_aki_7d["AKI_STAGE_7DAY"] == 3]["ICUSTAY_ID"].count() + ) + ) + print( + "NAN patients within 7DAY: {}".format(c_aki_7d["AKI_STAGE_7DAY"].isna().sum()) + ) + c_aki_7d = c_aki_7d.dropna(subset=["AKI_STAGE_7DAY"]) + + print("Total icustays: ", c_aki_7d["ICUSTAY_ID"].nunique()) + + df_save = pd.merge(info_save, c_aki_7d, how="inner", on="ICUSTAY_ID") + df_save.columns = map(str.upper, df_save.columns) + icustays_data = [frame for season, frame in df_save.groupby(["ICUSTAY_ID"])] + + count_ckd_normal = 0 + count_ckd_aki = 0 + count_akibefore_normal = 0 + count_akibefore_aki = 0 + count_normal = 0 + count_aki = 0 + count_renalfailure_normal = 0 + count_renalfailure_aki = 0 + + for temp in icustays_data: + + temp = temp.sort_values(by=["ICUSTAY_ID"]) + + first_row = temp.iloc[0] + gender = first_row["GENDER"] + age = first_row["AGE"] + eth = first_row["ETHNICITY"] + cr = first_row["CREAT"] + icustay_id = first_row["ICUSTAY_ID"] + + eGFR = caculate_eGFR_MDRD_equation(cr=cr, gender=gender, age=age, eth=eth) + + df_save.loc[df_save["ICUSTAY_ID"] == icustay_id, "EGFR"] = eGFR + df_save.loc[df_save["ICUSTAY_ID"] == icustay_id, "AKI"] = c_aki_7d.loc[ + c_aki_7d["ICUSTAY_ID"] == icustay_id + ]["AKI_7DAY"].values[0] + + if df_save.loc[df_save["ICUSTAY_ID"] == icustay_id, "AKI"].values[0] == 1: + count_aki = count_aki + 1 + else: + count_normal = count_normal + 1 + + has_aki = ( + info_save.loc[info_save["ICUSTAY_ID"] == icustay_id, "AKI"].values[0] == 1 + ) + + if check_CKD(temp["HADM_ID"], dataset_path) == True: + df_save.loc[df_save["ICUSTAY_ID"] == icustay_id, "AKI"] = 2 + if has_aki: + count_ckd_aki = count_ckd_aki + 1 + else: + count_ckd_normal = count_ckd_normal + 1 + + if check_AKI_before(temp["HADM_ID"], dataset_path) == True: + df_save.loc[df_save["ICUSTAY_ID"] == icustay_id, "AKI"] = 3 + if has_aki: + count_akibefore_aki = count_akibefore_aki + 1 + else: + count_akibefore_normal = count_akibefore_normal + 1 + + if check_renal_failure(temp["HADM_ID"], formatted_path) == True: + df_save.loc[df_save["ICUSTAY_ID"] == icustay_id, "AKI"] = 4 + if has_aki: + count_renalfailure_aki = count_renalfailure_aki + 1 + else: + count_renalfailure_normal = count_renalfailure_normal + 1 + + lab = pd.read_csv(os.path.join(formatted_path, "labstay_DBSOURCE.csv")) + lab.columns = map(str.upper, lab.columns) + info_save = pd.merge(df_save, lab, how="left", on="ICUSTAY_ID") + cols_to_drop = set(info_save.columns).intersection( + set(["UNNAMED: 0_x", "UNNAMED: 0_y", "SUBJECT_ID"]) + ) + if len(cols_to_drop) > 0: + info_save = info_save.drop(columns=list(cols_to_drop)) + info_save = info_save.rename( + 
columns={"SUBJECT_ID_X": "SUBJECT_ID", "HADM_ID_x": "HADM_ID"} + ) + + chart = pd.read_csv(os.path.join(formatted_path, "chart_vitals_stay_DBSOURCE.csv")) + chart.columns = map(str.upper, chart.columns) + df_save = pd.merge(info_save, chart, how="left", on="ICUSTAY_ID") + df_save = df_save.drop( + columns=["UNNAMED: 0", "HADM_ID_y", "HADM_ID_y", "SUBJECT_ID_Y", "SUBJECT_ID_y"] + ) + df_save = df_save.rename( + columns={"SUBJECT_ID_X": "SUBJECT_ID", "HADM_ID_x": "HADM_ID"} + ) + + comorbidities = pd.read_csv( + os.path.join(formatted_path, "comorbidities_DBSOURCE.csv") + ) + comorbidities.columns = map(str.upper, comorbidities.columns) + info_save = pd.merge(df_save, comorbidities, how="left", on="HADM_ID") + info_save = info_save.drop(columns=["UNNAMED: 0"]) + + print( + "NORMAL Patients in 7DAY: {}".format( + c_aki_7d.loc[c_aki_7d["AKI_STAGE_7DAY"] == 0]["ICUSTAY_ID"].count() + ) + ) + print( + "AKI patients STAGE 1 within 7DAY: {}".format( + c_aki_7d.loc[c_aki_7d["AKI_STAGE_7DAY"] == 1]["ICUSTAY_ID"].count() + ) + ) + print("CKD counted as normal: {}".format(count_ckd_normal)) + print("CKD counted as aki: {}".format(count_ckd_aki)) + print("AKI on admission counted as normal: {}".format(count_akibefore_normal)) + print("AKI on admission counted as aki: {}".format(count_akibefore_aki)) + print("RENAL FAILURE counted as normal: {}".format(count_renalfailure_normal)) + print("RENAL FAILURE counted as aki: {}".format(count_renalfailure_aki)) + print("normal: {}".format(count_normal)) + print("aki: {}".format(count_aki)) + + with open( + os.path.join(formatted_path, "INFO_DATASET_7days_creatinine.csv"), "w" + ) as f: + info_save.to_csv(f, encoding="utf-8", header=True) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--formatted_path", + type=str, + default="./formatted", + help='Path to formatted MIMIC III data from "parse_db.py"', + ) + parser.add_argument( + "--data_path", + type=str, + default="./data", + help="Path to gzipped MIMIC III data", + ) + + args = parser.parse_args() + + formatted_path = args.formatted_path + data_path = args.data_path + + reader = Reader(data_path=data_path) + + os.makedirs(formatted_path, exist_ok=True) + + get_info_admissions(reader, formatted_path) + get_aki_patients_7days_creatinine(reader, formatted_path) + + +if __name__ == "__main__": + main() diff --git a/experiments/scripts/analyze_aki_hpo.py b/experiments/scripts/analyze_aki_hpo.py index fe88b33..7266237 100644 --- a/experiments/scripts/analyze_aki_hpo.py +++ b/experiments/scripts/analyze_aki_hpo.py @@ -124,7 +124,7 @@ def get_topk_attrs(attrs: np.ndarray, filepath: str): top_col_ids, top_cols_counts = np.unique(topk_ids, return_counts=True) ids = np.argpartition(-top_cols_counts, kth=k)[:k] - + cur_k_top_cols = dict( zip( data_cols[top_col_ids[ids]].tolist(), @@ -407,6 +407,12 @@ def main(): """Main execution function.""" # arguments parser = ArgumentParser() + parser.add_argument( + "--data_path", + default="data/mimiciii/formatted/data.csv", + type="str", + help="Path to preprocessed MIMIC III dataset", + ) parser.add_argument( "--n_trials", default=20, @@ -433,7 +439,7 @@ def main(): model = model.to(device) model.eval() - dataset = get_aki_dataset_from_hf(repo_id="enver1323/mimic3-aki-detection") + dataset = get_aki_dataset(args.data_path) if not dataset: raise ValueError(f"Could not load dataset containing") diff --git a/experiments/utils/datasets.py b/experiments/utils/datasets.py index 78c24ac..113a46e 100644 --- a/experiments/utils/datasets.py +++ 
b/experiments/utils/datasets.py @@ -398,10 +398,10 @@ def get_imagenet_sample_from_hf( def get_aki_dataset_from_hf( - repo_id: str = "enver1323/mimic3-aki-detection", + data_path: str = "data/mimiciii/formatted/data.csv", test_split: float = 0.2, ) -> AKIDataset: - data = load_dataset(repo_id)["train"].to_pandas() + data = pd.read_csv(data_path) data = data.replace([np.inf, -np.inf], np.nan).dropna() data = data[AKI_COLUMNS] From 908f78f02cd41bc7b70a3ece7146426649903fbb Mon Sep 17 00:00:00 2001 From: Enver Menadjiev Date: Fri, 21 Nov 2025 13:36:40 +0900 Subject: [PATCH 11/20] feat: change aki dataset loading script --- experiments/scripts/analyze_aki_hpo.py | 4 ++-- experiments/utils/__init__.py | 4 ++-- experiments/utils/datasets.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/experiments/scripts/analyze_aki_hpo.py b/experiments/scripts/analyze_aki_hpo.py index 7266237..3aa589e 100644 --- a/experiments/scripts/analyze_aki_hpo.py +++ b/experiments/scripts/analyze_aki_hpo.py @@ -38,7 +38,7 @@ from pnpxai.core._types import DataSource from experiments.datasets import AKI_COLUMNS -from experiments.utils import set_seed, get_aki_model_from_hf, get_aki_dataset_from_hf +from experiments.utils import set_seed, get_aki_model_from_hf, get_aki_dataset import json @@ -410,7 +410,7 @@ def main(): parser.add_argument( "--data_path", default="data/mimiciii/formatted/data.csv", - type="str", + type=str, help="Path to preprocessed MIMIC III dataset", ) parser.add_argument( diff --git a/experiments/utils/__init__.py b/experiments/utils/__init__.py index 2570e37..2a02d41 100644 --- a/experiments/utils/__init__.py +++ b/experiments/utils/__init__.py @@ -22,7 +22,7 @@ get_livertumor_dataset, get_livertumor_dataset_from_hf, get_imagenet_sample_from_hf, - get_aki_dataset_from_hf, + get_aki_dataset, ) from .models import ( @@ -52,7 +52,7 @@ 'VQADataset', 'get_vqa_dataset', 'get_livertumor_dataset', 'get_livertumor_dataset_from_hf', 'get_imagenet_sample_from_hf', - 'get_aki_dataset_from_hf', + 'get_aki_dataset', # models 'get_torchvision_model', 'get_livertumor_model', 'get_livertumor_model_from_hf', diff --git a/experiments/utils/datasets.py b/experiments/utils/datasets.py index 113a46e..5d9445f 100644 --- a/experiments/utils/datasets.py +++ b/experiments/utils/datasets.py @@ -397,7 +397,7 @@ def get_imagenet_sample_from_hf( return img, label -def get_aki_dataset_from_hf( +def get_aki_dataset( data_path: str = "data/mimiciii/formatted/data.csv", test_split: float = 0.2, ) -> AKIDataset: From 784aac972eccdb38c9fc3921b23f68597aa8f9a0 Mon Sep 17 00:00:00 2001 From: Enver Menadjiev Date: Fri, 21 Nov 2025 14:30:11 +0900 Subject: [PATCH 12/20] feat: update readme.md --- README.md | 2 +- data/mimiciii/README.md | 85 ++++++++++++++++++++++++++++++++++++++++ data/mimiciii/cleanup.py | 4 +- 3 files changed, 88 insertions(+), 3 deletions(-) create mode 100644 data/mimiciii/README.md diff --git a/README.md b/README.md index 0e8c3aa..9bdf726 100644 --- a/README.md +++ b/README.md @@ -209,7 +209,7 @@ This experiment analyzes the effect of HPO (optimizing for AbPC) on explanations #### Data and Model - * **Data (MIMIC III):** The **MIMIC III dataset** used in this experiment is hosted on Hugging Face Hub: [➡️ enver1323/mimic3-aki-detection](https://huggingface.co/datasets/enver1323/mimic3-aki-detection). This dataset contains patient data derived from the original [MIMIC III dataset](https://doi.org/10.13026/C2XW26). The script **automatically downloads** the necessary files when first executed. 
For more details on the data loading process, refer to the `get_aki_dataset_from_hf` function within [`experiments/utils/datasets.py`](./experiments/utils/datasets.py) and the [➡️ enver1323/mimic3-aki-detection](https://huggingface.co/datasets/enver1323/mimic3-aki-detection).
+ * **Data (MIMIC III):** The **MIMIC III dataset** used in this experiment is hosted on PhysioNet: [➡️ MIMIC-III Clinical Database](https://physionet.org/content/mimiciii/1.4/). This work uses the latest version (1.4) of the [MIMIC III dataset](https://doi.org/10.13026/C2XW26). To use the analysis script, the dataset needs to be downloaded, built, and formatted. After downloading the dataset from the official source [MIMIC III dataset](https://doi.org/10.13026/C2XW26), users should build the PostgreSQL version of the database with the official [Github code](https://github.com/MIT-LCP/mimic-code/tree/main/mimic-iii/buildmimic/postgres). The built database can then be formatted with the scripts in the [`/data/mimiciii`](./data/mimiciii/) directory; thorough instructions on the data transformation are provided in its [`README.md`](./data/mimiciii/README.md). Once the formatted data has been generated, the analysis script **loads** the necessary files when first executed. For more details on the data loading process, refer to the `get_aki_dataset` function within [`experiments/utils/datasets.py`](./experiments/utils/datasets.py).
  * **Model (AKI Classifier):** The pre-trained **Linear model** adapted for this task is hosted on Hugging Face Hub: [➡️ enver1323/aki-classifier](https://huggingface.co/enver1323/aki-classifier). Similar to the dataset, the script **automatically downloads** the model weights. The model architecture is defined in [`experiments/models/aki.py`](./experiments/models/aki.py). For more details on model loading, refer to the `get_aki_model_from_hf` function within [`experiments/utils/models.py`](./experiments/utils/models.py).
diff --git a/data/mimiciii/README.md b/data/mimiciii/README.md
new file mode 100644
index 0000000..0395136
--- /dev/null
+++ b/data/mimiciii/README.md
@@ -0,0 +1,85 @@
+# MIMIC III Data Generation
+
+## Data Loading
+
+[MIMIC III Clinical Database](https://doi.org/10.13026/C2XW26) is a large database of anonymized data from more than forty thousand patients. The data provided at the source should first be downloaded and built. The building process is provided by the official [Github package](https://github.com/MIT-LCP/mimic-code/tree/main/mimic-iii/buildmimic/postgres). We use the PostgreSQL version of the built database for quick and convenient data querying.
+
+## Data Formatting
+
+Once MIMIC III has been built on a PostgreSQL DBMS, the data needs to be preprocessed. This process comprises three essential stages (a full pipeline example is given at the end of this document):
+* Database parsing
+* Preprocessing
+* Data cleanup
+
+### Database Parsing
+
+The [`parse_db.py`](./parse_db.py) script connects to the database and builds the essential materialized views, which can then be queried for quick data extraction.
+
+#### Usage
+
+```bash
+python parse_db.py \
+    --host localhost \
+    --db mimic \
+    --user postgres \
+    --password postgres \
+    --out_path ./formatted
+```
+
+#### Arguments
+
+ * `--host `: The host of the PostgreSQL DBMS containing the built MIMIC III database. The default value is set to `localhost`.
+ * `--db `: The name of the PostgreSQL database containing the built MIMIC III data. The default value is set to `mimic`.
+ * `--user `: The user owning the PostgreSQL database that contains the built MIMIC III data. The default value is set to `postgres`.
+ * `--password `: The password of the user owning the PostgreSQL database that contains the built MIMIC III data. The default value is set to `postgres`.
+ * `--out_path `: The target directory for the intermediate files produced by the script. The default value is set to `./formatted`.
+
+#### Output
+
+Results will be saved under the path specified by `--out_path`. The list of files in the directory is expected to consist of:
+ * `AKI_KIDIGO_7D_SQL_CREATININE_DBSOURCE.csv`
+ * `labstay_DBSOURCE.csv`
+ * `chart_vitals_stay_DBSOURCE.csv`
+ * `comorbidities_DBSOURCE.csv`
+
+### Preprocessing
+
+The [`preprocess.py`](./preprocess.py) script combines the data from the previous step with the gzipped source tables to build the intermediate version of the dataset, which is cleaned up in the next stage.
+
+#### Usage
+
+```bash
+python preprocess.py \
+    --formatted_path ./formatted \
+    --data_path ./data
+```
+
+#### Arguments
+
+ * `--formatted_path `: The path where the files produced by `parse_db.py` are stored. The default value is set to `./formatted`.
+ * `--data_path `: The path to the gzipped MIMIC III data. The default value is set to `./data`.
+
+#### Output
+
+Results will be saved under the path specified by `--formatted_path`. The directory is expected to be extended with `INFO_DATASET_7days_creatinine.csv`.
+
+### Data Cleanup
+
+The [`cleanup.py`](./cleanup.py) script cleans up the data from the previous step and prepares it for the analysis.
+
+#### Usage
+
+```bash
+python cleanup.py \
+    --data_path ./formatted/INFO_DATASET_7days_creatinine.csv \
+    --formatted_path ./formatted/data.csv
+```
+
+#### Arguments
+
+ * `--data_path `: The path where the file produced by `preprocess.py` (`INFO_DATASET_7days_creatinine.csv`) is stored. The default value is set to `./formatted/INFO_DATASET_7days_creatinine.csv`.
+ * `--formatted_path `: The target path for the final version of the data, ready for analysis. The default value is set to `./formatted/data.csv`.
+
+#### Output
+
+Results will be saved to the path specified by `--formatted_path` (by default, `./formatted/data.csv`).
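+
+### Full Pipeline Example
+
+As a quick reference, the three stages above can be chained as follows. This is a minimal sketch that assumes the default database credentials and paths documented in each section; adjust them to your setup.
+
+```bash
+# 1. Build the materialized views and export the intermediate CSVs
+python parse_db.py \
+    --host localhost \
+    --db mimic \
+    --user postgres \
+    --password postgres \
+    --out_path ./formatted
+
+# 2. Combine the exported CSVs with the gzipped source tables
+python preprocess.py \
+    --formatted_path ./formatted \
+    --data_path ./data
+
+# 3. Clean up the merged table and write the final dataset
+python cleanup.py \
+    --data_path ./formatted/INFO_DATASET_7days_creatinine.csv \
+    --formatted_path ./formatted/data.csv
+```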
diff --git a/data/mimiciii/cleanup.py b/data/mimiciii/cleanup.py index 34d6824..1d2e22a 100644 --- a/data/mimiciii/cleanup.py +++ b/data/mimiciii/cleanup.py @@ -184,13 +184,13 @@ def main(): parser.add_argument( "--formatted_path", type=str, - default="./formatted", + default="./formatted/data.csv", help="Output path to store cleaned data", ) args = parser.parse_args() df = cleanup_data(args.data_path) - df.to_csv(os.path.join(args.formatted_path, "data.csv")) + df.to_csv(args.formatted_path) if __name__ == "__main__": From 708bbc5decd56ff6018bc16cd87ffa09c44e5985 Mon Sep 17 00:00:00 2001 From: shiningstone23 Date: Wed, 12 Nov 2025 14:30:57 +0900 Subject: [PATCH 13/20] init commit --- experiments/scripts/explain_wine_quality.py | 36 +++++++++++++++++++++ experiments/scripts/my_script.sh | 7 ++++ 2 files changed, 43 insertions(+) create mode 100644 experiments/scripts/explain_wine_quality.py create mode 100644 experiments/scripts/my_script.sh diff --git a/experiments/scripts/explain_wine_quality.py b/experiments/scripts/explain_wine_quality.py new file mode 100644 index 0000000..43b6ecb --- /dev/null +++ b/experiments/scripts/explain_wine_quality.py @@ -0,0 +1,36 @@ +import argparse + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Explain Wine Quality Dataset") + parser.add_argument( + "--data_id", + type=int, + default=2280, + help="Data ID for the wine quality dataset", + ) + parser.add_argument( + "--n_trials", + type=int, + default=100, + help="Number of trials for the experiment", + ) + parser.add_argument( + "--analyze", + action="store_true", + help="Flag to perform analysis", + ) + parser.add_argument( + "--visualize", + action="store_true", + help="Flag to generate visualizations", + ) + + args = parser.parse_args() + + # Placeholder for the main logic of the script + print(f"Data ID: {args.data_id}") + print(f"Number of Trials: {args.n_trials}") + if args.analyze: + print("Performing analysis...") + if args.visualize: + print("Generating visualizations...") \ No newline at end of file diff --git a/experiments/scripts/my_script.sh b/experiments/scripts/my_script.sh new file mode 100644 index 0000000..70fd145 --- /dev/null +++ b/experiments/scripts/my_script.sh @@ -0,0 +1,7 @@ +docker run -it -v "$(pwd)":/root/pnpxai-experiments --gpus '"device=0"' --name pnpxai_exp seongun/ubuntu22.04-cuda12.2.2-cudnn8-pytorch2.1:base + +python -m experiments.scripts.explain_wine_quality \ + --data_id 2280 \ + --n_trials 100 \ + --analyze \ + --visualize \ No newline at end of file From 387856fc32eb86b5b05729734ab57972c128c29b Mon Sep 17 00:00:00 2001 From: shiningstone23 Date: Sat, 15 Nov 2025 09:04:01 +0900 Subject: [PATCH 14/20] commit --- .gitignore | 3 + Dockerfile.wine_quality | 64 + README.md | 25 + configs/tabular/dataset_config.yaml | 41 + configs/tabular/explainer_config.yaml | 131 + configs/tabular/model_config.yaml | 51 + configs/tabular/optuna_config.yaml | 22 + data/Wine Quality/X_test.npy | Bin 0 -> 114528 bytes data/Wine Quality/X_train.npy | Bin 0 -> 457464 bytes data/Wine Quality/download.sh | 9 + data/Wine Quality/feature_metadata.pkl | Bin 0 -> 2500 bytes data/Wine Quality/preprocess.py | 268 + data/Wine Quality/raw_data.csv | 6498 +++++++++++++++++ data/Wine Quality/resnet_model.pth | Bin 0 -> 149454 bytes data/Wine Quality/xgb_model.json | 1 + data/Wine Quality/y_test.npy | Bin 0 -> 10528 bytes data/Wine Quality/y_train.npy | Bin 0 -> 41704 bytes experiments/__init__.py | 0 experiments/scripts/__init__.py | 0 
experiments/scripts/explain_wine_quality.py | 36 - experiments/scripts/my_script.sh | 7 - experiments/scripts/wine_quality/README.md | 394 + experiments/scripts/wine_quality/__init__.py | 6 + .../wine_quality/analyze_wine_quality.py | 366 + .../wine_quality/explain_wine_quality.py | 1113 +++ .../wine_quality/script_utils/__init__.py | 39 + .../wine_quality/script_utils/data_utils.py | 132 + .../script_utils/explainer_factory.py | 187 + .../wine_quality/script_utils/model_utils.py | 260 + models/__init__.py | 16 + models/tab_resnet.py | 232 + models/train.py | 462 ++ 32 files changed, 10320 insertions(+), 43 deletions(-) create mode 100644 Dockerfile.wine_quality create mode 100644 configs/tabular/dataset_config.yaml create mode 100644 configs/tabular/explainer_config.yaml create mode 100644 configs/tabular/model_config.yaml create mode 100644 configs/tabular/optuna_config.yaml create mode 100644 data/Wine Quality/X_test.npy create mode 100644 data/Wine Quality/X_train.npy create mode 100755 data/Wine Quality/download.sh create mode 100644 data/Wine Quality/feature_metadata.pkl create mode 100755 data/Wine Quality/preprocess.py create mode 100644 data/Wine Quality/raw_data.csv create mode 100644 data/Wine Quality/resnet_model.pth create mode 100644 data/Wine Quality/xgb_model.json create mode 100644 data/Wine Quality/y_test.npy create mode 100644 data/Wine Quality/y_train.npy create mode 100644 experiments/__init__.py create mode 100644 experiments/scripts/__init__.py delete mode 100644 experiments/scripts/explain_wine_quality.py delete mode 100644 experiments/scripts/my_script.sh create mode 100644 experiments/scripts/wine_quality/README.md create mode 100644 experiments/scripts/wine_quality/__init__.py create mode 100755 experiments/scripts/wine_quality/analyze_wine_quality.py create mode 100755 experiments/scripts/wine_quality/explain_wine_quality.py create mode 100644 experiments/scripts/wine_quality/script_utils/__init__.py create mode 100644 experiments/scripts/wine_quality/script_utils/data_utils.py create mode 100644 experiments/scripts/wine_quality/script_utils/explainer_factory.py create mode 100644 experiments/scripts/wine_quality/script_utils/model_utils.py create mode 100644 models/__init__.py create mode 100644 models/tab_resnet.py create mode 100755 models/train.py diff --git a/.gitignore b/.gitignore index 6a86f38..b6578e8 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,6 @@ MANIFEST */.ipynb_checkpoints/* **/NOTES **/*.mp4 + +results +benchmark \ No newline at end of file diff --git a/Dockerfile.wine_quality b/Dockerfile.wine_quality new file mode 100644 index 0000000..99f1ae9 --- /dev/null +++ b/Dockerfile.wine_quality @@ -0,0 +1,64 @@ +# Wine Quality Experiment Dockerfile +# Extends the base pnpxai-experiments image with additional XAI frameworks + +# Use the base image from README +FROM seongun/ubuntu22.04-cuda12.2.2-cudnn8-pytorch2.1:base + +# Set working directory +WORKDIR /root/pnpxai-experiments + +# Reinstall pnpxai from exp/tab branch +RUN pip uninstall -y pnpxai && \ + pip install --no-cache-dir git+https://github.com/OpenXAIProject/pnpxai.git@exp/tab + +# Install additional XAI frameworks for Wine Quality experiments +# Pin specific versions for reproducibility (verified on 2025-11-13) +# Note: shap<=0.44.0 required for OmniXAI v1.3.2 compatibility (output format changed in 0.45.0) +RUN pip install --no-cache-dir \ + captum==0.8.0 \ + scikit-learn==1.1.3 \ + pandas==2.3.3 \ + xgboost==3.1.1 \ + shap==0.44.0 \ + lime==0.2.0.1 \ + pyyaml==6.0 \ + tqdm==4.66.0 + 
+# Install XAI frameworks from GitHub +# Pin to specific versions for reproducibility (verified on 2025-11-13) + +# OmniXAI v1.3.2 - Salesforce's comprehensive XAI library +RUN pip install --no-cache-dir git+https://github.com/salesforce/OmniXAI.git@v1.3.2 + +# OpenXAI v0.1 - Standardized XAI evaluation framework +# Note: Uses latest commit as no version tags available +RUN pip install --no-cache-dir git+https://github.com/AI4LIFE-GROUP/OpenXAI.git + +# Create separate virtual environment for AutoXAI +# AutoXAI code is mounted from experiments/scripts/lib/AutoXAI (not cloned in Docker) +# AutoXAI requires bayesian-optimization which needs numpy<2.0, but other frameworks need numpy>=2.0 +# AutoXAI also requires aix360, which depends on xport (requires pandas<1.4) and cvxpy +RUN python -m venv /opt/autoxai_venv && \ + /opt/autoxai_venv/bin/pip install --upgrade pip && \ + /opt/autoxai_venv/bin/pip install --no-cache-dir \ + 'numpy<2.0' \ + scikit-learn==1.1.3 \ + scikit-learn-extra \ + scikit-optimize \ + 'pandas<1.4' \ + xgboost==3.1.1 \ + shap==0.49.1 \ + lime==0.2.0.1 \ + aix360 \ + xport \ + cvxpy \ + pyyaml==6.0 \ + tqdm==4.66.0 \ + bayesian-optimization && \ + /opt/autoxai_venv/bin/pip install --no-cache-dir git+https://github.com/OpenXAIProject/pnpxai.git@exp/tab + +# Clean up pip cache to reduce image size +RUN pip cache purge + +# Set default command +CMD ["/bin/bash"] diff --git a/README.md b/README.md index 9bdf726..62e8c2d 100644 --- a/README.md +++ b/README.md @@ -263,6 +263,31 @@ python -m experiments.scripts.analyze_ecg_hpo \ Results will be saved to the file path specified in the `FILENAME` of `--out_file` argument. +--- + +### Experiment 6: Wine Quality Explanation + +This experiment compares multiple XAI frameworks (PnPXAI, Captum, OmniXAI, OpenXAI, AutoXAI) on the Wine Quality dataset using various models and explainer methods. + +#### Usage + +```bash +# Build the Wine Quality Docker image +docker build -t pnpxai_wine_quality:latest -f Dockerfile.wine_quality . + +# Run the container +docker run --rm -it \ + --runtime=nvidia \ + --gpus all \ + --shm-size=8g \ + -v $(pwd):/root/pnpxai-experiments \ + pnpxai_wine_quality:latest + +# Inside the container, run all experiments +python -m experiments.scripts.wine_quality.analyze_wine_quality +``` + +For detailed documentation, see [experiments/scripts/wine_quality/README.md](experiments/scripts/wine_quality/README.md). 
## Citation diff --git a/configs/tabular/dataset_config.yaml b/configs/tabular/dataset_config.yaml new file mode 100644 index 0000000..e9ea2d1 --- /dev/null +++ b/configs/tabular/dataset_config.yaml @@ -0,0 +1,41 @@ +# Wine Quality Dataset Configuration + +dataset: + name: "Wine Quality" + uci_id: 186 + + # Data split configuration + split: + test_size: 0.2 + random_state: 42 + stratify: true + + # Target label configuration + # Binary classification: quality scores 7, 8, 9 -> 1, others -> 0 + target: + type: "binary" + positive_classes: [7, 8, 9] + column_name: "quality" + + # Feature information + features: + n_features: 11 + feature_types: + - "fixed acidity" + - "volatile acidity" + - "citric acid" + - "residual sugar" + - "chlorides" + - "free sulfur dioxide" + - "total sulfur dioxide" + - "density" + - "pH" + - "sulphates" + - "alcohol" + all_numerical: true + + # Expected data size + expected_size: + total_samples: 6497 + train_samples: 5197 + test_samples: 1300 diff --git a/configs/tabular/explainer_config.yaml b/configs/tabular/explainer_config.yaml new file mode 100644 index 0000000..fd2d24d --- /dev/null +++ b/configs/tabular/explainer_config.yaml @@ -0,0 +1,131 @@ +# Explainer Configuration for Different Frameworks + +# Default hyperparameters (matching benchmark) +defaults: + n_samples: 64 # For LIME and SHAP + n_steps: 50 # For Integrated Gradients + batch_size: 32 # For batch processing + seed: 42 + +# PnPXAI configuration +pnpxai: + # Hyperparameter optimization config + optuna: + sampler: grid + seed: 42 + num_threads: 1 + show_progress: false + n_trials: 25 + + # Search space for HPO + search_space: + explainer.baseline_fn: + - "mean" + - "zeros" + - "kmeans" + explainer.baseline_fn.n_clusters: + - 10 + - 20 + - 30 + - 40 + - 50 + explainer.epsilon: + - 0.000001 + - 0.0001 + - 0.001 + - 0.01 + - 0.1 + - 1.0 + explainer.noise_level: + - 0.1 + - 0.2 + - 0.3 + - 0.4 + - 0.5 + explainer.n_iter: + - 10 + - 20 + - 30 + postprocessor.0.normalization_method: + - "identity" + - "pos" + - "minmax" + + # Metric configuration for optimization + metrics: + compound: + name: "cmpd" + components: + - metric: "abpc" + weight: 0.7 + - metric: "cmpx" + weight: -0.3 + direction: "maximize" + + # Fixed parameters (not tuned) + fixed_params: + lime: + n_samples: 64 + kernel_shap: + n_samples: 64 + integrated_gradients: + n_steps: 50 + + # Background data size for baseline functions + background_data_size: 50 + +# Captum configuration +captum: + # Parameters for each explainer + lime: + n_samples: 64 + + kernel_shap: + n_samples: 64 + + integrated_gradients: + n_steps: 50 + multiply_by_inputs: true + + saliency: + abs: false + + smooth_grad: + nt_type: "smoothgrad" + stdev: 1.0 # default + + input_x_gradient: {} + + lrp: + rule: "EpsilonRule" + +# OmniXAI configuration (XGBoost only) +omnixai: + lime: + num_samples: 64 + + shap: + nsamples: 64 + +# OpenXAI configuration (TabResNet only) +openxai: + lime: + n_samples: 64 + + shap: + n_samples: 64 + + # Other explainers use default params + +# AutoXAI configuration (deprecated) +autoxai: + background_size: 50 + properties: + - "robustness" + - "fidelity" + - "conciseness" + weights: [1, 2, 0.5] + distance: "cosine" + scaling: "Std" + early_stopping: true + improvement_stopping: true diff --git a/configs/tabular/model_config.yaml b/configs/tabular/model_config.yaml new file mode 100644 index 0000000..4e12d1e --- /dev/null +++ b/configs/tabular/model_config.yaml @@ -0,0 +1,51 @@ +# Model Training Configuration + +# Random seeds for reproducibility 
+seeds: + data_split: 42 # For train/test split + model_training: 0 # For model initialization and training + explanation: 42 # For explanation generation + +# XGBoost configuration +xgb: + model_class: "XGBClassifier" + # Using default parameters from benchmark + params: {} + save_format: "json" + filename: "xgb_model.json" + +# TabResNet configuration +tab_resnet: + num_blocks: 1 + hidden_dim: null # Will be set based on input_dim + + # Training hyperparameters + training: + epochs: 1000 + learning_rate: 0.01 + weight_decay: 0.01 + optimizer: "SGD" + loss_function: "CrossEntropyLoss" + batch_size: null # Full batch training + + save_format: "pth" + filename: "resnet_model.pth" + +# Logistic Regression configuration (optional) +lr: + # Using PyTorch implementation + training: + epochs: 1000 + learning_rate: 0.01 + weight_decay: 0.01 + optimizer: "SGD" + loss_function: "CrossEntropyLoss" + + save_format: "pth" + filename: "lr_model.pth" + +# Common settings +common: + output_dim: 2 # Binary classification + device: "cuda" # or "cpu" + verbose: true diff --git a/configs/tabular/optuna_config.yaml b/configs/tabular/optuna_config.yaml new file mode 100644 index 0000000..0c13e43 --- /dev/null +++ b/configs/tabular/optuna_config.yaml @@ -0,0 +1,22 @@ +# Optuna Hyperparameter Optimization Configuration +# (Copy of benchmark config for exact reproduction) + +sampler: grid +seed: 42 +num_threads: 1 +show_progress: false +n_trials: 25 # If search space is large, total iteration goes to n_trials + +search_space: + explainer.baseline_fn: + ["mean", "zeros", "kmeans"] + explainer.baseline_fn.n_clusters: + [10, 20, 30, 40, 50] + explainer.epsilon: + [0.000001, 0.0001, 0.001, 0.01, 0.1, 1.0] + explainer.noise_level: + [0.1, 0.2, 0.3, 0.4, 0.5] + explainer.n_iter: + [10, 20, 30] + postprocessor.0.normalization_method: + ["identity", "pos", "minmax"] diff --git a/data/Wine Quality/X_test.npy b/data/Wine Quality/X_test.npy new file mode 100644 index 0000000000000000000000000000000000000000..6907d183f1b28566a46062e367df46906983aae2 GIT binary patch literal 114528 zcmcG%bx@bx_wEmfh}|8i54ITCxo^83TWrM+e8lb+8#}QTTd}ZnBO;0@B8UjmNK1GB zIP>+jKQr%VzVkbC&iD9_qkr73Ho{~_0~0+HGA32rA@ll zZ_<5E=O!)dH<`0&>GGx1tt}TVoi+P^{eS=I|16tb^#9A|OdYv$WWP4DR;+bzZf}Z*X(*5AGpcc zHL4e1-zHY-Klt(9t>O<+Zu#Ce6KaRaUc*g0JZ|?(czL@2){Fco^QKG=I{ff~G%(e3 ziir0Wi|$>0w9wZ>p1yp@%57AnxH2Pa@=UjUQEdL3w)>iXlRK9cSkY+uLs1Q6*OPxSV{k-zR_-&7T-?MiQeJ8l<2iUhoG9ja^vRGr1=6igmiDe=y!;~T-tZgaCD)H}wreo_ zoOF$OJFb3)mrMzqoc^}QFX`)h;Fw3Swb_iII2rrp)1$MU&&eSvmwwN>_e*Tq zID25%G%qD?%zGapr`+wc9yLi$+5An$g!I}xYw{Nnv}2x8xh`(yCPxHb zUwx)xlF?( z;czh4%=%S^JiUBKr80f|1n2v0THEVyriP18woiBO8PkzWc`F%+8H^Dt7e%%_Z9bGo?x-5P8cdhaDzsqy? 
zKD60a6rb<=Y`RdhZ<74}V%G$#kzeHUW%EbgpB5x%_TK38e5<}TzOG@I?4Kit-Pfxe zWD+hbtQ~%0m{*4AT(h9qMfZ3an0ob=ok_NQR(JTgw?BRf2df!IWlLv>QUL|s64Jj3 zo@;d`CI8a1d&1M;d%v`Wp_1S0XHsBT$=+9d_IGFL7!}K z+&rl2n$muv!NKn4NvFakai*Kkx_-7zdiBgeE7iP8yu;H7IrYb^UY9*1q}8G=oo2KM z6kUh5?A7vAvRwOdq+WK@bZOq%#(3)V2$8nVaD#8N-{Om@`O;=}okZvH!+%$HNRj;B zy5q;?R$7!IQ_Jfg>k$_!BIAyaFVQSY7Idgrwt5*?N&N2ZZhglx++U8Wvf4KwIanS{ zwW}jVuspRS#cS}|P|161vO2@KdW9I->h;ZU_cx`8mK$4rHD03UG-_P)@l6jT$Snqy z`+n#cE_;>xys1{540$s7<;ofk;j-F!`_VfZ$EbYP?r~@Rqz$Q(JW1Y}k<+Sg*xXd< z=drQZyD3TXPS>dYhQSv^jM@@AfZ0CBx^5%Z6 z&><#Mc3xWLYn{#o>RyZ1@Sv*%pLEUJ3>Tl-&w}?^?yl|u)_fp4+M#rTE-oYTRVx%o3ER6e4HZ5 z50T4j47CW#lK<3j@_P}RA$k@LI@7v(vgmy-#yYjad%1JUfJqtioh0!BO8RrDv#-C*RS^7{2NM*HqhOOxGfmyAiY z4i=jlo1JT89w+&|%}%`B;qv~MeC3nw6;c0-w7SzNFZ@)BjIgjgJ#o0BXi?TKy4k@% zab%SLt)+J(1oZ;^@N4>r8@}hi3-ZpqF(vEeh|m>jW6ynv6PG8P z*tcbazQ%ncKk!`g1Nk?=vBTb|+sUGCDPyx5J;KC<N>M1@Pj_S;-o^f*yahqC&a59!@9L)=Vn{j0N2 zl-z!EON?HLL~-f!d8gM!_t9sdY_y?Qj`VN#HE)=4xXL^G>x`d~?-MV;ecWT}JaIN8 zX2q^{~sy#|*| zl+K=ZPHPw3k>IFD+v;X^T@#}4FLl1ugZzyrw?@m;v#a>Iwu)Bug7^hLES%ulbAGb| zF?DnYJC~gas&1C7+qH47ji0!9a*0j5lZo>9A%kPKe(7YE_P|YLrdKLDT7+sb*#Dho2Xvb-gT~xkw#tz|F&;d z{9evRr@kF-+O+D9k)HBv$o_jSrBh_B?yI)wP4t$| zhx#|NGRRl=7<>kv%wJG^uzu;1Do+x>mxg@!Wn25Rm~Z!K`eePwvVEyvlV%xY$e6v8 zE)0*`B*4Fk7Vh2fe zYj@vji87#tONCmmgX9P6`E{FSxCrn%{DS2zl{iN|mDlJ}S;WpY=UIeP*uQxoh-Y&#ylfZ`rlYB56=Z1FgyBMs8!L zMvI#9?hWSmekZA$RgbTn@VIM`gujt@HV)s~()k}Z37#bHOnvon#eoL}5`MuwhQE;? zz&rgqP8v7W?4~?jZEgL=TRasm8Q^fdt7%5K=rKPtIcd!Y(J(4=vQrHkbq>Qy>7(|6Xeq*>mpj7Aj^EH^6BLMc*QTszma9^GEBWoYO>EQN;n#ix{aNG;L_RcmX~;(XLAg!|ZH1-fibT%ZPNvvn=O-xSw&uShH)n zsK?0f1{(PcK2Lsl^0mYD?V6W>7mj++x5a4l7;&b~j4dr*>1het-n4m7Vuk&VVcwyJ zx#DrBhzlcTL@8WN-GsMH8+G^Yfj%}ObD`_>bHxG$`UAMH{5FGf*`MFa9n*hpYc?ZL zf~&(`)p+f9AzWr&nvgViysH5J_IhIF)59!Q<#o>2$Z)I(&V4BiUz)h|Deoq>mIUIeD#s<(xl1LLt!=E71PL* z$GgsH|KM7Zu*-?=+_PK0a68+ydWYsYV#URv>(8HMt9udS>fiKB&j{JT>ht<%!CCTf z$kV$xpk+G@Xu+W=Y;+MUO)X}Y?W;T^8`Eqoq#%Dw&{%O zU7fN8x`x*aCr7Ia86x9F*+q@=vc%u-4q92yh!;VwPrscVU`Le zHVeJN!i4*lJ^5{Chl_$CSS*LRmAOFK+iQj`~_S`NzD@x&N@H+Xy zZ|K8Tt2;W&i)Sw9wcAs~f%o-ZZIhEC8xLArVR@w_0WR@%UH;OyTbiKGx2Q2;M9R_E zDjqJr^r&8QVv6`ucEp%Tt9~lnhps{V^4{PV)C=x0@ymU)IA3f z(czxOyQcMwwCeVwKYy@^67X)*K=US*XFibN63!QXL0)ejeK_Z<`(?@b)=k|Lw|hvm zn09;UrP*OgCC<$x}NpOUHd*%j`r_s zTgE}jxZc#^_rFFa;eds6z2 z;Czj>Q9mvWix#$PHqBgg$XlR$f`4O|FG<_}(m|c?bhlI1SNCTr9{W#?>xYfEdkU?l zd34H>48^Crn)z=y2iag z2j^aZCwVUS0$xad06(CY)TDMaanOX{ zlHcp+(z%(XqqBJV^G+jAt0cv<3T#U64c-+aI+nF>J1?_9#V_|5owh=5Mwj4Kg#vzz zj<~Yf*OzNMB#Enz9mFJ~1c7dZ4g)`~HTv@Aum?rF+_wGvwj(3d`L^&|dvbk1h~OOH zh4uZv)@-xqjU3hWZN|WeTZ#__j#?UgDIr9F`@q%Us5(og=54y@F3{zuL-ZLOjx4hH zaXwj~m+*VJ$DA*5hE72L!rQT@g=XrnbgqX>tN)X^AyR<*K0nc-6Ou9|{R{X!dI`Um zet=EydA|13a%J$Ei57Pk?GkZ5ts6C|o1pGn=?7C@``^lyuO8mX`WYN4%y#B^ST;!z z@Okbr`N8DE)xB}M^)z(*hGF@ieAY)w_@*wO?;Y{{MYAEEf;t4>!_(v?nt{!4z)twZ!LsK+HsF1%eZ`nM#X@!krb`#tElB1N=&)IZL; z|JQ%j3te0dk0qZ`FZjKIp0~=KE=UvTau3hcFIV2JK)U=&ob{u{DXFXf!UL#7++*;L z!Vk9|e_B^6LS73F432x{ElN#w8XA%KT{w^2l-+SohjYJLt6S z26?})mb*uWj_i1~%i}otcG$`;or~}(x;=dUo!h*3E4o$G)?W0MM~d)T^2k8O%m_k#SZi+9jrD*tf#QOWSD{N-nFW8@Pc z;hXdWwjJ8hv~TeM#k1%$z%Rs#x8`}fJr$27&Uo&UiAM)_yY*9WFUSv5o7QU4^mMAw ziq*c_bi^-#z5|~}rzOs)$J9;wkl-_P0`mIBdvj;Zb;*#HYjbu_aZZucW8FCWSz*p- zzjMCI`Wcm92$s+c8`m&MH$>NOHq?wt$LR4NSKaeNM zXViK09dwu}J%TUgJW5gh7Ic_fqn(OIfqaNo$k z;p2|}bN;QbKu_hp!3&=_Kdj-^B3+(cDrV*O%oW+)JO6#r=%MQK(huOiC76zOx?+7< z>=`w&oM)ba6Ma4M&T;Yj%Xu3^?atj9ix0ZS3V0Uxf<7<$j#ITkCCqce8A)50 ziik?@#$^wSkacVm3~#5J(ly?tB-ZS|Jx{)j|K#|5pq>VfO7pGQe!N$% za83U{%Dh6JTw8a=s`*tbYs*s;`dU~0DLn)BB^@}Sr@;fB6;E|Fe;TEpJ93%(j745K z@K}72rXL%y^oql9$@$X9d3HU+uJzI{viO!! 
zH|rYZ2;LhyBDjQlLBD0Cb@^=();>y)+EKChpo9+`bfc0j0xPCB{3*{K?(zKi`5zLU0Q~_!0d@1&cAGXC&$C5qE4vvno#O>M7j@pG z?L~tL9+9fw&F|f??(9l?_adG6j}Gp6f8*;4GDq-SelL0|c%6C?GiIVi>!3*GQ=tDg z?67^`JwtPq&ILcVUsTI|(2;b(bG;@EEVi?65s&<%N73h{?}T0gj|KN}zSJT57wBi( zC-kmYwsD-~eDOb!C!N#A$9Qi^6yzQ69p{V}z5lIzAi8}9`legQw#eAsyn@!c=75&A zhf<_2K11iazT3ERw?~1^xvgpXCKWh^6d~0JQuu!u1r3| zuYsB6?J%lGnZUJbYAt z6Wxe71E0Z>$l6l*}5MB9g+G* zUynRVJoMai=2!JaA(B20@rz$W??&3;{Vl`g+KF{`_G}xe@Gm?TTn*o(zU7+Dcl8*T zELLuumEW%DZ*r1w`EaFwGiVrE{uqImJ2jbzxD6NK-V}|mdt#J>?%A8$XqaIT) zI$FN3Q}OmAK|Z4n(bt2&p+}K_(ff!qaO#%rM!m;$d!_O^e5mQ`b%#H;NfEXSFH}rl z|4#L@W{G!Gx^B-@ecshUR-ek1$Pmk>ZLPayb&+oQ$1jHt!#SXPqJ#T48$L(gPZ8)P z@NV=HelK+soJxLpq{D|UJ>9hS=7vYoKW}Nt^hNoS{EHu^gjMOx^Cz=~ZeJRnWinp7 zr*}F=`4pT@qK^-3R80FZX;gFVMW9G|wrA#Bw|lC;$#cQg@D}idJw=nuh%xNq=)AnS?kO%9|gpCdeg{u_M;@J`yI=4Z~hM9YOKEj-o?`y$|PS)WSh z&m58_`mAVnK6P)h7-+K0cyrI9KFuH8*S||c&Ek2iKsUnQhYsVGH+FLPq6{&;eC6^D(kR_V6+Bj(GQ%~i~(pA5Oz8*dh`mr6l{=T=e{{uljOPpYB>r^gP`4Q<~&_C9_7vEjXoA+A% zOWh0j5I&IkO}#dmw8&OICgNelfS1KgimZU>QPg8}aN?If4*Ym|whETKZ1twD6%f2SbMBL7fioY}HyYe}r)1KN?$A52| zSdkV|@l2zE-YP%v-sTM06;ReWSo!<77vLTG^WCyHdz&tI6X3J2*LsE(3{6*ln0~WD z!)?dwIiXL&yTMWP=gGfwI`xcr@ApKGxOQ>Q@gjdQdB^jf)n(JbAk}~4z2SRBpUn2N zsxqqNe|!q$GvfEcQPa%_N>(tFuy%LU}wg<~&_lwn!dv;r%%{5wfv01u!*mK;^)D203{QKcqZ)1x}w^SdUybi7= zeu*>q1%6Nb55zD1Le7Cc4*f#njQ7U*wsI){vcb_v!TEA8h~L`6)Z?#Gp>oji$g$=x zGgaS|`bM0+A5(RFT*Z5WbKv)KFC3oUwrDl;w7^g6P;BtGX$uXU=$nH3nq3~h$t&1c zqkjya=edo{CU+X{>><&OhzImZ{Y?XEu1d~Tbsio&`|ipnjlK1hp8%c9dx0$8&SInv=j^4Sxei6(2L9^uXfr z3jcDx+zW6YbyM|Y|IA-(aG(Cx()fcswc()S+ZK-{&%Hk4Y2Ou@IjVmFolvuUwowS z!<519M_ZLC6x2<43-uV@{dVW4DNT+(72IQd3e0u*E=WA#XyU7U@!)m%MTcSyl3W4< z#EzH{?e(Ng0YBE|o9Jiv+9%nKJ(3}l7kpZ2J^GpQE7Kn$pOGKn$K(g<1#w1y2%o}e zb5q%*ZR)@L8t^Re8P8?zprTQh@$QPBq_hr~J-U?Ds29v%pc|Pxdi_=6ah~#Rlh4qN zh+lXtdKA2nI1AcxG61T~FTzoObQpXU=yIF`@k@T#9p%z+y-9)c1%V&h ze?7AAT;FUl=cq}G=HJqkFH^S;)p1`hht>I{e8r#ZdstXjN*DAYIp5y--xkMN<|%&( zyv5ABOudGC6D4{z_k#KcjzWitH5f3?`DCubJNUsZHke;2^D0d^&zu&W^)5-E)9OAK zTmtU~?|^^Nf59c3FZ>POO}#+p0{2yHb^xGk1YF=Dhkco88}yTl{c5UB-J_}rSu2x1^JBk25+H{Go(qM4C9`W zs!wM5e57mkk2eBeJbh{AzVN4V->Ap%H{Kh5Bl;J_FM8jQ-v>|3h|f?uEp?N6G2hN% zd4-rV8or=kQ~w##e5s*^PB8M1Zv~wTUncW~(P)8?MO}%)jlP+kf?)I{vC&@LYcH5VJE!1FiFw&V^qC zet~Wee!yprE*J4AwDQ6guO$9HaJ6pT#J{79f6;fqscTyu85Pm{f6dAKmrekF;Jwiw zf(PKMz;}i2$@w-fWjTBFJTEcf__3~&|MFITIpUYR1AgGS@WR$*rYCfHt*6mHrfv%T zyAO?PM2M3AjBL=mM4H0C8o8;Fcoa5WI!oOWnl3^Vn;+{mauas!xW0lDv+tL7x|Woj61P z#TVbn)U)f^TS&zLVnM+8UVtAHQVA2Ppe+hN%BYzd#I&J$Fli(8SG4~Ch&7HZQK5bu{C&82WtMNbF7_fKi@Oh!q%uav5(}s9~ z4o;l$T>J!_1HTuYi#`K+GI?^zmxnFUk!A zkL_Wgp@XBBfJ@L33(og@Yh;%uYfjnrv|)K4#RK4*@IuZ3{Cj8DvNG+1(f4KqZ}A&OeiGy}>M{Nu;taiyI`7ps*M7l2IfCbM-{_N}6YSld@-spf{r*35*t$51 zcwlcwr57j9mH!kjsYBFx`kUZi_}jUrDbrn_WJvlg@OgYT_~Pkzb1%@r@!1$vGuzlR zugG8hM~4CLP%pqG%u%7s;V(g_W$(e_Qyu;GTg1qarSF!-y~&pi`nC)=>lh}`VbE#e zL&O=ny+L4^l{J%N1m}Pr#eSGqriU-I*?d9izxdbry_2mbuAgy8Ps10)d+YWwds9@c zJh5#1K%2D&6*bPc?)!UP8rKe&XIeBK*5$OJM*o<8H~bh}QuAc&ZSSt7)x0VB!K$H& z&!M%iB=-%xj2|AP8PoNZPi2nEVC%}fW?3<+UT`ny z<8Th-b@&@~9$ygiB1S{Z1~punDtj4yNExE%rRKZoZ&K&c*LiR7H}%{D(Cv$WtHCAsxzGvV z$K8DEKHIlGUHSO&#pAEWuMFRW#}a3p1J9-Z##}yn6mFud|JS)!+cf9Yp& z4*0ggXY`M$$KVI}8~&XY>&9-GP$F9p59qY`VWMk$+lK8(7U(d%xA;59hez7RsJRa2 zX6!0{zTfFaq~fvU9s20-H*{L`684ucM@9VF)^AXzO+<=3v3i^Bv735M)Z?`VmcKV% zPM6?n=G?(0;4}7ikk2?@d`utLAFDHKNtEPX;IF10qtl|>6A!v|9-Inqsp8bOL1*(^ zHE&AZfe+zZ!H>vu(RaWP;3(?6toCD~`;jC`A07NaJb)+RSnro$-%oOp3=XaFVJD=zcD8PAIeCZ(j)F!y2Qs%pBEnp&!t}I`X9J&%++ze zs$Xb1b;tY`uhl*U=G@^~#DmIbfBMqwJ)j?ee)jge{i^A1JGfZkL9`akI_>v z9hlkmb;T?JZ{fN4Z19)3FD>3bEwi`=KI6F$`d)CZQ@*s)J(;80d2E^EEt4PeaQjET 
ztsU}}?+QOSduQ+!GarkOzjUWj^O}q<+FST!@$)A`^fd4p^^N^I%&Ae|n4br)!@GOE zaUavDUyRxdNIs*!p`W4uf_KnU%^Ta-JfokkeB0a$>JWVfa1{Dv`-d^3n>;M8(Vu6& zyI+%;Cw=rX1@jlocf(u2spMbsPP(aW>7(!Sgz2NhiEG>cR(qbvlRK)lOc+w!L)?vw zS~WE``(OKE&{M&wy76#+*npA!WArriN$N2^=ucChd`NRn6}ir?=SDjg{>ul#Ilu$Z z&$!3vXXIaWjjC^_8OC*v5$Gk{W6ps*NuQVefUg*x0K5Y)T<7ZdYC)wiHNT0@h2Ceg z{`&qZL&~T=FMdyONk`YfJ9moqe!{!)4Wjqa7r~zjPDSt2%{#h%Ug|va$N25xZ}f3^ zF82c5NBqLOD}UJ1+cT|Dkk`H6+HPz0cf8V#@Y{3W&^5@D)FFJ^&VemX3|^M6<^y;x zeVo@1pKt0C^+Dww>Kk>4I0HvfhaSmZ-rIghsXZ9X0an>R^rW%J6EPriV6V)lD}=KZh<;U#I)uNEIKy{EJ%-1Ef5~U?7VrbQ2KkJ=b?7_vv*4TH2kIvH0be}) zn0ic~7r*_;8R7a9M5e@lrpsH7bU%=<U@my*)Z1(zo5=jH_?s2 zzvLa_O!r>A+uQuzn}{Se&r6+0FM((2b@2S^dH$=KV|BLmZ@hMyuhP%ZC*c8U9xXTb z?N+oW;7>mm9ftlf_YFQopP|tEaOs*wb)(DbTgA*ecq!kjP4}25!v7yl%B6#ZlLb?yazW$L`s9(j4+@HF{u zT>9s6Tkom*rgd&z{6%>^jlLdrllKOmtk?cnH`{OL)Eq1Kg1vS4?cv?T1ALx3&z=VI zI{iX;OSkUr&GQ2@1->@?;JSV;beM|6*3Bx|m?hZPM&5yU(}%>D$s7v$FY|BkA>xcY ziM~Ueu~&lpfS-%K9dAw+?lkHAM(LCNJ=?Cbz8k0Z|B@fryNC{s--tO0elPW{q3wXC z%Rc^R-z)u1csKLM)FJllQRmSo!4Ks#s(k4&CPiKem^IwMEA5PUut0P-?Q9a2 zm9O?s^4|WZ@mVwb83QMN)>r-=@-Odgr%iNarxmek?h9SHMR*UN-7b-;zX@IkKhO_g zu8w%%9^)q zN0BF~^ZOcYGYGtIsPTK@7ZW39)OweaC(P_NWwvROFYxhm-_XJFr&2G_&%zG6Z|GR~ zRIBr3>4WzV%8{zAO5h=sVC0PZu{9y7h{tIukV75_SUo%e>XLRarY-{D@M)3zQP{I18dX!+ZA zS;3s8r8M@Wqd#!I!~^;c_?O>{U+!s*Qx%iDycg&X6( z1{WkI9?i&5{R`&FIA6|z^Mzlqw+??4?~VOYlUD31*46r}!jtqN>CY1n%vo~2_&xCr zf}_~K{`zOljG6W>iqDfL;cxH@&X;<@d?9lZ{9gLU=$`l=hzEWzdI|kwelLAUdo=f$A7qd&|5lfUgr?B+*kuw<3~iVzVXYV-kx;^8huF40ev0+IyyM|8T%j533THZ zyv}>m^~-_J*n6<8S*&@e_|M)0<~RAh^!3=Uj86glF!GO|7M+WEJ@zBA*9V;dKO%UN zI3s>vxvjM|JEEuQ_AkJb?2Bjr1AGWvt(zavAD9az@4%1IC&`oa3yCx85cmxIOC3Vr zfxpr3hR4!>qmKh#M~~tjqsxIG&>z5)=*sLtW}i9pSv;4#13v~opr5fn3jg||y!a)q zE=7Cm|ICXpKZG9<|2pT3z5}0!4^fY~Z|E9F4nKXny?DA`~-H(q;lN1Cde@K}B?^CAmuzO4HE z$yj5Kit}xDv|H@~VM%J9*W&TkWnOEOrO)o+Tko63DgOiU%U%@n1NaQRdbaD_OM{J4 z1$YvlBm1lA2jF7@pTXyOF8wU-8})*^NxdNNzys*NfzPNz@GN{+oG<)>^CeFnjk0LJ z?v0=1z3Jk0@+9>Q{)P|aQ&P~g)IP;Dd^X%S=HD)rvCq9UJX`HgW$pleOgzx%t39|-!S-p{}4FKnWxkss(wqZ^@XfG6n>F+X%m`~I=x zo^+*`P~Xs__`UFBa5a0e*_TN?RJ+-<`ldIrV)CzbG0~In3v@2>8T^?1fG)>5WWL%O zJbs#y;up+24fPsqk{pySPPvx*7F0T4?RBRPalSm4_r^U&2dBQ_+upu-OG@)uxdMDf z-xS>w{fs)KTj${y^jmmu>;)YAeba@AOUG0nlKepYQZL|z_fKL+8phUREP{V4Qky)Onr@AEFNZwcxncsDGx~1IK;iy`lFd*74|LcqLZ(VZf>A z>zpt3f;x{clRg=99r#oAUifSou{TxWU)~%0#PG%AU#AXnFTgwa&;EK?WrSJ(EcKlr z;*9!6z2JPE9u@OGzvh?PU(Fo$h(GiG@Hg-S`v%ebz}4svJaCPj!0f-o`Lv9KZDP+=ZSt6^9<~-#=k=yBJcc9*G=#P zeN*xS_>6lDKBMnHx?7a~w&_7?K8t&dK1u!smw;2*M~a^SeZAAz_pRc)6z$cueU*14 z@|j>Rl=lXX;vS=W($AvKqr(sn=p_T|jPO5_n78U2fi>na&E?-c$oAL-7^ zHO)$FDpY$;>65`X;Q{kceZ}AhzGD;Dx9QRO-C0PJpi5;?l8^4Xm;iKD)JHVA2M&0+C0(FypDjq!|% z?$>e+G;j$zB6$a1$bHkr>-3%I7vclKuS`8A@4$zeK1oPF*{0}wrhn#RsmJsK&`Z9H zBQwW2dP?^Cpg)iwhzIm2@)`R`*;h=SME67o?|E(N%E{KrN>3#o^7WkimfBiOqaHJ_ zM?6r6=x?Gwpr_)?#OGL};PQ?05qSdM&2#aC^WO3f-6?(H)*FGpg#5sMF8C1lg6ERY zz$N4}?i+h=@iDQ_5k7=&gib(T55F?+je1OdqhCn=UGwLAq4+A;Q^x)%>M{6?`^KD4 z@@L1Yp$!rRdrrY;`1_WcSgj2loFVzH9Otky*|x(~2Yn5l3*87@%{l1eYWe~23;gT& z#L#KUzvLb61-Jx1IB~|@7e0_@yL>W^yg9D?8oK$6dW`;0Njux*Yx}a0&T=dki0f&og&G zeIw5BtuVi-tIOf92Coyp!~^}p*)3KS))`ttgKx6`;qFzp_dSmqXo)9J+)U|KsP>nj z6VRXMe7SG%p^>fY?k_jBxZ<1atGFD!^UKRODWXTa@uNPL$WnZsd`5mCpK%W8QSdi> zHt?aUb=)45D*EnmdC$J>j7FrXeG1$+`h`4~Juch}@DAT=#&?BJVZ`rW7jueB#k1h^ z^keDk>7Fn67k!8POP_)7;4y!UADsBb&jpXApLKumdFL@d3^eviFyDeNsM*f#UI7Qv z1bzbe1^A40kCZt@QEo74;97o5sGI{GB>03V_b!H0+k?i>CncsKjc)OVHt%q{BbFuL!JdTe~G z`N3Hag9UjVAJgd`7Y1DnPZjK2K`%l7r4LD*Q8zgUcp-V6I)q;X+y{SSu7mT{twTH) ze#|+b+cTFBE@56|==Xw*QT2`0ced#lI-lA4kHwU)>buqGCEzIV4)`qhucZ%a{790_ 
z;)uTE{OXf{cm8Vm*voT%I1>EI>NxtH#y?J_JXpzm>_;=b=^G4n@@_F1@ngk2x{_FZ>|xz$N136M5#fDRgd1g8$5% z9Qp(QI-LLyp#M6*;+gpH;ENwVwrtd_82)w6^oytm&pZ~BNfk*?=d~qdB`)g#?!Z7vN_DN;ltB^ zyyn8Rv)&w&9>b~loBL*eonQQot^xPa?d47L&)^++Qa+aF)wAG9{|!HeOUzaCUY+@# z?i-!U{;D&0_W@3&8_CCf3KzGon!iE3CA@n^=YspJ>pak_&CU0{aH@B7_l|0xsJi*j z@QxUrzgw_07AKgwK`3qSaC&%`Yw!n(59K}dpwpOoc;j+Ci_<8 zi{!c!A1VC|u9n~E5AvJyYhFBm3EfkEqa&&p`d)gUd*QzQUSRfZC%PStonP@+AFKT4 zzKOr|N#_g?*!_L!>wENA8}UETtIeyx=ivvu1@G1;foH+L=0ws<b}`udH&r$3Jqy|>fQ2H4J%El++{mN#)CWmf$&`sPg}$20dh%>TXMh0DwKSsdfXyDu(OC5tL05`5)a^0piipvI%noj!VmJMb?I{S-vzziE7|Uu zong+1ec)SxALu)D@a!SoulW4aQ+~Dh(yv!HTN>Vz`1`?o$|t6-5AR075AKEf5WlB7 zy1E`dw0Fg|S6zMSfe8LJ7rWbC{hM~1u{qo~=M3)S?{m)h`LUJ|JY;{}ivtg=@7VW({+s=UtKn37s=iyg zCtp0DxjriS&ARj*`yj5Xqgz)!3tsPCv*(0T1-Hc`U;EFp&H1t;_gH@O-{|(vuesya zHRs9s#V_0oJ_SA|=hrz?&ziNq!Uv6>JrU*&>cietpw@Q}ez-33o&FI0 z)i-bb_SlwKJmbsMZwja4^Wr)_j}PG&{u>?(r>b}JiQ)5n+pAif{dHQL8T_gAU;oX# zL4B-zHsbI1=S^K2pItD)7f+W{XK)_=Slj2^MO%v`;6DAw_`Lds^NY{Jlk;ngF8_6< z!}0VVru444b6%*=oBs<(;Q{Jd&ae2uUyWyZ$Gp^{r!GCe&c+Bn!&~Sulj?sqtK>OF z67rk6i1$MI&+ucq9RHcR$c2}Vob!0U0tx3gS%=}*&@T;Fi)SasJo9SBs}Dr{QSdJx zG#tej|mB*j+Yw1(-m+=evSii^(eKK}+`2Aq;8_CCXdpKE`NT@Js1fB6Zl%lGO$@Y%o*;+g&Ret_@A3zPSkf5-XNXUXSC2Y1fQGpGO3HRyfr zvG`z~0{^6VraojIw7yVr$A2>iLp;;Zt6m8A(NpOJ@`d-M)raU&`eWrG`nq}momL-; z`Wt_$I=cV1rR}Zn_u7#!fd{z9)|EHi3w^@&LEO1~W&Q#SA3q%Wo$RlA3%@cQT%N!6 z`Umf8S!rx6o~es?4;z1jAL!unJpLyBIuHD`d_m3wej#s)XLt+V?Y~)X7aQyw0b9x6rH2#fJOT{rM;H3w_whx|}&9;sZYKeG&88%=g5P>B{=aoL~7(Jp1CR z-HRJF+#BJWb9Q`{+W3<_;XO6+jK6P7vz?`Sowq0W9Mw1F3-}k$vJd)%-8b`VQ#p$>*2u zsZUsas6*#IH?*m+KKK;qwCeuwL+X1QZvOe@vjPv0H`QCjGdit0gLxq4`iMLDeDe9# zuLnQKZ{9;u4-g;z96D}&?d4123QHc^F}Gc2&|&a-^;kNWxb9w<7w?>@i?qtRd}H4D z{pgdDlh5k%RC?qc5bwmpllrsp7P>~rLp6SEU$^Pzhz^4n;+yg@oj`n0*OTYn3q01G zJ^8{rSK^Mk2%bgvgxBF;^I@#({L)kP0mgl1EIqp5NDQyTXZVFUN#Bu=`8Cwh#WVdE z)yj^%Xn1Ny%r6)8otcmBs&whxHuz!uHr4B6-NY(9a;Axo`Htd60+b z1pXTypr4HYLA}Mj(4XZV`(E*vuj1Qo<9eU@QQp8?*H+$i)7L+g@!P3)E$JoVnf&HFz$N~hcX!+ib!l+3q=pa>RIv|p5?xogY3N-{S5e<|7L&PH~M7J z?97@Qdz=jOK%BFmOMF!9P|dj!ejy(x>(%asb?IDqEZtMS@ZZD-z92q4`>XF@Wa6*& zM=Iw_;92hReHnRHzn79f%oD>~E*#l((Y$^;qjgWLKl85pQfKh*&{OdtehqP*FVnkR z_BZHUC(BlP?Y=poPfa~mJi{-N`SJ10or5P0?;AV6bVRe75R@`ZPx=~2!D-{Ai%pSg3UP9~nIlj%3bH-Fx^>as4+WW@Yu z=JDeJ_z=BMJ-|J7-;(zs=i+m(_<7{cxX1F5^$$IpGDDw)ys7_9e&bt#C*^s1pSqs@ zV}3+)-QhF!dHf=|PIgJJs#iYvOJ*c*>K9R;_uhd#B)_S@!8_`O@FZPXeUtuRJ{LYj zkHQPp-^}%aqxjnB55Kj4bK~U;4~4q_f!n+P^w-+V*!wu%ZZf@Q%8Z`8(pT_v*Y)21hw( z>b$EiFMjigo~t7|qWA!><3sWgJZT=&-;0Vb>RUD~%vGa5h&#?NyhBI)s?D*7+Ri`m zFMq1{gnutl=JRWNAC1HZd=p-$KZrZ-amDWCyUr-2{wI=-`OXt&(352a4eLB4Zo zqf&=9WJKmPIA`*keXy?h0Iv^if3nE2))~R4>pYkn+9QACGsi!_EBHw1>+p{MX5G8* zn6miB?RgW+Cf049D6=O1>9_Pzoo^}~>Tlj#RF8F!@lAN0E{AX02lW>G%|7^E^AW`z zy1jgCANb7egFOGvj=mKqynZB7m-gPl>PA~y5ALucg3shl{LQ@(cj$=S_W!Y@=ua7u zcY^2|=UtW2t;)g^(T`ic7c04a?QJfeot_hU2U;G&Ti`zRmgKsr|3(Lg&&)v{zVyMiN9%10^&vdK zIg^i_Gx#jz<7N*uzjnip;IrZ1!H?lSd5GWBe>49~os9kqN2&A5Z+i!Pcz^Gs839M( z0pdCy&}hH|AEkbCB7*zi61We)FmDi^RCl63@Dr%N*$4Uf^_{<8(W_K$^i=64gEkdf z9?|X1aS?a;C;d16RQ$quz`N7W&wBpJ4yg(CA$gwP{@Ni2e;+kAE$FH8g?L6sbdU8} z@}JoUdY`@-I+wbs{05h}$LeIxFP!QfCi%kr-~PwTr+r)XXz0U+f7Ju%edd|-%jrj# zkL~ZeRV@lXQF?PUt8l^DgAU9H^BnuV{ovJgrxi}nX`M555&f}+-YY-2-47c>9UV>; zAIux1Bf<~%SN$USJgluhVRrGGQWE^i{2J!a_d9fO)38f-hj&2SH{VOQ_r3D5d%+Kb zZ#oZn0RATaF6cME^2V1^6YwOQN|zHK=!oXlnd?K3GN%lFaNp!Nbq0Nw-kb4$i@0Nd z&1s+`%E#(@@(>(lZXJ9^Pp$abgBPq#Kh^iUbMZ|buFj0aU--d2=I;~FlKBN(f?xPv z^R~NQc}?p}%AM*{p7ZnaPi|ios|V5+@9>G4|DYc0oi_f5Z-$Qj@%+yZ zM0~HkJGZU*=ES@S{cqx#_blaO|IIy?=joI7S01vz;;;BXpA>hz+xJV6PVG~lKN9d6 
zeVs3p@74Lmv)~fCy|^w9c^A<+vo8FA2PD@;tjiCM2bj~qH^>*n4{jgCGx1kk=QGD! z)Gzqjmc91T+9nsT3-L^S6L0a~%oR?4Ph!K-`rFpNeLUh@@xHV^;bdJ-+`(JuQSjMM zOKOaHHL*1CP2a2UAXz`dvy$^MKBPXRUg#dHUwrn};^{3e%89Do*soB_N47`wUvZsI zD{qPqz8BvV&-fAb2fz>dsN^BKr~f9Nxo_gxPsexlI8pIHOfSLbuf6w?hZdc4Jcg^~ zP4P_J5!dN3vj(my`@*xUVs%sb4Zk>W|C;mvv-?oUL;O+J{eQ)?=*srNefy|pzSNJa zO$xllJvQH-ufjX=cdpFMyyx;lA#XZo?i;<2e@C2DKjugDeiomCc-H0Ha~gek-_qE+ z;)DJR_(4B9y#9N;%Z8N5{4Q?UxX*yw7N^dDqtqFORqTAz-7E7Xz{AZWGI_}K}w{8vi0gp9r#r!^e2#*!dj<4=e{GP&jLq9LvhhLcI zsE)25Qr(%sB^-cS0UGag=<-Yk|I92@ZarlW#_gr)=HrLX9yZUg}?wrwSLFb~c zC)bC(tHC#@@Ak%rs~6ds@2@alP8||{po7C_@&)~jPOEQGJ~p>Q{g`hXKZbvkmAAaGS z(Gjg{f9a{}S?28F0p=d`Z}-lUJI>u3(|_?S@l1aJe5QZX`?2D>dZF*-TfyJtdHOGY zEbhS7{He~v?1!%!yydgp==i~}zD~XHM0j_HZ_v7Y=FTskB|cbJ9TM)-FOpp-W5UZL zvO_&qexvuur3`De}f;)aq-{i1nN%o9esf60nRVqApJp}cYf(--YHf;_O6C{ z%jJy@ru5i+BKWSBRw%Y|cjqz*=fOLq=F_ToKQU|EC*!A`AAC&y8+}K;#r$gg4gS61 z-no_c*W3~LUUT5^3-Om;EpPg7-a~;)+_&$tM=Th;;`l%B!VIocE$ySNvHgWh@L2jU z+~@sy@yy&A`(TbNpDuk;To-q~uK3lrZQkD$!6oKFtNVZX>*(*_9(O#-cq0Dlx5<-dtL_z+!=uYzu5ZnL>F^b)=mcn5#;zLWbVzgf3w zwKDP5RS(Dd{l#DYN%dIg3{LgFp7@|1d!T69-Gwj9isE;_I`H?Z6@kanjp(#^A%1aJ zh4bEdq)ff{EVj(**L)=PQ1rbT?Z;*Rs+UckTNWS;eB zcE9M*?uh;?p5d`Y*0k?c`qq^Ze8#^nZ*F_FUzN-$$D<2#O5JhqO?ko`Z#dO^RO+#O zj_?k=j$fz;EG##C-Q~}mk*Ik7z{WpT`YkqZ#k%_^yx(NUz}=y*V|Mz%iyFL|6Pq*5 z*JfV4do1qM>2U8owSOv-pjX4c^i=*Ge%jX`Z}8lNl52ur15RyU;(@1r-?<`!qtu-q zI~YCLr%Y;s&yl}RT@Qato<|Dr&=H*n_`!XHqr^!*kkm7*sv*eb7T7ePrAqQ5MSG+SA8()%3Jowc(=K?d-7bj zZc6{0fOo`oI*fJs3G9Qp9efq?n?4_Wla8p4BOmKOc7E08od^0E-QK)RK0NOPi97D` zLmeLI)#ja)g#3ojKXyai?Q>WE6q|1(zvVdPEkw8=ws zBmMs7gVRgkRJr=sdM}+s$Dvt3E3I0er>Q zl{e)feA77-&+u+}p07BwM4_4gnRX=13*xik+ZNB@YW-R6h5SZ8^S;QGzaDx2r|b3y z-P8HS-|$%RjP9w<>%OTM>Mv}TdEx%``}T%<0RHB`IS>9DT&=$a&%)>Rp|rmtPsXQ} z_J;XU{3UpcKJ56ZAL`9*SsV3uUSEkIsH_dsH-|%_sn)9TOb=R6bCEh(!G$F35zwyi2U%HX+y?fHQ$$u2x7JOIU zFZ`e0H-#sY@dJH_o=VrC(>A{E`E%YblO5()!_|0;cdpC@6nE5znk^an?AEI;2)-5g z!8^M0oA(*ikJUHTdG&>glYAhhPWyNGY0sE+;-wG!?)n#h6Mx-f=S(~^2SZ#pSIxYu zoFZk0R3EiF%$IYIodJ0h9`SlK7GF~?~wA9oAsn=bb z8|G5+YryOH{A;^6C8j@kG@?i83w0jU8O*bJXj{+SyB_*J)Y0J*ct_ui?-d{JZ2raJ zmrDH=(M#y5bVU9>{igg}&I5mm{uXgve4x|9>uWnbdq$)3`{JI%$A0ts%X@;}2dCo4 z&YAO|9-Ewp#9z9-zAt$Q@0QfHxGvXx8%7U%Uf3+GqdH^h|f`+^xu3heI1`y zkLBZsQ}KB`K>vKdo7=A%**ZIhqw43hXf(6KXJp4dkR~Ny%;Xd&UPHjK3dzG}E+k^gKAEs8AdMInm z_9#8I)Ut*rmWTOg>TlwiJY;|UxA*>fa==rS^M-zD``})vL%PSw@i#f{z^QPb`KjV0 zzUlpAbyNGx@5$E&|H6}WIs2=xuH5vVLl#s?kIFpt#PYl)Gb6qr_?Nz;9zZX_yWunM z8tGe9XLw+6xk5GX-WU42#dURFxLSTQr`LQ>`We2dAHCzT!J}Hv*%s!wxEJCi{Y+ff zM+HZTJM=R+O1{A7;T`=V;<|I@za{T4oi=moS)H%SITq0$tcw@&fzZLdD+l-SftY)s zjsve}*JwI&%?+EQLT!ruF*-X>!rTt}gYzpNi@)kEd`$Xd$FA7B{kMHPL)@WPi|hI% z?1Ook>Mi;=y-N$9sTZ0XDsS2cxLUrT@AzKx*UddJ*OE`nyg~QvuANK1sZb&(wyt|2 zA754A+mA;-Rya}fXuF4RZkrjYo0`vs7uI^S!B-JIQ2fx>3&mj()=iRU3}n&F(-##!<;952POLae)59nGD05;ehjbk_o>vD$qf8kaBVQT`jfnto=!eT84nO;6pl zG3W&N8(rD?m51C5{vGqKoHPE(TRv#q?yb9y#e9z9uefvM+uY@6-?1mgW5pdlQhEve zz!$IH!UrPHtG9>`a0z|q(K(NQ(Wt=T$UTPF$1gnpo%t_*6J1mJg_2E+YzloHe3^7w z@xi*zuRL#Ex~KVE_x)X?W0QA|1{}q|L!Wey;WOWRXV$6`6EgOOKGtzB-j?=arBieC zdzV}@sLG!3y?hn;v3$IE>SIfL>?@kkCn5gIL-4PO?;E$h^zx8?Vhj^_-N1dm%p1*Tr>t6RxIv7Juu`4-5VI zQ`E4+%T3P9-52yI^MCp9;4^WZzD_4_&crkGOS)B@-R)4yl2A7lfBB8X$xpgO?Xv4; zhk0WB_Vz&?r{KBkUwG)}%;@SS179Bc)Y9-iueu(biU;6B_JI!%POWvtC-450c~-)F zg7+sE?fc~1!?8J9cmRDJf5SJ`Tl87d?dckHd;FMgFK_y9aFlZfr#5JO-4kse`X@f9 zJHZdVzG{^AOM$fTewO=YADoBPS8aZKNMc3oUKDA*=*ZAQN5VTR=F_T^nIj<2)0M4j zAL#b#0p3}Gf9V9D5C1Z!`Wstfyjxv_{=mP3$BMu3!$oJ0eQ3aK|I8J(u6|zceW|}W z5Ad(~QFLYB%U@z%kU2!&i8ogbE>RDV-{`6LEq?rxiVd=&`!CJ6C2o)v)1$m=~GFH6N-E`Ze~Q^k&ofYsx|(6B`n=DT;K;ffq9PEv*vu-bmGZ4 
z#GUNzW!Dux7CR5>(!Lk|wGViqcO1ujRQG|_WfzCIZXe)k_(8s)SL+kjKjgou`;Th) zN0kxJ9S`qO@zdI0bN;;_tDl!{&v!*fT=v_*S1RAJKQ`yzK5RPHV8)u~&QDkuJ{$Rb z_RZ(Ll@^&xWnDPcK6syOz<>t}pHp#Bs9*SR@R>Mif8jIl&EOaKCVeukRPSjG^6U<} z5#3(g5zp{r^?5p%{$sdWo=-l%;-vp3Ux>fr!`;_j`qtSk^C$QT`0(&~@A>doi#vF% z`{w-s`#>jXcC6CvWq!IiA#dvIaDL?>eI4#Gedo4`ZI?ZISD`R}hmXnp9r0Jaut3#v z`xi2UBdIluPT_g22*_9aCxJ0~%7@TSIDCsJnMZ|XzxkoVDF?X$k!n@zqAx{*9D4_Vju z>JNY?;RpMmF0F5d4sO1(JcJkeZ{isq>-^H~FJH6dqAG99jQN6`GyNj=K_21r8hnfU9znUCn4sn7df?_az+YSM>)tvndPzs`g6>%Q3sI>DYi z4ey)vVRn@4Bh`mJt8MFD$7ZKRe1qzG;#ta}cZ*(A1qGkv$-hr|ohe>>}zx*2FSu)=79+h*}W7=Ea z{m|e3HiTFL$H`N)O-|D@ly>Un0 zJc$#%$KTPs?cw0lb+%qH@Bn^|X=R3dR^{c-f^I}dR5zvf z(I4dGhAjs0wPj9)kp;v`)C)=y7Q`KfVALOoV|*n9e282ay}bHA%u z_L8lq-aD9hRra7&;l5c{zMvBfY@M4^rrW0Yo#J1%ZdW%?g73=yS{FYqJ-Pdh7eBN) z__pag;w1mCx=1oUqesDKbkElxzq#?-op%SlTE6h#;K^j(jc>w}`kM7g@O#o9+BrgPa}J`nXy z@d1yeraZ-lN2Sg?`Q0Opch_7Idsjn$3tZy9!PV+- z_JO|ykEI*&iJ5Du9xFaLzwm=THF1YdYaj5IhPQlIJO9Z8F`pw}e8rh{Z`_tIJLcE0 zzxKg>YyV-+?1R&eg?o$_@*}pYGPdu^>$U}4;=alA^3c*2x8?0$KQ;7Um@DCXwTi3HwV2m*3oDxLSP(-r;-ou90== zsc^OaSnH}E)6WjBYMc2+^Lg=e8|K|sZrFz4=i(=DkNJXpuevmTp*}?4k%zLc>@umu z-)W)W-+6G~;M8LGe^_K@THe6B{WpD~`WA~!dU*PpaoO?vXP3M&qw%g-9g;4`?hz4OV?@Gb<~02YkR-eW7kbO)fbPqTywf^TIThP-T6+v(+)@Bh2}IQ z<0$?!`U74_UmsPYfqBd2WKw=5R{Ei3?xd7~^m7uev{+ z>iv0f68=5(`HI`Vza%Sm&g=s|k9YfC^LJ|9{d1l9XXb==Q{g^7ChxqOQzpNui_oj# z9k?&!`m@JA+bU&-xQ<`&fr#sL4LX531Dxu;MLMFnN%|J)_HdMZT;zvy|ETa~{)9OV z@`nu0oM0_CPBp-ire&fS~tN$MG z*DcrIUNB+amG_T-Et$UarklHjK34exFBI3EGk8ay=L@=LQvC`|tL=`4mu%Cg!nBlx z`2^mn;B%Dc&9e!7=)HaymcDy)s6%>J1Ky$2n)^jZ`TbN1vP zKiod_m5uSi7M%vpIQwvTZ~T^S*Niwk=um_on-?!W$j4P$G#|3zx|D=Iwf^g$$X-|{ zC)Tg$d+BoGr1*RP)wd0)a%4fwr~6sybXnS0^C>ED#+`P%SJ zdWk+(=QsbjS)KcK$_n+j61iV~Jz{%K1oy!u&Y8NNxC8g;ThyO*)jQwZc44)-0k6w% zcr0DRyCv?iIzz1|y4Qc=@VWmwzi=vFraVvIu@BbegVrz2PoNHYXOWNh|2RB7F48z} zslUtSNyu;J6X@^Of1w@=KWw|MQ_EGk2csXCHW~Tr$GgM3)%^CCoxi2bX9fO@#2t7C zKjwp`Bhm@*tmOL8bGKgBYHamg!3RRukmvC%|4qJ-hva!Wfw>3zX39U-wbRFy(nDQZ z9->FlAL!uT(G}P210KLnKtJ<7ne)rHqK+=E!%^^#`j9?%>+0(;zt6hj4xbqO;9hjB zc-@_|=WmJK3%symtDF_Ll{p%FcYW2Wx8M8o(wPx|$;i$Z|8e`f8IkX8*{|4up8wes z>Gy|!#dYgC4~_Q?{-DkN%y{+3TY6_sITq_navsDT{r>7L;eFnp4zD}A@%s4Jr$2i2 zm8S|OW>u+r_B~IY@`ZkX+ut`lH$Uhw@(`U1KgQqGv-pbX>&bkm`kwb1-ZO1~(EH#A z_}99=7d~?y_-ybC?`NIeXycFdHf4qRlYA@sP35;;TSk@r{p$P)=aa{yz0Xhx~_XFAj6y;B`Lesx_9rbxFRRLEmwY)wAFd{z>`R zyCw8rc}P8W`r3;!+m8G_(&t0>7M*@;;(yQALxChUfuo3S<@E9T{^F>QT*OK zGxVW|lj4K-0Qs&?_Nn*rZF5(~=0U6T!VjO7D1CI(k##YCA;009_7~ovgYz-z*W-Wi z9?Ivp_TE$L&GgV8Yc7g&Mt|U6Kj)RjiL(afPb|1(Z})Ne^CkE)z2`prml2I$o_jLj z67x&cdEs^Zf{$PP#aq^WeQwPMo=S_vGw*WgyLHas68EibpMKBPoK_&=UWhyL+qiDE zsy=aNP6VHsBfu}mPwRW>Ms-i#KK$BK^LgMXbKU8*pD#!+K6KrR&{t=kf9YdJ+BxqlsDlAxKEsPFT{sUU1mSKcjk$pKZrZ{1wB=K zpd-T7{u^C`kLi&UQy)q%l^*Kz>gag4{Dv307ys>CR(=h6UYz`I`}MpJsV^z-#GyYm zo{$^MZ_a~xnQ-+t86Wn0>Xw{PcjAYU=jCI0lO9EKG?Cx z=J@PpAGe$KQc6Ny#JYHkdtra86J9~^Jtm!rd&SL`0k7wVgM0KdKZg?BahD%7P{4$Ap+`SQQR`wZ#~_JJQp-wfS| zZ(F~U^Xp#lYv6_WCcm6@onQPSU(v%0RxT}^(5GhILk|`Hbx89yk#*^E&V#y${dLaN zrM;uzJkV+7p+SS+n!a=U@$e3aIy!%~y1#l0y+mIU{Q>XpyYt-1IR#S^)fSb=`g~bh zWFDUV70={({$KGoIZonlbXxCJ(2ew?>laZMF<(P|Qzzr&r)zlENMAnvOgs~Jd@p?F zUOdqA+&txPI5n^Mf)_3?+h=iXUIiY2XUT8$DD`gfSDd61sLy-%0Y0O9-ZW_QGr!l! 
z2{;ulp~Hv|_LsjTd4J_0Jd6I|zNr`T6Ual?7hTlxs_jJ+`mp({%}<5boB!DN*`+TW zip?)EUjrXfZ;|K4U${EE(i3z3Jf-{Mh4e{rGWp%q9bHS@*?hyX7>0J8#-D7@F?{(ZizyF)9wr7Mn7~)RyKF|r^U+0XT>U+(nZ84_%HJ9}})i-mx z9}=I(FB(@GK6HJj6OnnvbVT@U{o-+Ff1G3u~0m3iUU69!|Bt{AYN# zJS0x?DbTBPTYkLjywpPhuiFQBovxuib+{4L!6}V(B-@zYhIi9OE;qby2o@+ zIMw&6ck5&IewMna{6_DipFMqZo!Mumrp4ledm+!$f6Yn1xb>uuSDb%1_*U>^`{11U zZ}O(N!+*vfCBJ?8a9WZ6PaF(B5cL3f2cM_YiVyRiZhP6~y?4g&x;YK};6FUMZ-3kO za)Pf7?{2fA_#MAh%9n7?+zacfJIR~=8$Z|oG_Otk6(_|VJWJkGFI3;uKP1o7cj!@c z7~cy&={s4F#PSpp9Z_@kVGj$xiP``-pg-h-l{rO|> zzq%)O9=tbWZj!tyJ~(IgSA8Cyl!wf%laJ*geU`pgUo(FE|0*BTVZ;aLflmQ1?3qZ< zD0F9mM3EQHN-KUKH~O@HsgjkmcZ5E7@fYt_mxgzo-%o!ox@zeiC&POZ>TfSSS-*Xi ztN)73ThVu5UG-S&(v7U!bM2qif7-V+%!h%a^a-2m4*$X>dhE6ps`ib|RijU;J2?;d5PSwt%A0Vt zJ|A;^bG`!BKoRaO!_MZyVmZ;Hz)m`t7kTvARgn ze9wHj@$-EV9zfsG7fQFMD<{vZ;L}~XuyDoCK0O(!;v~Ey zZzjhb^D^-v^}kHd_Qeq z&i>4-2;QMj%A4>n-u+1Hn;ZW*XnJhU2;GQZSw0pYoCo<$T(>TtIovm(_u!6QKR@*z zcAtC3URoe4;!~g_if7KR`=&l*f91DV-g&KD{rv0V?|&>;qtd#Jz=z<}WIf7%bKh>> z{&>7=*s{ob9qKsx_4I|}oBB}TYUfwpR5ukT{WtTj^hsPfX-=`O&jVe~ zy5fU6I(%lXggFf_JW#TK-Kskxbx61m-r?hyH!F`n=hxDw=4px#?i;)#Z|VcIzv84i zB%UP?iNEf#`Et&K`v#}tE#^{L*L)4{{)#)^JHWg7`~K+N=(pKDR|XynKfvquLHu=o zSAQ_~jvd$TiRmRbW;J_x#j^7f8>Za5e(57Qk@>Xh7v>weZ*(K^OuvY@V_m#Zz9{(3 zh@Y!>TNwJ(;0HVle&A#Bz3ScO9#}Vjvlc~e%fCM|Uk-o6v*306AnwS==3u~4ty`ae z@0Y9dC%hA{9-uxiuG2N-p-I`F^uO_nfAX>SH)sDCzcKPaRzyG3A28sZXCAyhb#s^( zZ-0~ZYC4yFFgHoP8;-(ngTJ`C(WPqZYm$1_m9Vnzp{J1>W3n8spwq% zXX>WO@z;9?xBOLm(e`QSF+Z($`yT6Cw@&-6+k&nv{>nqn@7O+;^7e2^#PjS_g2B*XIE^xIfA435yc(&!FdSZdv5k?i&pQB;Z$`p^}++~{h68)y^OrCY?#dEzDFV6_JeOPl*mr>_K@mQTqKZE^b%}-pBuyOR^XnvIb z7V%hp6Yj&G(Pi4tOV?%&XPHB@itJdB8m^hO=Kht_&xi6&{=q$b^l1H3b(-dj`mpiU z=3>L^V|yi*cl9`mA}PXtcM_ylwBo z_p%518#{w1%{RgmsK-7%yxG@#*PRQTt3NO-#&ag%8%u-?BVU} z$|MDo_eS%hX3NE_&y2v%srSx4_7-^|K^>C=FF|sd|lM%F28_x#EZUNzHi&J(-krM zyMInQar5_2R*ISH<9;Vp&e`#YD~<)P7N3{;)%(?A>Ns@J@pguXVQ2Uqd5b(ipW6R! 
z?k)Ta_t6#WXIPrAZc?thi$?o8=n?Zx%V^nm^VTR{G^lV#nImV<2l6+42keahrBB7f z(5>hLys>Gep;u1Z9O);;d46YUk?wn&-jg$??sP68`B1mtF73%!T&r^9J_p10o%_62 zze#$e?=xq_yw_>JRk->6k;g(hUHO~;=Dqn|^+ItI{^fU;&dvS8#BC=-`30SVe#o2O zO6xPcz{Su!8+@VxuULHJ zAHEsZsowk|XQMe7ctmxw`>Nlv{IPb&qq#}(_(A;*juMCZEa;drGlyQkUb~)ybJTn3-*Y(S0qmj0^eJr% z4k{E=XUN>!?7PMH=Z(R?{0{!t{B`{;co^?k-xpqvUV}ZD@2O4(pXs|bC*9md`bj!| zdLwmS@mQVLUM}bIXZnZeqr^@88Qw^q#UA(ve2IMMVzt`Y7Zq6=+N0ZH%$*NbPq+~E zJK+h$A$f~@)Ay?L>bq6v^}Y3;969rze%WL4WBAOua34FfABR22TjYha?>OCV(cHBW zUdK=JzwW_1#`mSaxb&&7a_oxg^LPULEb$NGCLUbeRPTnP>|Iyq^}Y1qBS*~s^W9vj z(Hso(6r4+U6@L%voA^F_iN1XMP1QHmdEsAuq5K(s2Tp}2;Xdzs(_CB;V11emuHDX>KAYcJtCais%(>rZ3?Xm#Wy^-c@^>g0X`Ei@Sbpq zJ(=u5{n&h%n$LGCdH9N?pjGDHf6mYUDe7Z2x5K;O&%|T*fY)#?esX2Wi5H5#bSlLE z+ArdJ@#}D3pMgW4E7LqZgrmeaIwm}#y(JqL{q*^CID0zWtpc#d2GCYpnFx9rVD~g;6Atn|DfMVd}F`*A;n|n({y|>(|WS*TSD9^}KOkRFCB+`E~Ctsxv%1dGfHk1_t(4 zoBu%9=Do%5t%i5tb$-Wtn{e*=j{VM_3CxMaN746P`PSaGyGZcmY?dJ*e$w~WzpGAz$)oec#0&A*dlN6*1HWEj z&z!9%Uda|yKQ@28@k15r%&dJdkhh3$_$YCcpOhDx+o4`)4=Ej!dthhIg(vkFiWhkM zKC4FmFypn%Kz?DLWoE06+p|vW4}7mW16)nl#!s@djY2GM;#0+A{!ASmPL1z7 z;2-3Lcmn$@@rd%Qg)Lv+vh3sZK;IXY{ z{OsbOGIuNt)c0H^w1d>@^HyoKMf-&7q( zo`ttp*W-8oZ_Nw3rp!y3KW;c0(#z?8gHvY@y>`{hgU&?!FTD|-%Nzl9GW84b*!SA| z;Jx8X@TuxJcq4Nn)i?Qd^;mp~ID}W`cldR2o_>AM;72B}TfZaP!<3SCMa#0UJo-i0_%uR#ZGPY@lnIT-wockEpL*Sjz`$s8AU#{c5&&C%l5-GhDZzE?jw`!yF^ z-fb=*KM9w3Z{7u-pxNNIpC|lsKGL=Ad9CeZlV2Vj#LxZ0(>^<@OqI%;Qlfqwd=%Wr zpTWQ4(AGlvYZiI%WDx(}^polDh9C5I!zFkaeiH7JZ}L0x0Qi@F=g%G&I)3(NQi!L; zufq>`IrHt!WtG1HQc_bqCfFnsAB ziIM(+Khu|_PguP0-h8h(?|#|ukZ}#Z-1_mkC?CTAs!P)i`d;_T|H_Zyb#=YcV|T7w z*XT^dSHr*J5S*&+52vb=ncHFRxW2k;-#N2?dH;>UPycFHZt9kV1@Jn*j$ik^{3N_1 zzTxf7G5p`{7xBI9%zvZHWWW5nd_LumhvttTyg!;>qD}_?iWlm7@%1768J^4BjtohAW?(+;jo|U~UKA2rk)iar}wz zdmWGJrs_EILU>)hoBb}@Rb@=G?&kybWBWJXedVp>%AL;z`c3g%{u}%&UhFSk;+GY- zX9em3@}XI~+ax93bRepenM*~l441%t;+r_nPs+3CuAIy7bSu-f@2+NPVJydW-`(8e zU_dA4y}^^>kbFpd;~(%*@Z_<}I&*wATw?DIUIPz7uthtzrS zsf-Apsq@DBNq(K3eSGic#H`f^!<$Ff{%uwLv`F74&%$fq5&2)b%z}T^eKr4Cf<7^gHTK_#HYix>xoapSR#k;3#v5 z2DZ58bkjB`LV19Gz3}R7ZC8#=4C(mg-TaPt%s=ou`T*6*^!w8};veV{#W(iLpScJ5 zn{&lYI2A5gJu7zfRAPEG2ZKF`Z}Jv)MrRI}z&r9c{d%_)_@;l4Zl^=I51yp&a}VMW zK5BcJ3ptBU*c#Qz#A9`lR$ZnndG(gr0lecL#EbZMf!7d^#d$cDU$@5wPs^|Kll+5y zb?iaDiHE_LJhlAum%jVxY{;LfJ5?|K*u)v%Ul;Aq5|7n!ykm1B@xSVY`h4Q`sqEJr z4Cm5M(kYytRDIXOS7%0kT_1}2vHmQ)2HXe#ddK2?{C_hCPTpdFp?)X45xkCv!B4UW z|Ban_7wo~jSNaEi9q<{w9DCq*-gu~gvFQtoM*UfhroS=Zi6?W%={q(gNQGCP8iSz7%4n!Ux&dcY;V|=PO#P86r|JFPkk-PBx5{X_h(_~st)CE{kw%X9wK=Wm!^yhXKp&xZPZ?32Jp z;g#`|crG|qer(>AIa=mH-~7#co zF#|2-V+Z)pE~8(wS#MZwJn;{;2z@hEW9$m(|pjVo6B{{ z7V|FXGE>jLf3a}W(}BEO{Y_njKl9)41mZlNfM55$_>!j=PX6GB9sfl6P~>+?wtaE& zk<9@-39pOu{3L!7ey|@>pQZQ~Uq5EQ>NxWG4sQ*8?Zb6>WB3Pnz5364epuh`NVM;Z zz7PK(AEKX>$I=zUXX+yItc_K>4Se9P{Xwv$Ua8=zQ_=oSaftnjZ{~yJmFa)ouQ>wZ z8=ioFfPdu|@%a$`S6!N&!4LKimEX9v>$Vp2qWnU=uxF=AlfRyIEW~rEZ^BXPZ}K5J zbA4aWb-!ZS(N~j0dms2;@524cW7)5G%zo8d#5X#Hcs`1rRvsX}sXLjQgqL%!`sTu2 znJ?VhDm~i!tL~4Ff+u~iIJEWUId8AJv~Skl9ejztaeeM^2|X=4W52OVe~#;WJUQAY zoRM~9__QzH4&f5~y1J9SQT#7EQ7&Fqew{tU z$76b0JOMt{ds836PqH)T;@9~z{24z9ue%3)lsW(M7Whm&ezew-DGR?z5BUeUTAddU zqt0udx%cKC#7#Vm?}c~7zlYUjqeujf-O_y{Yl{1Ey^WWs%_7BP5_}{weo!j5pDJ_t1>SLupyXw!5 zIgj6QESh_OSN6T|q`_kZwgDpq?dOz&ql+Iyzk&eH7l) z+*@^1dJTC1{D3D=Kc=_8{_1+Ow;Zex!xPXOz4Z6ghfiOcvq?|eqGRT zO?C$N^=f(Vo2|1YhWxrd%Rf4Q`ChrZvn>$c%&Xuh;Rp7B-`RKKwkp|c=Zf|ei*Iz9 zx1L;FIoG9qm*Tv4VZIzc=^pSfJMR88Yf5nWO#e~P~z)Z@%#?F-l9sek3V=bf9yiG3rD+OnI6pp zsrq%ht9K;ljO{;P=v1BjXM#UY7V4k9M`8f4^E>{V{LQ-%hiat1c+=SJyF+=FzEJ;d z$%mDfEIf2+-^}HC(E4NP`}kkyipO-&-i3Pi_J?oka&6U=K%S+K)qfL**n{`xe(j}& 
zOXxD6-I(y(U9TkvkK8@5MB932qBxI7g!{~MJpDw4>Z$9_26!(0IO;8QAaIHJCeMOP zoC}xm>v)2Mh8=#m<+_})Z<_ygaL9G5gLwYz?d!g5JEe7Ew1*V`K(C=6ht38r+28uf z^|zEhAJCWJe>?v2z^Y!u_XhS~sN>L)dKc=_{EoOOAFA}x%H*G`XX%@vFTukMIkK){*NSNYUXK33dsEM1zwo3vr}8ZGx#Y+CP3=u( zXX=~rJeT}UK94`+&*-_#kuBKwtBh`^E=2to`s(0Rc!xc})$)1u0PjsdnZA7ZfnSIF z3f%s1mx_-h2l_?o{dntbiQndqmH2jd_;SkmsP6zDrLI@GeXB)vE@TC8sy$culK6fR zbq09={D3zqbN|7=dlbIp7n)R?Q@!Kiu<}QDm1w>;&jN81J`*?nH~TmJH~A*MWK8Y4 zd2(iCgmfVK-|z&^WoLBpQ9iV-bK{BY4u$4SK)lv({^>8l#-3Gkl&n|#yv!l~k$ z{!RG>J*_%}IPbmrZ|oP2f+xK<_uyTqGhF{z;V(ZQcPu!vY;fV4c~3-p&*ZW21OE%3 zIahum&yu&`jmDij*rU&F*%pYKa37s{iB-eqOdFeh0lx#M;tBNouODBj_>Gkc#(Zza z{A*t7RXQU)|MH+)M)b)QGrv#VjNc1m?xMIUZ&4qDQ|VUBm5>MMC&L?c@A!78WM$<4D)rG8fPe1KQBm&+V)do|dZ zc)=dvK7DoiX6Sbx7i|2g=$->E3Rc^B>hpNc0CFN)6n?5itR z9SF@K5|8nd>TmRjDO-!&+IqvefM16n#7+4Sy@vWG{jaL2NxWRiJ{9JbOFkXQ3&mq` z$UX3r>^I7@F7E+xzxXJ9*zVVRgHxSrZZkWZ*s$n|lSMX!-UU13*Bh40$QW}WJ+cS9 zy*i|QYW$?{A8+y2Yegc2EPA>EZdr0#+Kj2;~C zDbDi`_#Jg8-}^(yE;XJyFe89>;3)M%dS!iI?pNI8ANVu%`M2LX+b7|P-4S0R&g&n- z@AzJRM?M5U;O+f4_(5H|%;kAv>gchgy0e~5JQLz~_+L2Ho(lF*w%O@yz4x6DeHdVd~?5&%2&L{3|_;Yl_(Bf=6~s+8$H>zW1GbFLGsWQ8ykm3Vin6*8(MF3R@B!F zm*5fQZ+H#)G5;XW(}~%KazoeK7T?lkYt*Nv-hx+F_otuyb#V8Bx9&*|@m%JS>c7xW z)@AS)+1I?aE|}P6%+R;qNQmhdfvd#}@t8e`oBSj@qko8xL-c3rvF^b;7LVaGdn)iz zbg%G(x`@78x(d2XaZ?^(o&x_M-=yCe^5WM;Qu{55{u`ba{{Z*VYl!nZr`GxK$S+Az zKUwh$b6+le?qZ&1}p*hocKXBvxDV-mV z`Y+T)#7%W{{gCwQ{BP3oN56depIk9{3my?Zqo>{VQuEcXyjU>GThwvb!^g!ZcItTeh^wKhUT8Ui`1Tg?})wLVp1JHOJ-p%X=sB_Tq(i%>R1F{3P8K z9UeUlfA)39Z>JvZz9!0B^l$Q$@%1cxiN109g?NlFF+bHih9AUDd6qb&j)SlE-sJP$ z4qr2=#L$euK2|(|`}L0DYVjBkeq*&KZhvUenkXNlQ&@WJ&&SKAZ3y`@=fc%^4Sq-c z*t^jG20zHVZ*R4%`pLS7gASXL22{$H988$BCG*Z%=R$E4Uu{pZeA9dadc+Io4@{pl zFi#AR_-10RLraPr3Gpz#mtS`e{0=|KpLO{xYsu+(XM>I3)m=1i)skp`i#)bs!=FCh zP$)UzAKb6J8-8#u`;Gi>p%#y}>%AkSTk+o1_3+^Kx5xvWi|>Q`#0&Z}yuJBywQibl z%@+x&kv>&^Y;Gw3pswd!e2MxHzat+qk5oLC&u=4CTz|Gn9F73_6Q(y^!y zgjVn$_v%?@YUv-m)(*p_p0a4M0ir&2@j(#f=9#~!ISC?>}<-d ziJ!I_nHHLFw6628J^5bG5o2fkj(6;S;Xd|&SB8JHt}R-A_JkuL9Ufjo+;lEK$v^mS z@D7|RkHz=hzw-PSAK$tu+Seh@!@qcC@d6J%?wZPFPdvXm8=t&15AV#UrNUrO=v?`xJwcn&iawL`&ipZcT|Fzl zt|uQ-@5XEJXZ(XbAn=*`Jigi_d^!Dsrguk77;>zo_cNBJ8)3_F8=-LHBqT^sxh zm(bhOKj4+w1Kfv4yz`1(Bi7}}A2WZF?#di3ypcVx;wC+exXJJMUhzUa=70GIdKmMS zPd41q_olWdBAtRdBpwmJqkmJLWly~RdFSE@+RS;pSXYleZug%J~es`^&$9yu1&rvpNA*)si`01&-9a-TW4-Q zJ_?WM9{5RnOYmp4z8}=E_?%PRoo0r%V?J=iQ@V)#_eB6`|!N2+o^`n~%prUoF1*UiwLX-Cj?;ocx#` zMqdXV6J3yd5HIMX@J8NSnOnwZZ?oobfdAE}=3SV3z<$}8dByI*z5{XI{3yD3d`YG1 zeM>!3U~j}%SFQJ4kDet9$Jm4ZSUiC|R-A{U;9vVJd)!c|<8?P@Me}qcetmMUiZ^Y{ z7E>RB&&<*C-soxZJNB4&e_+e@TAdDr_A`i^jTf$b^u~LRg>awmg?Hou;*fVt=h*c2 z)w`?Qni%0e^+J7W@UQQ6uKWEz)dk5no$G$i&nD-H(cCh-sKm#zYzivKmISG`dGoA~B_@zu`dC*fapC;5;$MEtrq zB!3g<<+1!P{9wOR%{NayS7vF>7@pv!Y-h{QeB)^F@TxP{ZSFQdgsa6(eutguL&1CU zlPwc=cYL7s$pG$4OKIQm`0tnWVaYf2sGo9a-s3aHw=b=JG~mzp2j{W}-z#rnzx0x@;PECYponzscY9=_4e=sT;hJsS5_y3fAPw>8-Duw()p{Rc;UbKUjDb$u+&%XtY0+t>vOACz1ue{ z6gTbVqA#JBGf!;M$M@dYHT(Gho@BrBSox4TdgY@dGS{!181j?)FX$2JD(H>8V|jpk zh|lNMP0h)nYKe{2ruj4({A;n`j z3Qr*4gj46%&D*L;>l`tC2l63#xA{iuPI!B~9Gr?LfK$!cGY^k$MciyKW97a9dsD&( z*FQP@>eK52^S0^x#Ci6sZ=7E*lKX7l_lv9w<%Mvc`rG71KevCs`rlC=fd7Sm%^9(u z4Bp|__2b|@_2bC9>0#hs`4E35FXSH{8<(T!JC9wOyLg%JGf&t3c7I^Qq+&BSM0>8_ zDD^`4Oq^dk_wbZ`werM1sXwavmnF|dd#mx)@>uCfq=WBdbr=3MWM?g|gX&di-rH&qv*r=|a;j~f2dcf-a{N{{X#9!I%fxX(Sn zXZUqImwF3*syx8C{LbvHU1xXyph%4W#h2hU%&(?fv43by!{?WNusaA)x9$9Mg>qZjj^e(}6mfz3a~9C!LEYa1Z*h>Ds(w{g8Nj za}Ug0f&1vT>A~%B#Dm-SC4a*ku?P2nUl)(PH}BZK@k2GH-dcKpYScFaSJT-PshZGf z!!1Xm`ey#IKQH;Vrx_tzG+dTsbS(hbJ@b@qV&g{%Kty#`(m 
zp7h@Ig|Y|tOGmmWU!El&o>~y;J@KjTS3Kr_)m!A<$2Zrwv0~{{0e*+hk^UDCuFgP5 zYF~$WgYJP24=)GroLGG4)o&d>7XDhk*WTOTO^S3Y`u+VkJORHW4(a=%Cy)nBJUVFY z9}AO%a?2jCxN~k&G`|Fn@-Eofsv)!AANuIopmph-`@?%PLw-^nhdsp48S&o43-wrj z-G9SB&=c4L!k^*oU;5?ICqEf*JXAmS-^6)3Cily)Kk(Q$XZn{}8|g^ZW7TFC3)YWP{+Z;m{;K*%rSHi zbPD?4u3b5{SoR0fqrIN;A^9=;RoCMu#kY7~V@oiy)8kXug*WH9vdqf+HiY{9=}Y`K zb$>jUI3z#jfAKK(^KQSKZ>rDJ_sIj`GksLzF?@zkb#B!svo_yXAwA@G)Hm@Z@D4lk z-h3~7CeE{8`$fz-m1nVEdb#_`bkF-{qm#jx-Ga{N8l4NwTfuvZo9h1fRQOjt3tpEW zdvC)l*E+E7+Y5p3wFi37^}ByBRqjH7kFv+SPTqvhkFC!Z!}ra|v8rX0_Jw2e!uWW> z@3b%1ph=JKPDOe-eqDVCzb+oL2X!*`E8oP!h!^w^aB9(fKWxmlW^?dM>SqIAyelh& zf9bX}Z|eB`_d9dP#7+E;@3qgTec^>Sm3cBLG+&wito)GW^Rt)B{EvF9yoLXz+jcHq zPW_nwh0kiVdg_VQS7t={JbuT%IyhCFr)zUA{42jOpNs#6qu?{~7|+E&z`y*YIwZdX z|H3=s1%3xF=N;2+tG`uz`n4`UHqIWypQ*=+LsPca{p_ViX9M=Y@5J+5a4OyF$X8$M zb@qe((cEVEZ2RRtY`Q`9Av~9OY=736_XZ^v%Kc?Xk4P6J?`|`$M~Rt_C&cJI#ZCSJ z55u3~HQ0l`eDPw$tuwaPm~=i+51@li&GXv7K5D-p(hbr-$XoDHcv}Ch+4akZt z4b93V?M@BoC%t1hDn1@N7fw}=g;O`oT`;w4;k3Y9s@}Pp7o7Ck(I_wEf5i)NUVf}D zJ?_CKE$=;+Et*#$f8&466^7671mdxGfmeq6Hr#Sc#qX%|esmz==no^;MR=Y6We;>9@EQLrZmOe;L&vVoTW9o33j%&J@6$zAet#}| z%=fwnI5obGuFebh(R-T1DQ=pZgx7$7>7(L#MCanc*%@7C>9M81DA_f~0=NYKYk#5s zH-6H)a4w#Uf1sayxOktfJCpWAb6NQve%*i5XURX9XM;yHXRqmB2X;P^axT2DY1P|1 zG)#>4b&xB2vU^ zmsj7@KOrW+z$3zwbPD)&@nYoVJ%ckJ_%O#a!_NkIdwx6y0T>s=xwU37UFP)9JDSv~b@RRg? z-UVF3|9Z!N-2Kf*5A~ZF?Qikk=u^$b#{aIoc+=It{cTmKPd=x#Ge8~N> zUwnzWG=5TkAs*XDrCw-`fV|te_JF9j;1P?o{;a^l#2m5s{!KX5xpe004ER2Ii+&vM z7*F6_-`n-&w{CyB%C111zq{kK=f=)I6Pgq0T=PrBWABY$rz=)}gD1T=eXQmi(Z!4N z-kbUm+y}4gH{~bAd2`C@Z!P`%(0-|*`Wt`dzp-EbS3Lf2`&;}sc?+J_{hCkU9v=T} zK$8Y@PlWbE;;Z>}ap?FLld7&hd@;at(Gz&b^f3I6I7BB#$A4j9fe-f#J{FjZB9ArK z2QP>3gU|3Vbg$uC3pS*$NC~HZlvH$iQvR6!H+{GAdHjPsz$0=Uxlo*Uzw-Hh&kPvRuuSfldaQR0 z@8A*XGQ~Ifn>n2FSbU%R#lx7dT&P2nCqJpRFXCb7HSVd{>9ft*(u28an@+yba#b|9 zP8{MVzcOeHmHAvNMB;kNR5fB-fP-^Z`4<3{v`jvuk*kB zYK7&i}_EqZ~samydk(l}g{jYq8 z{mSR@JK`oDTpo*`6yNx>mfcrA{qly)Fsb~loqs6!*MI1v;2rqkn$Q0EWn;Ypk-m?g z)aNdr&wS#eso$JFAJ)2KVBQW-9g5DybHUYkd-WE6=jfQ6Z+9y)D$-ryC*@h!cAPeL z^Hpb}`*jcQ7hcC3>BFX<#Q(zU@)o*nen-53CyPAzdglxM&jwj*>Kz?eC|k^4T0B>o z0ULk$JMnZhM{9YV?<@aU?M(P-k>=O^@NilnKX$J7<{s4F;3&M2cwtVj`QWK}-Y;9A z=(Ok^tG9R;{+s*_pXz(%S@^yS!0_opc#9HkGCUPFEiSIfKQZ}=U0M7$?mMQYf8 z{G-DXBb}~y%zn*l6NmU6eiH9VpQ=ulyUg;D#W$Y`%~g|c>dWURohvWo*X3`{6_2C6 zFqh|%su!B`kDtWTI#-=HK0j8Mh9~L7+yh)KkA+Lbd3|4ad%8^eKJ&rp#MHatGj_&5 zuroRZc?(`yKX2d9GHSnl!--J7NjE4SdvACn{?{Hcd91!9x*&55#W(x|otQk9Zks=o zZ_=;JW4+_H|LXeqrbZ`1xT?Z{g3>=hdav zaqyG&R>NoRclMsG*LQn0HIx_PHP{(H30LEl}>mx0(a)U za8(SBa=&<(h9}Q8owcrbOx;wyMINh82A_FvbU}DT`I~ynxVp94m4E#7f6RHp)AA4c z8N_+-jen3|q`X}Di!T-*4U2yLZsMlKiP5|^e3UpO&ih{Wz|Ook=h_Qn&lUgc9n+DD z^YQr`yyINBZ+TX$Cl0)v7SKPKw@s(Ouk#Q3FMO{$B>lSYwP#S>RGy_z?R>UHcjtdQ zN6h_-L;tsUM1GQgaIShc|BHX1kGkQbv*Tw}JR0ri#jo?f{5rk_E>UkWUs->^i)Y5( z|JCoWhO2MQ|In^MC!+p0?@fJ^{lYu)3v*}KueponRV+JtIA!Rv15rP^c!BTZ&*Ve$ zWBHK2MR^uI?M*kAYIXAb#;Bic(vai#E&n+qniHu`#(wdAa4I_!H@!D=so+$02AA+V z?m_4bJoI2#u{eymfd`wcfX4abfS57}4dKb=Rzw%?>dvW840@p0d3e0gaALitsi?yFY;K%z3Yab$%yKO^aSE2Ue5ROXXa?Z4-^0R{@jmN%ttgY=<87x-W-;%Xbk@#9@nVXx%Q72^2Ee9_KSZ|HnNbrdhuH_h$v-qZ`>2m5B&gSx4DY`!Ib#~Rc*8``%hKXwoNI=-6!#aGkQMtOIx zf!F7|xF_Hr)Gx##|BasDWTV}i-)xf_Iv4(B59&_**_72{A=EdIh^$C z-kWpjD&Ri#Li2R-)#jY)=hf$9zMOZ_zS+uA)eByX>Q3qb@>t(n;IY*yqw=Oibv<>Q z>5U%WapsN0V8Gt)H_oW>@4XLn#qI$=DW6YTJ@u-6K98Ec#djNhS)7vH?MU-G=(t;RPeB7Ek(!Dn!k`jB@F@4#pJ$sRg2W?|V~Cqp_A zxY}G+{G|AXHxlQ~-=Wi`Q=ns_v%z!mXUpI98}uJ|2w|Ei0A&9iNGN)(6WS^k^vg&!6U-`hO%e6|JhV}8efGY9$f zka<}n5C0zJE%KrG@8x&kDE9zIdB^zb_wRb=^UCdyMRO(i2k{u6%CGYe@w_K~2k+?} 
zvtKw$J=S|ucT#6){9WEcL$gjrbFuYN!8`MY-gWMg(%U0^CLZGv6~lgsxG-hA8ld1CZN_Fu>Y_#Nls`}lRZ!x&_SYg-h zKpv}4Lf&oe7aXPDEgtie>SS;qUE7B5t8FQ}@j@ss#M{%E;|ch6@6EhSejWcS&syD~ z?vw9ab1?D`@?-Tv{z3nccY)uLUwCi&eDnvXUwFs#XX>%$@0c&gKRB12#Y$(jy5s8e zkq%GYl+Fe(2S+t-lJD%Pr_&<6GF}6(tR6e0Y`KCp4<(0%Ufpw`&I_03pIz>|m0#Re zXu>UB(k}$?q&g(~)mPWzvYyL6cYPB0hj^WhdVv4N&K?||StoDDvw=8Y?60@(T$gf5 zZ?Jdq?!s-h2J}1XdUQeT!Q2D>3{IurF>f1R&CZ@b-0p^-Ud$0QSIzzMJNQ1lJsqAn z-)`wE|N5rij*#D>1A%|dJ&+B4!ezjt&*-u}SCnj#Z|*s zRPvFAcXo?(nc|x`nJ~@$<5tY9E+z2lF4) z$>0b2-H+q|?;Uz)pMjalQN6`G#;@a};1c{99!5S-pUQsCU97gT;PvG{JRN#(@&NjE zyat}jf3pvezE9mh@r6lO^!(?WXiuAX?0ew{e%)L^x(eURPvWCmKK;}S%a0z8cu)5W zKfvqm7cRjQ$miAl>1+yqa@~de6;q;oUVOt7#OD|MBzrJ#MSfv#Dt$kSgN`+a^r{&1-kv-C;3M68{}auZ0_wBzkWG&M2)>sKaPC!<>YcRt{;9TkZ-z& z_`UY@ss5XJHu5*PM1LW_u1^^6DbAbwg^$Ab;dj)@)wTYQyzeyl+P#cdLs4DOY@uY%Irbj;(Nt; zJfb~#>`Z(s(Ph$mBYUJo`m;!vdFr>WSJgie(oc$;`qbbO|ELUD(`RnWfU&0>LyWuGOFFk?# zjn2J1znVSJdy4bkn|!E6wfQf;u(d!8u9gp()2siOt__aDd%9o#S6##&((x;ji;vp5 zIe^dL-`nTqI=XR2dh{;n@ZbmW0*?s)+5;kgvlpgpu~7vlKYS*zpA7!Bmlm#et~{1b z3@&lM@bCC9CT7+CV1GEE?)keXw@(Sx(fLU{0YB+oIQN<>etKf?eYs-rB)?ES!;_W#(c%aNel6IV?* zkd+V~p6Fhoih>eiEMaF7yxCC&8bI7thDaj?dYCU1*K~ zT*4m23w22GBAQQdd2gRQR-NIK8kJryk?>D+u6QATa}VAdT-~vD%~Cxxw*~sJ{WtOA zy{8VoQ|aklq5Rl?%eMHZe$Q>+9mL~b`Ng7LRmL>yerbA8Sf%K4)G7(1;3-7MQ?{@9RDD`ng0Nv(J5@b;f0+)eZ3^=%Qyc4PaqEQ5B}Trw|-K8$fmDCc*lQp z5BgB#0d%_J8~d%ddPw5D-?PW;d3C?`GvI&itJ62Gf8PDVzx2xRq6&2fy0SnFE}?^tw+HdK*Pzc&fAZ#qQ2zzJ z9D5Mo_+R#*uS1A9BC$LH$_1`EzW`irYR& zjrJ;g$MP(7J@=sBNqmEM_y;v%bFh@aF4Xig;mOHV6aI2V3^ zQ}NZ}v2*SF`r-3C2MyS`H`)s$&w|(GE#8GZ_SLqdn$`b#N3_2MFIP7CvAgS5%@(`j z(+98E_4kIT{zkXrd)cq}7UlCr&%JkPFj5#lmkR$-=H2bT49`0^>OY2~oC}}fQ|qtT z(etbP*<$J!cv|&tdM0+7T?5qd4PHg|4TRMU9ewz0{T?n%m4Cc;v0S^ z`H@{2|5}b1Nbw1wK*8-dgAfQ84DLBrDu=9lXSZJ#^F@`Z{||*4^gRPXo=e<>`|vyC|5#JvnzqSN z{f%DE+!_4=_FTp5D)3zKd>?%^oJyyl-&B5l>WfCblP_L~=1jxucv^KQ_>BI{yPzjn z|9zwV7am9re#w)uW8D2&5nqj$lV9-b{24oAzxIX7V-H-@Zum2KkB3`6%v!T))3yMP zQb$+cG!NPwLw2VBg8lk$bPDPL3*LNv((WF4WBfYZHhiXjfv1&ks=tXt#XoxBv%G6& z1x>2F`B1S7CqnZ;)Y0)#^nLmn#7*x4PUUxo4Q~0?(QD6z1%GdM&&6SzBEKHL4~3nv zU-xk9^ka`yoSrMDuMX~0k1cui*gqy*l@QY(fM1tq`ET%!`M(SEkLmI4-1KOku6wZ0 z-8*&<;sw91Zyc_6?vUgqNjLt!A`~}$FTW$7$Cs$<`ET%!zHxO1`%USt=)vjuUs`{4 z!)t<_k=|21K;2Z_jJGp!=-w}LJ@MU##bfvfJQo}#UX;8r@yBt8vI6xX=i=q)J)NsB zNq%9Ugm_FpDZYuv?3eBej)EuYHN;Kx<>)6Ty!YV7eV6u`$J;Oe!0*7-@&187qr0+i z22bF>!Dr%-_a?r{yX{St4-FoBvg^Lnhr&O0R2>#8zBe>)P#!BU6fe9B?+xFl4~5@R zXV4!&Pk@*6j^UCigLif)^H;VQ9OWI0Z}@6DNAu$4vHZ^es_yR{!zJ=f^%ixqJ>Blw z)$WCp5uWtDaJBozN72Qz2YyGN<=Z{$zE?a?N;qe2`{zF$v@@EY%KyR-^uOLQoT|?q z?+O3fBX-lx#j}3>W>JK9{5Sa~o`9XnyYYxEF4v{;i2r~4)WkPD%&NrQzn{4)M+^@W zpBK_qh(r8?JQhzI|GjCguI_YT#MuZx$dARL#`Bwh{PvFZA^nbfpnt%pvR}Ftc+y-d z^X=zWnVXdP!=Yfy-EY2FV9K@-{#8Hbf8DRV&^>%!;+wg5W}b|2iFcunga1|U#z(ma z?*bo%ujbF_K;ScajhXeU{akC%*{E-vj)|UDo<;uvKX@1NH*?+Z=vI7vsXH!&>X39c z?3bU!pYgx=688(Qt6x0!My+oP&Pxfl-O&BEW0_l`dVq7~-Og3tao50 zcRpYH5)P&-R)>6H-20_dP9+BTGjWKXfZy@n@arG;&HAxrm7@_K#U9}5E1K<@*?4be z#48`}QSyPucApLT2Yx61zu_P7)o|bTA-!(fdn_?*eq#8Tr%$Fud8~J=E&@k+$MVfW zH$PpX-hzyX_r(92w_V}I=Qrgjl@_QM;ve9LCCj@k=~=%>%=f}O{+l^);)VGd<}~PI z)z={}lxNXvh!@_m{e^JUvl|nByX&>&fUXTb6OZXN*spsqpUbwD27O-FbjQ)YXgq};>l()cr<_NT%m-Cq%Bi9G=Sb8pdfBm;_ZalWGS(D>| zybvxir$IdSz4ju)eY-C2pJ!+67ycDDeXsl&4{i>!`muT;yzU*x|2O_jo+TgBSEnCR zT?CJa=b{^w2biOUFNx+ny?&Dm^(_(z}gVW&fTw(TcVm#$T)!ID=l?Y~+# z_t_1-PZW;9J0BOH*s*h?>@nZVe(6i>FT8X16MwCpni=U+`AL4AzEA#E@5ftjOZ+x> zOdTC>5C7&|n(_U{HxGx+E}T9+D(&BU*E@Gu)HnML3DF)rypi|j9h)QTzsa-o&kr8l zF5i_^S4VjZ`{mc!nR{?9oT^R+N74U&v7ue3^u~Lmcp=Wa2lyfTUkx(mHOPqisPIPg 
zXZ)}Eh7Tl8{rFHc*InNt{>;8+c!ytSXX>$d7b!rq(tsd!KLSAJoins=doF?MdN;_uv-6^L*84&bQi zGnaLFXHjNAZ)Cp+UGdst=S$4p@K4aUZK*+xZaEylCGsJA+UTR)ueeD+2|s+6^hd9f zlMaMgN1oZYX5_IDA0;p3*Z*I=3p_3T4nB(gngb_~HAmo&q4mdK_2;q(Ptqyi3D^U_ zBX1Gk=wbME_<=4S--oC5F5q=Mm;FwiTkqZ3`;&y2JQjW!vFX-FUw!y^U>`R7^^Wl{ z>>=Ji*f#^G(i^E?n1|=R`Ck5kUsn%s59-qDWbm0eBk_Bq^a*=!@&I!L%v+(aRxkA5 z?76zKcf+5?_SqY#>zQ{2|I*t#7ycDD^>x4{cn$mY^y9Dx^%i_8ypHcPe?1I^NWPib3lP@$N<4s%0&FWsOxua4d~mi$wR`_D#vs=2rBcSw=Ld%qr; z73w#&CrEt5BW~~aR8s2^nE~DFeWhCbJ@Q;;#KXWP>H++Y{x|v(d93-9_+R@A)t%s9 z_KQapkL6k7#pIqp4=DI~j%Y5nI*z)Y`=w76hxi@unEk@l>aqMUe}<3x@Z8x|-*i6} zn77Ry@O|-eeStVne+EzDqvV@Mdsn%u*K0*%IaBuE_CxQa zXpfEfCJ#^-IX?Ka)&tU0qdt^)e|G149iHw`BOyj#!tcm0%>9C^^@}VnSz&yQ0qId6 zitkmw;2-ci-doR;E7upEl^UpbtDB1RcmjN?xuN1QI}_i;`9BA(YF}~G>_EJ*mka-J z{Z;!be4Tqu2Yrk5CGxCz-roJH zZ_-clJM5v%{t-Ld4%;2r_a$!;haO&eHtg_Owiq1<9??B`7bTwh^FW*Wr$h0W9+6JW z{2liIpW&w)qneMbz<~JbRhD#65s9)UrxECKP&pooLJW8P`yQcvvT`Zi|Sm+iu$GP zX~U=Lf5Q{N5BjB@>s^=!ojd3K!}@)3A)3!cZzOKA2mV*RTc5jg^$E-09=-PO6;Dmt z8GSGQSKQ<$@i6?^T{|9l`()?5F?p;ygE~&{T+4fwy5VfpH-je-hvaX?Qaioa=v2WN zeutl=E9Q61v(eYAu4kTZeBB>k4eyv|4)5S;=}Wvf`4HZSeiC0I&vFm=Gw}`nt=)9v zva8R15U%df@WXGGZi@7&{0{u!z47b(ulUBV$K!SNP5UABx#O$V_1LdH72X9NhMvn_ zW%V~Y5ck0UvWEy)KRkKzu)78Z`mn_d`H;RD`>@4haZ`Nrz5W}2mh-t4BTj8N9MTiu zALJM2waJg!8Q!zRs$p}cjm^GbX1BR_jjMk&q}OPEtmokReWr%`vwnW#zQi}GT?pu| zCZ2!wy$TN;iT2<*_w`jX?>#>=+X8-FzNs!^ud+EVcv}5>r_W7)_no6lqy83oi#))b z209ygfc?_)3p{~3S@Q26boz7ol&C($Kge6)2loq4s++23;m`Dq=iQY1{VzV)9_fGO zE%HM71ze3cda-ALM?M*MJixE(Q`0}RbZ+h!CT=?!?bm}R<%Rs2?{yD*-(B(RhtH%3 z_-cLQ_-gZi?MG*4-i5lqbM>1>^K>uo@3!~BoV`|=d;d8<|EH)PfRBQs@W1LJ-ZB1R zbLXda4}7U)49|tHmUq(|eNz6${rm3CAN6O!QE-X9;Me^4MYfpY5^~4LfJMlUu|4n_KJ{4~?arTHqjW6kQ z-S4sA%H7tx_m1dX_ds7FUdRLF-R_s3i=DX#`9;(xJY!$67kkbNTO3*cOO=vYp*%}G zh9BT+xG$=suX!&#kUc$wQ{~-sZEz}ohVSFo*{}MhbKS4KM)cMEnRpzJ`}{Zg@$%{Y zGwa-u7VSF_hs>EyKHc!(yI-aRO<(CyV|~8#!1wa&^d;&6czgZxUmn6ow`~$y(CvYxagFh25;7M}?B3!*S$1A&gy!G!nPnDL$?)zb1C?3mW z>G1Riz$JC+otV<=-f4k(#dresWB*N@$DjFcRlfP8Wx1{y0UZckka_VZUOWEUu;wYz zd_??%{Fpw9J@Aw77O$CY>vLx#KgoWd9^UNhz3a{e>Q3rI_&)U(^;mU1Ive>LT|6E} zecn6H_5I)L`_Ik{*_pbj|K@(ZH}T??+XmNKJ3dE@|HW(AhmC&_-{kY|p~1T2^#?cI z6^QfvWSLvWXK%CSa3BuRm++J3bK$w*KK82~K)-G;7v5eyfS#*(R``CA)yaYSru%hn zeBDXj0-v!ne3W<5<@W5UUsXFE>G>LT7Toa$ZhllUDx zm;Hr!W%!qF(7AAl{LQ;yzwC^ky`umF>k8~AqHU5|X#jnd_@%H9KI+y>& z|LQ~WF7O0!syO6aIwttHU)%RbJ$P$kpugKY#@l-r&V{4gue=a%FK>Z=JO1*(s$RqQ z2Jku_+*}F&%^XAd8$FCTq%WUeSC3uR_|zL`ze*3yvw{26(b)sNCtNL$WoOJMMJg_u-D}PDSH#<)vj5%6U z4%E7LPyW5*@M11btnB!`jX@= zOMV~M{LN(-1N-0T3HS&4b^nb$z-RQ}zL)*N)!qf%=N|Y6@y&bVf9c}oL+0e*A7&?Z z{d!J=-O+q5d6xI4&WksqE9NKh1aNBf-Y)l}%LDMg?$V^`hA;-+mx455zstmzdwD z?ysH&N6EXLD-O}=$~VOeI4U#m%g5J`{X5cCi1W_fba{>;{~#XM>#%JnP<~`z$>bLEXQoASljsq>3*XB>@aq@1^uDk3gr5Wd4Zp6>=gMcE zn7h4XVl*!(;gj8W&VI8{EdSjvZpa>_1^l}Eb?!q2SDpN|>6U=s!S~^fy1zQ=gE2i1 z1nvPo^Ddk#@8;LpnL4_6{QtFIMEy-(h(~-r`(IDzEqZCc&*eD+>KF2?dYwM2GUQ+X zp7-j%`ChosJV&@sos9m>Toill>Du65?+w4MKX&vjrxqSfITQF^_`$i}vAM$XO@0!N zlE?DD@(X#acL7&B7hXS?w|mK?`;JF@VZ3AU!acz2?7=(`JPf_3cae8f{+V-c|0l9v zbzVAByd0j^J&14K#e%Dw=BbsrKfJNqr(xraeWCb9w+;8%G19;b!or2JuCFe z`h@wjc)c>7R=+7f>A%5I;=DSJd(h8t>-v#Hzq&gkP?y#x;eN$={7&nN_l>BTvLaL; z^56J%x?*$l2M-wjuO8zQBb}IY@rdGu`Rn`{`&H+qd)3!Lr)y8KcflUw{TZIt{vmNw zpQSkh@&G)Syj$Ml9rHWzI{yHdz^UIaT9Iqc8_R>iv8@l}`DI6F&$j(~@&Nb&o}>d& zcTy+Ab6q)aakf6AQzKnl)6s`p)%qhnD$VPDoijPYCF*bZvql%2eo>=DQlS4ro`vVS zJF88zg~d|>`W^GZ?SJFfeXlxXeBOfh6o>9wTYvSTdC4LFpnhEB-Ml9n7F}%a?@aybXT?9^*ciU5pKjR;g zwvTPOf5~^j6_1YXJghM!~){u{glKhOo)=VMUBeDm3@Z|ISr1{m0b1%8? 
ze9!b~ZihOKc|rVx@5O`1&kK^r!jlv3`*ronk$VGqq5PPCP&b7q#p7p|KKD(t+-Cyu z7%ovay=wXTPSs}bjc}^(bq_UvA9D9C8F^yf1^i2=Am8-f%!3~BYV+GaUves-Q|Now z)MwWHurA;y*{^$$x7b6<@8Ip}J@vQXAM}0E!#GzSfJd}nT71KU>z6+9T>c@ej~@ut z-}s&QxC!^U-#0fFEAeD-N$*zY`a&fNE)C&SbJg@U(-Yuf+=Ka1@VfW5^xpK^otEW{ z;WhA+&b6NbJ`*?L5_sKz6Nm66^uO|B`xdiL`mt`}Z5sl(&09**|l;R)CSzE7UT9_&M5XYh`CAzcvu*E@E<@%3)F&wIm{sO!N|@Q!yu zR{{5VZ@w3A^xy77Ir#3tnrU+~0z9ocI{d&M_;vG3?4!c3)8XNb)J5Q5?*iWO-ry2@ zDkkMzx^nfWyP|rF_~stqC_Ie*P4#2`mmUVLrr+V${WtY2?^u14J?J}7*OPbCD;M2e zY{Jr^r=vXz;)OnU`ZM3_zriK)3-!&BBWC~kZm!fo-y*!j?|5(UjyS~59)7%X>zjw2 z3VpBnTyG}kI<%z7k*MF)e^ZynKiqwF_fK=*l^)=KU<%J!HP z;RpFVzy4Xri9eM5r9e!6tj-Jf>3`$T^h4g#W_a-C)kh*79)IQ@@Qr~{k!kW*G>wK z*C<@~-Q`Ka_Obg1q|G@SdT;au&V^I?-zd&^p0x7&Z?=c<4qec5PgUD?Q_&3(PXL#| z)o?1_NWI&+=4$r1Dkscb&Q=po4}> z{zAK+@bHgho8nK>B07A`S3SHGkvUchJg zb$N?;EWUM_(5q(RkxTui@03oj_uAf29CB`QLYa+C&Too%TJ>Xi2fq$Si9-)$ZY}w1 z>hWl9k~*1vxAQAZ&0G1{jA(w8x*mSrf79pgd+iZ3*9ZO;=kclL(|Q-~0e*+AtO zQ_ou5`T7wh)24)nlV+9O`q_av$D-jXY0N1e9&h>$CMc_oeRWceN^f# z9fH5pAG4+0_LISr>ZanG`>kHS>H5OgUm4r>Va-KdMx77wh;+rywZCOx z(&@dQmOd86WAhy8_~kA52k(NN(ZkRMb-4N3DmRZk6X19Bq2LkeD%`LCrtTD9-&8+V ze}kjs7jOxCfG6ipNu5{ygW^&DP^+>{Dz+)KE^Oa$&o`Mfk^^;K`CBZH3G;$ZMx z*CBb=y!P+9Km6++^gF?4>O<-+`VRCpkDv0~)6XqVi{^}Y7w{Qgj(=ch?t%Z+cfcO# zJ?W$9+PpV_f-c!7Azxj?0IwmRSMRn* zSAT%_CXdCh!)I5f6<^!%(d6)kX-oV4{^_+jDb=ZI9+yK(|6a2|xI6`c2hw z(he3qe)hiP5U$qGd-jg$Nx%JaF`ECtubvy+mwKeK@axQy-AMiW;nLXy}dg5`h8&iAMTy;Lu6L`mPpZldpG;dJ; zrhiDDrG9~bs4?R7;_rS+4AlMMb>HhA=u7x@{y{z@k5$Ks*Y~*x-wW@+59|T2Vc!hA zE*{(CC=RJZs*B(s;Of<7*F5%Q(bNF{tDZ%F=3M);I?P+qbJ(;Cq5I_@oO|*1ueKD- zvny0L_21}e;T?M)%&qg^ybE!t(^~`k)SZ_*>PNSqLEKE5Fn3w?HVH92;(`yti9;Vs z375^Eb?|VV6VZMVbN2AycmjEr|E4b=za!65N5{jk2Rc%9Ugz?^{Db~^`W^ZQ^AVqY zyHSHKJr0I+y8I4)2VQ3n?6+{vS10ZnedGDL8vn7V|dGD=Y->)*dow^X|g7oV>(>ber+PL!p z{Oi5JB|Gl^bxHAt*<$i;{4aktZ~CuickDSF?0kM#!4{7vEWr24WA&SEXmekVL3dx$ z%RRSh)w`GUtdXwxGH(z6(&44`e)Gq3#TJBkIXYebOdi1R;2+#?JpTZn>F@4&c@L>~ z%W75;^k2}Q>30&}x@g?zmwR7=(?tu=Eu1y}hu;t5JmVKNa(2>U91HYr* z&CbN*XK!zGXxa8zp}47jp>L+vO%ty9A|W->pQ#tp*~G_rc>q32zn=F--?y*jZQs?M zd8u#l@*ZgRi}!q@YukQLf1E9L>b+{uwds)~2A}zF9~UZ>{q(gN0Y24S%j?sx?pkO? 
zYLpkU2YyF>QL$K)U#i`3eZ=?SJ%2kh!` z@l8F8E(pJl@3U`49HJv-zaw5<{&oJBFGP8_xik8F^!w{?(LW@gFWtJ{w$GkC9(cz$ zOfTM|+P!DP2iDa5rPI;m=v;Xg-X6ch|H_ZmyYVIPj(*;Moh{n%RH@WxJ`DeWN7SFC zznh=*z3S5PO+3N;XYX3`Y-C(7M9(0GMs(K>2{0et_|uvIdbMb z{j$f*H^S4xJMPy#;Lqd%cnxz=UQDjC=CudcM*VN>SKjS=;VA!29~ImuUcgb44 z;h&69yr6r9qns-*lxN{R2e!E9bkjB`BE4sjFnN03l7}Ll0=&bYv9tKNiGR?)sV?Gs z>CetgoBaBu?B}EUy!S>g2k*ECdDa(|#tgsf!EI5W1plkvQlr(LB@KT+8cvwBCG*Z% z=c4`6;=Feuf0JJvTv+*~X0PUqbea4wytB60>u*)*m-!#{H+WLr$^FXbzZlGEoQr>;%hV?fpWzYdVd#I^nLO*!T1%!Z{3bn|aj@7GJ2MhP zI7V`uP#{$ue%ox%PKeZuq)-i7!Em#9mNL+n?77JaJv zkovK6`3HW7&QW}W`+To_-agBoRhRTk%C{{(4@zAr-A_=Pbu@js(djx)qcIVulus?l-7v>{!F}pC*df( zCws658V^HPtj=KWmwJo3zj^@t%YM!0GB;Enu(x6MnoWvd3~KkA_rd)+vd7HXlfSVu zd5d%9Ep(*vEVxg7vq9seYFkcakC`hh&%%SlCGM~`?tGCRK zco_FSrd|j?6zE$hvG}PAf&N1IoBgr=Tk~pPjN0|m`M`UV zXT`@0ann4-};&J?c`yaI3iPw*1*EjIXOd=|2 zAPtm4WhxQpFOj(nWr!rvprRBCH#A%4ilp*sx&s?|$E*D;FJ$;VW z`rYeY@3Y?fdH#WO9qTxcz4v#2_jm6-f7hou8oj?SV#nq=+FPyP6s{IGotIztUH*Ze z^xyEQ=6K_|_;qg#44#{ zT8_+!;dOcf=Y`kxqw5#(eotiV9x-L?OEFx6k4m0L>OHt$yfQtP_p5&4e&I89-cu8v z9$5a`Ly`GW^jz!$eh}xy3-cfNN&Q*$lkAudM7@w*c)xfUdO3dG960$Vzy8{FO_tpI z?1AX;;2GH?cTEj=2k$BG_Rhpjd=&jdvnADUTYb^tz?YccXC6~>{f%9y2e=3GmBW16 z>G|(S`*U?{&R&DzBMT3^`An$$i*NcFf@fcoL zhh!IUiGGpxV;(Brv~yPI=k*@cariTK?7VbW@B_b7=uF~`hqmvJoRe-nrH zT()BHwpWfs;_;akZx+1dtghC4TKJ6qm!EWA-&L1(Uiq7Qz^}_=@kZ*r14k|YvQV!x zLBE4f)%OLT@ssc`{~%uQJ2O_MFW-5^pK;0;X>SfIzdiQf@KNHM^WvlAvFak`j(fii zUwNeQqo41L?dNsB;*j^?ym+~#8-}J1ADxmg|IGUp=jDa!y!@H>fY*R0-Gg|+Kfq`F zy1bAMguSVE%is7Ne4qb@_f+q84PjOS;t*;}w?u4h6 z&#T9tsZcrRoTBr?UODzg$1fkk+r#VL8G9>PX7rS+?>HFt8kwIe@2>j7;)xxa=b6Qy z@#}bddL#V9wh;^eb@Q3I@lPlA^g31kPzrJLZ4+N#|9UHh&$zP7hB1VD3!s z8PkjA-M%4eKf3l^H{>`SiSyor{HUw#Zf$T#6L{#P9m?@4zhZXWox_C+u5+8h6} zGHzG0_S&dK=Z81eKlf~XeB77?m5ObT{5N(X&eLtb^y2a7avoY64KMp(Ar>1Ud4zfKq`lacZ z#5ecrozWGWL$6-wy!OiJcT)Ek-|##1Q4_}I9XhE)TA2U9Kd=k8) zqpQbvI2oH)%&+4S)%EBR`E@)OKFXd7-{p7k>-ZA7!G#xpa{BzPhhlpM>9*Oi`*jb= z_~AdzH@#nZfcHzk4)>WqDL=LkMSek7LI2B7;t4jKZ1;GNdPNfY)ZkQg26ep&@7B7n z!!=7|c`U!T|cDxFnDG8CjJ4xgI{;Q_QI$i%P;USaH={(g)upX&HC5D z(ATW)&)(#3aB8l}qkd}i?dcdFCEvs=+iTSP_Is*)UU_GzJ0 z*LTmI;MWJwi*9+nbHN0=5Rac}vTALmhqlM+-R>6;)3-q%Cf?Kgb-(Hi^uPM0&-^id@8*s-qV~Bx}Z&;QDY%DnMS{o<8=3;HPa zLVm~lg-hgF?$>-dann8EC+Q#HDCc#*;<4}A&me!JQ!sbt#oNl4?bvDCzv)Q%v*dgp zZ_n@GQ|bH6bNp&|l3!V@IV^-+)IACmJ}{Db;@y>c(yeZlDiF}_b-MBZY) z9Gpror;gL~?|sPt^+V$2@QC7tec1A2 z`4IoWPpTKX2l)5Jex2)AfBRr${)7C&J>Y-&b-bs!67nJOO@1t1sH5Z0;1W8f{bRo_ zzj)V~Nd6`c;Y-wq_+L8cw<~Qb_I$y$VXnJ=fAs)*0yx$E!oU0^{Gi@_)!;v7=Dad1 z+sI>QQ;BAH~J{~CcGoxq(@XAQtyV>@n`-UT%DW`sb`sQ#E$6(`@E5(Qsu{wM|7|J zx_pQo<6+cWgeT`wcGVur;NaZyWg7&jj#A<=IMwX%LDK`^aS`F@d7W0 zkD@n{2YA2k*SsM5Uv>fS$Xgz$^T^^>@9dB1bnzwlYI&hNwtTU{jee{!Hx}pDj{dFj zt{eBo=4i>g@qJYnO_@LU!4on41Aa$6z<+ZOa5Y^qosD}iXT)4J|4kkMKez{ehn}Fw z8x_89eqoM;@0v3LN6p@t^>~$*nGqeodH_7B4r#9^T+L5I{5O6_eg5%rua!(Ylp3iA;1R`Rb6n^u)Hm5NK1#e0hs+_O+ZH$JUe%Aq zc{qyBrb{TY8IFLV#`7J5(kS3EXf?(31=?p-tPNMwJZdMv)$ zJ>X&RJ9q+dp1p~0bQSg`;ZylD^#FO6c){<8$M&O}D`C#`^lzJ1>bZPf*vpmtUHK+^ z6W{2>>_bu4OZGeRP4g7^N#}*DyRo>sj@yikWsj>q1EIPX2cslF=@@Xp{8xAW-h(`VpM0{-{92D)cREsk z!$`qXE01*#?$`Te7xtsu z*CEdm=ac^%Tte4ou7tP=@A$6$y!sjNJNzVk=D+FR#Cxjyad-o#D%67_EJjUGna#P8s%;WPdW4?|zV zKbUW1-z`6he}MbgF`fVqv%1@~mlmB)Nq7(XX3|f+cBV*!!(o3RyrX^$@9@9k5PVj? 
z=k?Fdzw%6k?}JP93ERU&kI0Ul7d~^p{0{se&f`n$tu`-+{>=Nu8_^r>>HqYrx0flL zfcyAg=amQGmGz_BQ=u-6S7yiXgL_by#%svu^&OnIv`*!`P18ajD;*R6AkPxt=yy7o z>v5(;lY9w&2hZgms=Tye@|q!;pGLbL$eSZ!UOaoF0}+SluIwkf@R0|OA6$KQKl9{9C)(V%E|wqjXX1r>z$41% zE%1HvSoKZz#{a^p_W95a(tFx-HR8+9hJW;RW(-%WU&yoM z$K#(rS^I)vsgZcWPnwG&pEu{U(SZ@8J08pkdM@8pN9WJ*lj;}d_lXzgInuS+J84ga z`7rV?ALN_#8tTXTo#=nz2Y8Ym9R7vZ-2*%+AA+O!Gk(&1L~}62H+&Qw zbh8|rIy9+#Fj7A@PZwW;fAD_kALLp51H7Y-;2+W@4UNK%5y~yM(~+0-eep<(zH4(Q_;vo5U-ur=kLiEa(doA72JyxABwpW{JMI}&l7g-m^|Z`;Mdh#(y4;4}T3?tw1TJJZ)8FZ^}GTh&_bI1}bz zuw!;1-}HXzQ+KQ#KOpbQ4RNwv@H>T@E@-~8?d&k841SoOzN&epBH6J%Dmc}7)lKOq z;V5&}=u7B8;6C*s{0>|qUKH3}_l7y6bB4Wg>fQ2-hNll79+i<1=6?Ba$vn98%HQ}0 z@tB{)8{y^5MjmHD0I^WHlySC5zPO%3xA@fz@h`8)C!{|$e}@5qPXDEp7~Nw8zO zSADm5TJ?GPF}#lVQ|&K= ztNAnhWb(ddyfVDbF2oCdC;7k8Rp?v1EGoU^>T|XRJ)-yP9@Ken>@ud~-f9OU`J4Jh z-%;y_uYajjg5NP$SiOZ_Zf4ENS5#e*DGCJ~h0YJb-?ZKau`0!Tbe(5 zZHuE3Tx~A4`kT1vyXsDrE}VV+x{v;f)p5i(-%UQRJivGHXXfnD8_D1B8hAPJ0^U(? z;UC_rG3ti*FFqORW2L9Xd(L^N!=B^wQbRrjr-~Q+nK-XLz4-@LONbHW(Mc~vG<+~4Pa>uqv|EB(K--V;}Nw7EX;aLBhJdr`z{vW^o82rGHhMv<;$m?-BnG5<>(3M&l(@O z|J!dCo{soQ`6hnyl2&gW?zZbx#P8TgRq2z#Rqr}mSH>I3-@IS_0rGAofDN` zy81vYk2T*&Ux#?XF7#)~TkuAO-+lLi&u*R(UwzG?e#PqLPUy?`-`p?0&pl)>slI1G zp_Bx_jz8lcyfb|Vcu(~U{tOR;N0blo>*iAF@8%!qH9mi4^=p|$<_7*uKLfv$9OwBn zIz0MccKq;)9Fy*N{%DvRstze0s~55h^(^_O_$EJgUVSJR&Z>B2wMr+$95{Tc`i1!B zzv030QTDNl^CK7Lf3wltBQabeKel%VugosY_f#)b-^53W$9Qmh82-#VgZsoGJQuwN z{f;;U_vzO=cWT?ZXNLc?SHn9?zF+#NiA#05uMF~zQv~;`z>ZvM&5*ZHV>Sf{8Wcm z^CirQbPxWUd!Q#!@5Xzk{c`urk6zsto0q9x2tUv(C-%5 z^h@Kx<=y&^-2*=U!QU9+AC?^Xv`Z$BxyH z-S5B)Z%SGH$nmfTP z-czCW@yL1clj;HRj`${j!|%|i(wFd)>>^nQ0`Kta`WeJyc`Uqx2dA^4d!?7VZR!3R zuQu2iz5LL5#hcAbi`g6hpx&}#?%A3XWH_y_NoUC7_$$8eN7nR;xGTAy{A z(&3*ySMUSB!#~I`_y>NSf53z54=|rVT~EF#4#6cYv!1$V#NOOt-mCkitH2vA+cIq8 zuLY0Bc`vHauU7V-1%(&;EcMrey z!s&Qn|2yCQY{||DPUU~SU;m9?hbQrf-WgrRk#+@sdGp?N(c+&Hdk&>$2L7z+-*W`; z%5W6Cj;Hm`;3)aql5+DlmDso_Zr1U^$N$)~E7W7J7aHjgYYDK zGpB)Wg|4F1z_s6u$Ui-{het}%b^o`4h_%nQ}d(d~QUj$CAvTWqg59*u<_<`Sn*TrM^i=R{< z+B&SxB}0er2>TZGQQ_t06v#Jk$e~$bju!scdA(ozP}JY1bn1D0%ACJq{uiEnuI}Xz zd|7!<+`RMoI}6^qGt|laH@uwh;=%c|zAeV|8QgnkjOUUU>Ss{LN&au*1;5Tu+5-ZY z=og{ezN|on1w|h{5%cTt10BD5VKOdpUi;MeU-SFy6ShBA-AO)V&IrHG|KdGgYdRtx z^-P`wyYL2k@_3{j1Gi;(s#v8eb~+m|ElA(t{+E@Klmx| zXa1YKP(8qR-9xFn=hsbnIy>yi6z41daOeK6cP@+MoAh$dYq1XuIF{je(7B` z_22e@$S=Gz-^F{vQTFqy`{OH&0OzN_AC9;rIIdHncE_W-Bb z_a)BLd%9ooLZ6zrDZY7U^4N2xoa*<^OM7BGm-mZL<$uKsanrtWd7=D`|GoCA3PY=} z%ntoH>V^1q`W-x%IqB+~>LTK%zFT|R=vJIpoKN2SAP)I%v%ly3!zHi2cd*OW!#jhY zB1yfU7Pem!}#&s_J_$@snz zW%J+K>_Di$$uICF@?-TO^(;IAd`53?Zqf(mwEQ9JbTst)({p)e_}_aIe;n^!g0kuO0vs4k)pF!XmXc_rSS zGcz_PlAcQ+mHq(ljGyGsPpR_^$(_g6+mt{GFO(!SJ~ZbkpGJwf;=eh030zjzq< z*L$G1S7)$ih2FF3lLboNbmE_V&GHsJI9{2b)E5fxxZh9q4Epnz*+-*0%0G1A!AsJ@ zoL>J;|2#Y9AH+@ib#dPNedVI^hZ^Ns5;-s3E5483-uwM4=YxBRd z`}$)MzK?&PFM(6(@c2o7z4ry>Utjvd_Mn5N|5X=(C(ZYy8;=k!bQD>kl zhIimId;9Re$@d^`s$bwI`APi@csY4t>z_XR;QfI+V|!uXRQa*<`fu>0c#JPmcN+d` zvlsVVdNTfWL#MlQj@cP4?KFIIfj+qs@>uT-o;07pJbv-wxkYbJ_~H4iX!BiJwR$c+ z9@{_Ew%cXPE*iTp@Gy8T@j^WSPoQsR;`T~+ZaybHqRWKW*+u^?&y@M$#?;7OL~#>N zmA9DRhd1IUZ=S#L+izQ~4?LH-KISRVne&tK7CK$=f`5?5if{I@>W>}N>4HZ$JbE&2 z@KpO7mlep2=vLq}|IPdoeqEkbs^UjE+br1?={pcF%*oLw;eOeLI7F{3Zh8;$H~FT$ zMmfIz^xS2yZH>mia@UHjxlcxTW&93%Qx8z*?L7aHj~nJa7Qw0RVc&%GXPWIf66!r zwezJ#^6tMrdu~vPYR4k^8~&Fa!@qbKx)pIqexZ(|k4hd(|KPm%DDTW10eKc2Mc?;U zi~Wt?o3$>2AK+@b6?`hbPu{KG;(pb62VZgJxi{@R8aF;1_b!&UBlMx*5%m|+PvZON zg6x&^T|5_E#j&aH7Ekn>8}twU8(ggqMZQUYhNm^3z<*m=KQ8mH+t2p5f7x#8inRwq zKRUgU|E4a2ua;-I2YG;dxc|W}Cl0nwjeotk^hY=L&y4VL@B{s?cx?%bP_kjKJ(`km}eJvnFo2Q4O_iTQQ@7cMc6 
zGBU*{T${3sypewX!G{aEhYb$9rQbRM0V``;&;^N^~b7r!`1Kh zFR^aOP5XkbO&y)zkzZW-;No)=JGRB@7j%yF5AMMnL%90$>C5h^xcO+{cjz4D7kEUv zEAN4BMV(h1(!cp*jS62?8gx9&aS<;%T{8HgtjkhD|B&yRcSV0DZ^47pSDR-e4vCxM zg}BN8s<$k!cyj#XPh^GvX8-Z7&b1yo6rGG7t6p(UvnnSd^V-z?eOLY1`;`yPK4!LwBBK^|ph29yz<2~>@-tU+?h2MF<_MU)u^!ezkGjE&z7cVEj zII*_Nh0P1(O2Db!gF2Z!7JsH5fQP{w@eksK`?XIPp5!0kYIxGzadRU1U;Q|62^=L} zxCe9d<@0z?eH}$Fz2ey-%QJ$X^d8IwoHS!__Q8y-pySuS8SbIo-L?AfI9vZHytx0R zB{Jd$zi;2rBJE^MPpki8+e3BV?z<~@Lf<0)0I%cE8Vwp%_R2?7LOsBD%^jaw^6pl1 z*X#)MQ|UFt3%CUT;2z)-bFtwE^AX?s^i;R^zsL%DBXy?*1OB*SbH5x3c_IEme3J** zKg5ppAGaBLqVy{bPlr9E>V_S<{&1Fg>D^}qFf-kJQObJJbz zPZXUPi5Kh`e!xG-FYu}Ays0OD**vHAn!r!8WADMdEBL{8)urKIbrE=SL^QK_j~>Uu zUM~EOdk}}jdH+p3##i$Xc>CTxqKUW7+z{$|{JJ;~_tBBcHye&Sx7NmmYvPd|-g>n8 zQ$-S)n?G7r^xWJDbyNK{O%HcFH^J|ycjMQe>F`p?7oR&8!PV@-US&E3b0r?DKBeo{*Y}2b zV)`$9m%Yh{=z{bmiJR&}?aN+TuIziqBJ&a51N=Z&As)j!cmi?AckxE_QE;j`4Ri{4 zjbt6PIIn&TSHtV-BJ@$tE1wsK{_0S1!tD0jBl<~k$bN=hI|i=WIH_ntd{d8Y{l}o8 z59~~j6vwedTw1WA$V6bm40E;GKy> zctr0(-;DktI%s+rag&~a|792MfnBg;ao&DYeSmcF>YMly@q*tmNA``ZUUkc)=1Sm8 z^o7#Bjw#ycvI)&I!W;~B96AO3FMP&Nx(9kW_e+;4zmT__{=45F?n}<+=@i5__uxIy z_o?&xZ*ZS`;D5z8`~zLF{8&8JAMjFvS~>gOof_s7;1ThC;syV!-(THHeqkO{_g$BK zU3}Ivu7w^4`p2v3)b<6~j^P7ygAy^c~=h)Q9Nh zo-X_LZMoJR4*MbT_D%oR%c&3P%V!t(tCib;XZjcdxLlWyK@@Y zagibod+%A99{LM^f9j#jujs!!=pW{k+Vgh(GPx4!n{PhQrT&`Er$b&irt^~523pZB1SqdykzV;A(3@PqsqUxN3%x=ZyVO^fXbeXPlTQham2 z-b2ZOUkz=$eS0Ktk!Q(|TNE!-I%WL9&==~v$vhW7sehgh#D5dt%;!>vbPw`*cC4=% zugs3sW9c>M!ObcA@U{nQt-SwK*!M+GYYv7wBs+$q+^_iN{o=tNE4#dZsd6X6{608U zzbQY7Cvd+ve0|<8SB}~h`c36S?1GL7?g|{x$Acq)sNjnS&u8qEF>_#3B2$;Po}@x0TJ_b3DvF;CJkw zr_<%n;9qfG-tC>qTg(Mq@!73&W-OW)i9>h|^&vVN_u#y62|h|Z=6C4g@m%tGx*+~c z9spPCmxh16GkYJ@O}z*6HSi_+)bzh4zX!aWccz}D@4$bv577Qi|4qG{ZjhhE6VOMA zZ~Xc_IeS+w(WP*xI~`f~-GmQ^t_r*ceh2R3C;c~mT^v%M=hw@09~0kxVr!V|1E1j^ z%#{$|N`3g*ko~Wvgnb9@;ivLL#y->FK&Wq;rwd079$)>#A5Wi&{y&vzmUJdXIbrrP5$`ssrYD)QY-46 z`YZB&)t%sVeqEi6pTtMey{b!#n_uT%aQr~sLor_2Jh9}t2mG1%c4fJh%Rg=YPaV?x zWpD5qzwUndb^bvdf~)02`iJBJ>V@Xen@fe)z$@c5;A(me^*6i`ye@B{uU60Eci=Pr zA?^C_KR+}pM?#-4|Db+>Ka*#vZ`w!2j`=hH?Uf7mJ%81i9RH>RQ8&dClz$+5W&P5b zp})m<-2;CHr>b|07v{^EPk`6hcB;^!s^=U#%R3gxoB7(h!1u`m{5R+QxXj+A4eRZX z@KN|wdQW~wp9Fu#@0iaeZt^?y5Arv>z4xFlqMuBEfO(|)+}Sby_lB8Y{xzxp(V&Ns z7uqKw4-kjK+>RT@+~q`%Z-Y zS@IToW$~EbVQ=yPyqtNYm)~3D{ddRhjRwASWO}3Go5TD`I7%F%Q^3>Gm#Cxb!#0=I zdpKEjS;0nU{nnRHw7qijsvKcnCLRV(&C`1QW!<{(jPL~NPWnact21BOera`vM7109 zOg}Fr%*law_%rx{?#jMf-!;GFc(w13O#0?bY>upW0jHX4Nmqg2@m=#v;9vgX|Fz%A zJh6$dZfIZX_mqS<&p)_dcA>s$e-``zmw0FFIGKlGZ+HT@PrvE>>3JXjWX;)rz&2e6 z&nkQ@qGM9`SLYSy@p5np9vt4G|LuQkpKnX$+Y$6~-Y;DnJ?*q_>!l64{%{1B(D%_@ z$y?Mf>NhBVOZ%Fsp|3-Io?rJK;C22?oafiYp)XhN-#Pc}+{9{Q(+0fRB0bEP6OZLt z;*jsMH}^otB+iSQbsG`H~gD>IN;T_*~zj#mkMUruK(Si97+*aUZsJDniORC*{{dbK{N8W=v zgZH2wD}NJ@@xKe(zPj`FUv>uFt9%~r=FZgx)I-FO3EItaZ_I~ji{I7ie+v>ZA<=*^ZsK@%Q z`B8`ds@rzsb3cbZOZ5x&dAJ%b(GO{^yZVs+fQ};?ez;}n$!N;I7H=q%ejxOt`>uM6 zc(G^OXV?6CZJq=_$&U50vf~ORU#NTk>MgOlskwFXSnt>T2l^=WH}8z@ia&cHHP6m@ zMR&&oHWxm+YR1|6e4X;!E8M&-O6IH8A?Y9F7e!8H+`75fnMk~V`^>$ax&D%jhsOUJ z_IlFUh{tp*-kJEOe@I>^-#j(p>4D|1JrtSGtb`8*##bk-@${6LvR${UVcHRAkR`iHa|)}@4Vs=yu~*6@+;P{M2oFwYQ|Rff6%M!G8Q<7)^Sqi<_XJ$x9^@D1 zz$M?W{MdKp$L3{r@4CIny%+C{^(Cp3!M}7M@Vfs7@4yfAQO?`;XtOS5^JK*ICEgkQ zVBQLU#!s4`Dj$-!m~Yf^=hKgte{X$62ZFDLAMo~g4LqXr@@LLVAEkfOTnTkZys~`W zfAiflt9sQhIsU6ie52Qp#~!%osegSidP(3l-Ys70tAlaQfK$C+brErs4w~PAOYjf7 z_7vZ=^@7u}I4?gg-*4J|RR`ut;JL(2^+I(>xP)$yU6j3h6+iiPcB{BXZ@OXxfASy|5Xp5U(bAU{(~Rn+8FrMHshWx{$r~gi7{Dw zs@1A-DiV+J8gPlZGvdXeS1LTx=Gk0gfD~em(cAeu1YIH}zq+ zT2u9jyWhwR{#TwwcO?&S5A-GK==v7nRJ@!%3HKmB=6~6ny0m)BEv1)MfB&o0s7Q++ 
zXBWEtXauLypYc2VgL*8yt}nFU_vbGx_eq{$$MBAOP$%PmTVGZ5n>H6@g+40v?zfs3 z>Uij;yb1ZS`i1+YFHyg6zw{b^o%_mVbBAWc^gDPB_^jw%eL9`)cRUj3g;1YO}|J7#+KX|`*d;BjR5src%#6&5<6jeQ#U;jA_SXxCCCu|KfMVW9OxF#LLlD9C&{J^TQjZMdq!jU*N&LGxaw( z${bn#0p3v$uL)uqct-ZfT~j0VA$dUZdF8S24n2(d zJ8-J`a{6ZI#KcYK<=6FZI;mo+=i628J8AWx zBEg@@H_b_>hcSof$`T#^xN`gSxcUPlf2`X#XV6dbJM4{J;5GKnEZ67jEeB$CQ+}PU zSl+Fk#qX$J_;2RviWlO%^TH+W7d~Uh{+s;Gci|{`%YUk?a1U^lzH$G}Toir>?z`%a zKac(1d_mw5*|B?o`{;t?Etgk$tH$8JK8PO}T=ed~YfeV=T=c(lnvYxEyz2O%TVcoE znLJC~6u-{@!jtSc%JJRz`7hoX`ETB@x}LhJJOEFi-d%g=Bk`UwE5iJB{e|Y7rssS9 ziDCE8itsz~dH+ql(0^kW?3jPR8_74-v-mSSmwQmp;&=F8cHz6`YtS3%!=|g?*Tpyf z!Fy28x;3-Y{k0#;2>Mj>m6LG^-V>gbU*I+1RQ(L{H*=}f8SrQMoBdVd%O$7793nbW z@lE|rzUe*4=fyX1NIbUZRo;!amj|#5{oUp+nzIL=@n`1r>d&InWf$<7IFyVZo>*2(C_dMzN_!cJ5x7>OZc-=1NS~yXvX5uci=yfgUMdGQ)}BlRr(-SF?kDyd^yj?9Sh1nOD#XX*Qrx6t9i zlZhLmc|Uhu6y}$xGpIwJ`_^Tr&$%`&+%H~39w5K)-LV7qU4Pl31CcmS$6uw-$YX`u z9gFgvzp;0_pEE+<%|FOnya#@g9mA9A0dOk)ufALNkiTQmg`c0ynV>&&zv3}_^W8_^ z$)0z3#_`B```-83sJp79MlbC?|A*{+DG53}{oVL8=Y{*cGd#F?Ox^?jOg{r$?fuFF z#Cf<{ofn^KA0RyNh+{@1<^_sj2yL%yp|SRRY#`l?6uijOtP4s!&| ze}K=-8{~Jy3wa?P(fj2e_+Rfq-weO5zr}lCZ~RX3ci{*BjXz84cKz6~zwU`&d2qw^ z`|~Y|>vr0@eQ@iPS;^m(52;_!wQU)=;M*acGeh5u^G+`}{Ych?ozZKzpKM+Gx^IJy zi7wuMV;Ax){EqyX9?^U*|E=uXl_xE#akl^MZ(RkQ0-Wl*;*j?c<-ITZ^qK+*e2F-3 zKLfp|_sgH@kM&)+PktdU^j-X<{nGm9>F~sP`H*}buOS}GhuAUPS9WHfHy7M=G%`oP zd<}j_95Rndo~3V`pQI-c=QGmouepC}di?sGRWE#U+U_uCgsy_0WEbYk(X}fF}oPF@v)POZp%JfKmM;hE9Q*QSF5+cXY7riG%r&< zz&oRdG0%}5C)Z8+Uv&}q7Z0v~zSoRl6TiRnRPeva{UW{#M~N5mLU;%6^Idg4?`-gs z9~MpTJ1^?quGskP**OyW7L#$R_-0Sg`a0jvyRhb29m`dvR=&}DMl2qyGl&;-Hgs3= zSbkDJ4*YB17af!Ov3T*?cMB^2_}=u$z9e?+yKo76Q@?Nz?3lf&lbL7E@4(gUf}fOM zcxU)^{!HJO{uVlZ=cRk)C%p&!89oY*f`8?k@(X)N#W!(N9?On>S3IUqRd2x);D7hO z`cCWqndxC32zwLX!d%OHinZQ)w%k%-;_)z#R6PKGFt?e0 z()=BL&6(8})GSu;<8YewK<*qb_oypUdlfAAj6F=Q8bE_x02=6>0+dJEp3e<+&wc>dRlE{e^I zm(Nc;Ic~v-m1pNDl%Ln*NXoKM50JOe*@zeTYI6b2K^Ete`;QBcxvTKTb<2Z4gR8|i z@4-8RcUpBHGy1wh84;bXxM>bO|4R>p*N`834{#Ja_RipHd>?*t%*tOTFZtRD#PmSKl z9tFA;bCbkPx)nIeelonLccxwlPl}uJZh64{H8+2p^~2G~{hDW9Y4OEbAFcRjFQR$? 
zJ9b`u&Atn-({00(_Hy9~)C0r|^|!Ap^qOCHd+r4O439|XNbgDKXbz{kDZP>Tsp1g4 zgMZ-9#7%LYJ_;`Bk@MX*A1kph_;q<9oGKp61LQ;EyuLcPZ_p=~RLIx=OjPIH-t}&8 zwJYev{5Lvi{(=9+)7t;0zlCnw{J!Dk-fmUlx9`Iq8@TWG*$eL}Q+IFd9>h(&M)Dju z=QZzjSiWE9l|Q=&fuHnUefj+Q|MY$lab7+@{nq>~>$T36;CDJTEjRyyas?A~&~S;q zMR}HZVLl=~mpISf)Vuq?F|kyWMFkS--Rh9H%*eB6#SfX`e#On~#ue+#`Z+7+&+v%; z8~v~TSotxYYv;{_s{X6+sxUuF+~n8sT=HYQTr!RlH_i1quh;FXr<$<}pg*KP3b z!!f-E|DZmkA6?&=o*>#OTkn07;lU7_Ahx2`gt^ci{*47mnh0ykB_T+-7=D?<|?$alihXI7C1B_&rA_ zwk$Iz#CiIXQpYB|n!C@0Fu(fZ)1Q1XZfAC+ubJO*zw!%lQ=a91^+^o;?(XkC{VwhQ zniulx;->rcU3x_Gg7h_?+xvqP-4C4(`+4C>d;9nu?@V4;>gPAMT|6&W&;_vzdZP`S z&M%eGVzd68Ed0fqqgRFthaJ{MYaOAy#iu58!{DS02lsz4YSo z=W?{$6y_u9&%%3}8*080zYgz+7yQH04MS6hk4{O!1v9r#%9*flro*;hW-l0VAl}gOlIKgEPD$`*>htD) z;g#WExEfy~Kc=^@Rl4lPhIw`cy|O+)^+LQSJBH8Xn{X6==DYfY?fX&>@ZZD>?^oVp zUx)s}qOnnZd!CI%p+Cz z$0Oq9=#9i7c*p(fFJu>Vx*uMjbJzHLPQ`nwRe9snYFi?_r};4ATiV0RKj^&Xr^r17 zzB(@X#oBw82YwP?z4N9ftG}t2V-|b{_t_J~-rO&~n$E_Y209z>H_RFNyN}8~VLX?9 z5&aDCFMESi`5p6^=;i36lnMzi)Al1fDAyuj?lhFYIxgzo%3C(@z}>bCBUl z^$Y$%+{91vzv`yy40Nw}81+JNQ@vXpl5fI&b#HFls(pnoV>p$5=hse`zj<-S#JI-l zl2eEFO%M79-^F{`@A!_TAX*k@Pj@o z{tT{WZ~UbD^&Wa<)V;0f)@{+;8r?e9?U5SO!Wq)YGq&Zn8VX8$pqDqfhAzHIyST3JtK1U;8~@LhN(8JEC) z_HXJ-`lVj}QP+QdDmKSZ9bKK*d$_04yos|v&Wy!*_*cI)otXQTU!3`4K!-%NfBG-* zp60l)3-`0hsY9w~z5C~vd%xeYHRwR#2XQl5-={uwvchBYmz%*j|8rx|)5-(nh3o?Vz~0mu?~}jL1>tw_XX>PM4pAOYl+hEcz1XrK?aMvKJiBr9MvwqOZ<>gG=BCamaVo zv-Drkwdw0n*Ykeq+I-hLgG;pML7KGesI4j_{s% z80Qr)+ynnB--LJ6H}PC}@WC5Lzq_;FfjEivrG5b63e^seRRcinc`cg zj~5rX^o5LIZ~8Z#mp}WTu3z|X>STC1dhqfE@{KL<_UTBxfKxC3dykDe1H1!Qd%yB5 z`d{%_9xK0~%QRokdr*&6&w``WTkt#jkJ%glAm21^5Pnd{k-y1X;C1)lzrlTQ$)O_G z>@9G4R%DI<{OkSVqxduShEFX#e96e~hUQDS2m8q?_nq;2<6JotRrBZQbnmj1M4r#r zE$er--o~%vX}t&UmmTB5)m!vY$+N0vbV#fpyX7CeerQscVl@uL;<5V`H?L@TIHlLt zQ&IC)*S9V^{7`(;3x^&lH9vO(U#%YCe)(Vb%m0c)^bhhkerG__x`!LIIS|7;GghWA z-+9HKu{fk2fRCc%*Efz=Hm|~*(`J9~O?3}=7=7dFdbiwFNLS1*%+cEW+pJ42O&o~eU-^(eVdteUQP&$X@aX!Taa!zt z>8|RRdM5u1wGKx7FP;DoE)QUDblW4JJw9MYj|~xA0$0PS;wIcDzVYkq0)Bw|%DvU} z!hTo$lNaJs;Ys|NcBlpQzoq-+WOYj7~>mIxZb&>NoT`+$B(0|^8|Ar@kqc&$Oy{F}@ z;~{Q}$IdIx+b8k;E4$l1(d(alNW2h_;dS@xy#5=U3is)AUoq#)bw3yUr@sZivo~e# z596|CMeY~h$KKdQ)6YM8`0auFV)qO0=&Pf10@dW-Y&5A5xs7Y96d>x{e!b0VD=kEmaS9h+OH@4(!Ax)r#Z-;uY_R}Z}Krj*r> zoSlPwQSU3Rd2wlctX-`)X5={;{keBq_kHi~|F`|<>_Qxpw25*L#p(uw(Y-yyv}~v+#+*HP{rJ-^Z_e5B%AAGatU8<>NUM;xW5WFNDwVo{5s#EvsF0D$J+lf6ZSP zkHt6mSKZ(J`fu==y+-E1t+3Jb&WBCOgG(0(WZ2k8i*p?@u&mB)dSD}u>j(=Gh zw<}qDZDgNt(23pOZqu9JPl@Fh>8VgRxi|FSS0t;pKSiU4Szi^ zL1%vY;M~uve6%v~JG-8}xXHbn&hCkBefNP2zCIe;gU2rHon#m8fxUJ4d%r3Dx;{X7 z$9sUQo!9$K&SUX%_%rwy?+HKPOT1saG945D%deN-JZ<%Xr&1F7Tg+vJ`$`=9p!-+t z(Dc~|tF{I5PLdIEObYUv}FmA^A9 z%+sY~GPi?2Yf|vNgFlWv8q=+)&%=}O4jd)#<|lpEepBaF&vIUQmU^sv5RbhFd0~;M z=U0EY-LGMe0R90!`=!Lu-8E}>kMuQ*o9qoA3R3#&av=PRQTr*PYjUP-l<_EE)CH#u|Su3v)Q}F#MT(o}NHF_8yY!Ep%7r8_g+@ zZ{CnYvx0uzTogPEKPg_YH+}AKpYxgzt`CsCxnDSn9kaKEXRf(u(l;j~`Gvh2>KFJv zI!E__-|=1hTjUq~I)4T~c)#?&_H~%6W=}Ew4*r3?`L2De;<5U%eU|tgeo}o%{UVt! 
[GIT binary patch data (base85-encoded binary file contents) omitted]
literal 0
HcmV?d00001

diff --git a/data/Wine Quality/download.sh b/data/Wine Quality/download.sh
new file mode 100755
index 0000000..f9dc733
--- /dev/null
+++ b/data/Wine Quality/download.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+# Download Wine Quality dataset from UCI ML Repository
+# Dataset ID: 186
+
+echo "Downloading Wine Quality dataset from UCI ML Repository..."
+echo "Dataset will be prepared using Python script (preprocess.py)" +echo "" +echo "The dataset will be automatically downloaded via ucimlrepo package." +echo "Please run: python preprocess.py" diff --git a/data/Wine Quality/feature_metadata.pkl b/data/Wine Quality/feature_metadata.pkl new file mode 100644 index 0000000000000000000000000000000000000000..bd8d7ad825e0299ca0c003d69473805d203d5c42 GIT binary patch literal 2500 zcmcJQUuYCZ9LKZCop+aL5^{;;jJ7tFqJ22D#TYQgKcTjIC16cyACz^nJC_-{**#`w z&O1y@@(?|BT3Q_y6{S|H4e5WO>VrgW0#Vzh6iY!Wttsmy^YZ3Ug%Q^-}4eL44G z_jdT+{C=PLet&nJp8e+ndCt`?7V=q9oPfFtG^}Hq6zL@oO~#<$0h6^tf;C_W?t`Xg z>5vFsWxr=YK+JHAz!g+bvb(mwqlCZRtQXy z;(q7Frn>y>(X3cfK8sB?3ISyV+A20x5m9)FsoJ15X2=yALb=8(WJ|GRShENWw=#ob zKnO?*KEsyfK)M|FIuiNR9SXUUk88!Q)K2l1SQshbN}XtIY+V0PUd}C&M@4gkAA~YT zB6dg}_of?F1uEq;RRT53v?*a4mHVS0_oV^TW%r3lUbkpannh-FC8Bf-RKfG3lM15H z)8%$G_(x9cq{-jst`f33_rw8_)#jt>^oD4`Yq~v9V=Jx+URd;a3D}=3Z>-Fd;D*FO6FUpxA(uqksyK2YHb{VtLj#mrDlkm7hOq)33QQRG+!kJ9=!tE5qv{d>I|Ol+BRO zc(I%UtTSM!Hfxc&&N0T-=-jqH1}dEJO=rdEiP~4^d{UFqO=HEub+u=X`hpqV6DtZB z4`|4+2-cyUYXbbkr!9%EKX*0a)Y_#dLsd<#MjU@(@cF%C$Fq&_FdmE&2&D-|lo3_O zRzkw$nq{Kqi>7Tm>RsKaywO=T{FUwM#+{L$UhL>wlI@0<@p5WWCsEH5rtNIJ>eSdK zSD1>znVP-Jy)1hfU{=>@UgzvfjxBr7{W3Iuv(7tKIFV~0b z(?7{*n$b``zKs@th_Zv_V?00hMy_*I%@(WeeF4{_-`oFgS7G9?>rDL9V=AwS{_NKM za;oJx#1NplpXzyPU(-u@H(d{Z%i5vDQ11a(f(Ofwm3>?EeKtWrh6fm$g)Af24%j#L iZ20lN1+ETs{&uBg^x}Qj{=ZXC_e_-E&F;UDvG60dKY7Lg literal 0 HcmV?d00001 diff --git a/data/Wine Quality/preprocess.py b/data/Wine Quality/preprocess.py new file mode 100755 index 0000000..85f8e9c --- /dev/null +++ b/data/Wine Quality/preprocess.py @@ -0,0 +1,268 @@ +#!/usr/bin/env python3 +""" +Wine Quality dataset preprocessing script. + +This script: +1. Downloads Wine Quality dataset from UCI ML Repository (ID: 186) +2. Preprocesses features (StandardScaler for numerical, OneHotEncoder for categorical) +3. Creates binary labels (quality >= 7 -> 1, else -> 0) +4. Splits into train/test sets (80/20, stratified) +5. Saves processed data and metadata +""" +import os +import pickle +import random +import argparse +from pathlib import Path + +import numpy as np +import pandas as pd +import torch +from ucimlrepo import fetch_ucirepo +from sklearn.preprocessing import StandardScaler, OneHotEncoder +from sklearn.model_selection import train_test_split + + +def set_seeds(seed: int = 42): + """Set random seeds for reproducibility.""" + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + + +def download_dataset(dataset_id: int = 186): + """ + Download Wine Quality dataset from UCI ML Repository. + + Args: + dataset_id: UCI dataset ID (186 for Wine Quality) + + Returns: + Dataset object from ucimlrepo + """ + print(f"Downloading dataset (ID: {dataset_id}) from UCI ML Repository...") + dataset = fetch_ucirepo(id=dataset_id) + print(f"Dataset '{dataset.metadata['name']}' downloaded successfully!") + print(f" Samples: {len(dataset.data.features)}") + print(f" Features: {len(dataset.data.features.columns)}") + return dataset + + +def preprocess_features(dataset): + """ + Preprocess features using StandardScaler and OneHotEncoder. 
+ + Args: + dataset: UCI dataset object + + Returns: + feature_metadata: Dictionary with encoder and metadata for each feature + input_array: Preprocessed feature array + """ + print("\nPreprocessing features...") + + feature_metadata = {} + input_data = [] + start_idx = 0 + + for col in dataset.data.features.columns: + feature_metadata[col] = {} + + if dataset.data.features[col].dtype == "object": + # Categorical feature + feature_metadata[col]['type'] = "categorical" + onehot = OneHotEncoder(handle_unknown='ignore') + feature_val = dataset.data.features[col].fillna("missing") + preprocessed = onehot.fit_transform(feature_val.values.reshape(-1, 1)).toarray() + + # Calculate categorical distribution + cat_dist = feature_val.value_counts(dropna=False) / len(dataset.data.features) + cat_dist = cat_dist.loc[onehot.categories_[0]].values + + feature_metadata[col]['encoder'] = onehot + feature_metadata[col]['cat_dist'] = cat_dist + feature_metadata[col]['index'] = np.arange(start_idx, start_idx + preprocessed.shape[1]) + start_idx += preprocessed.shape[1] + + else: + # Numerical feature + feature_metadata[col]['type'] = "numerical" + scaler = StandardScaler() + preprocessed = scaler.fit_transform( + dataset.data.features[col].values.reshape(-1, 1) + ) + + feature_metadata[col]['encoder'] = scaler + feature_metadata[col]['index'] = start_idx + start_idx += 1 + + input_data.append(preprocessed) + + input_array = np.concatenate(input_data, axis=1) + + print(f" Processed features: {input_array.shape[1]}") + print(f" Feature metadata created for {len(feature_metadata)} features") + + return feature_metadata, input_array + + +def create_labels(dataset): + """ + Create binary labels from quality scores. + + Quality scores 7, 8, 9 -> 1 (high quality) + Other scores -> 0 (normal quality) + + Args: + dataset: UCI dataset object + + Returns: + y: Binary labels array + """ + print("\nCreating binary labels...") + + # Check target distribution + print(f" Quality score distribution:") + print(dataset.data.targets.value_counts().sort_index()) + + # Create binary labels + y = dataset.data.targets.isin([7, 8, 9]).values.astype(int)[:, 0] + + print(f" Binary label distribution:") + unique, counts = np.unique(y, return_counts=True) + for label, count in zip(unique, counts): + print(f" Class {label}: {count} ({count/len(y)*100:.1f}%)") + + return y + + +def split_data(X, y, test_size=0.2, random_state=42): + """ + Split data into train and test sets. + + Args: + X: Feature array + y: Label array + test_size: Test set size (default: 0.2) + random_state: Random seed (default: 42) + + Returns: + X_train, X_test, y_train, y_test + """ + print(f"\nSplitting data (test_size={test_size}, random_state={random_state})...") + + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=test_size, random_state=random_state, stratify=y + ) + + print(f" Train set: {len(X_train)} samples") + print(f" Test set: {len(X_test)} samples") + + return X_train, X_test, y_train, y_test + + +def save_data(dataset, X_train, X_test, y_train, y_test, feature_metadata, output_dir): + """ + Save processed data and metadata. 
+ + Args: + dataset: UCI dataset object + X_train, X_test, y_train, y_test: Train/test data + feature_metadata: Feature metadata dictionary + output_dir: Output directory path + """ + output_path = Path(output_dir) + output_path.mkdir(parents=True, exist_ok=True) + + print(f"\nSaving processed data to {output_path}...") + + # Save train/test splits + np.save(output_path / "X_train.npy", X_train) + np.save(output_path / "X_test.npy", X_test) + np.save(output_path / "y_train.npy", y_train) + np.save(output_path / "y_test.npy", y_test) + print(" ✓ Saved train/test splits (.npy)") + + # Save feature metadata + with open(output_path / "feature_metadata.pkl", "wb") as f: + pickle.dump(feature_metadata, f) + print(" ✓ Saved feature metadata (.pkl)") + + # Save raw data for reference + raw_data = dataset.data.features.copy() + raw_data.to_csv(output_path / "raw_data.csv", index=False) + print(" ✓ Saved raw data (.csv)") + + print("\nData preprocessing completed successfully!") + print(f"\nSummary:") + print(f" Total samples: {len(X_train) + len(X_test)}") + print(f" Training samples: {len(X_train)}") + print(f" Test samples: {len(X_test)}") + print(f" Features: {X_train.shape[1]}") + print(f" Output directory: {output_path.absolute()}") + + +def main(): + parser = argparse.ArgumentParser( + description="Download and preprocess Wine Quality dataset" + ) + parser.add_argument( + "--dataset-id", + type=int, + default=186, + help="UCI dataset ID (default: 186 for Wine Quality)" + ) + parser.add_argument( + "--output-dir", + type=str, + default=None, + help="Output directory (default: same directory as this script)" + ) + parser.add_argument( + "--test-size", + type=float, + default=0.2, + help="Test set size (default: 0.2)" + ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="Random seed (default: 42)" + ) + + args = parser.parse_args() + + # Set output directory to script directory if not specified + if args.output_dir is None: + args.output_dir = Path(__file__).parent.absolute() + + # Set random seeds + set_seeds(args.seed) + + # Download dataset + dataset = download_dataset(args.dataset_id) + + # Preprocess features + feature_metadata, input_array = preprocess_features(dataset) + + # Create labels + y = create_labels(dataset) + + # Split data + X_train, X_test, y_train, y_test = split_data( + input_array, y, test_size=args.test_size, random_state=args.seed + ) + + # Save data + save_data( + dataset, X_train, X_test, y_train, y_test, + feature_metadata, args.output_dir + ) + + +if __name__ == "__main__": + main() diff --git a/data/Wine Quality/raw_data.csv b/data/Wine Quality/raw_data.csv new file mode 100644 index 0000000..ca2785b --- /dev/null +++ b/data/Wine Quality/raw_data.csv @@ -0,0 +1,6498 @@ +fixed_acidity,volatile_acidity,citric_acid,residual_sugar,chlorides,free_sulfur_dioxide,total_sulfur_dioxide,density,pH,sulphates,alcohol +7.4,0.7,0.0,1.9,0.076,11.0,34.0,0.9978,3.51,0.56,9.4 +7.8,0.88,0.0,2.6,0.098,25.0,67.0,0.9968,3.2,0.68,9.8 +7.8,0.76,0.04,2.3,0.092,15.0,54.0,0.997,3.26,0.65,9.8 +11.2,0.28,0.56,1.9,0.075,17.0,60.0,0.998,3.16,0.58,9.8 +7.4,0.7,0.0,1.9,0.076,11.0,34.0,0.9978,3.51,0.56,9.4 +7.4,0.66,0.0,1.8,0.075,13.0,40.0,0.9978,3.51,0.56,9.4 +7.9,0.6,0.06,1.6,0.069,15.0,59.0,0.9964,3.3,0.46,9.4 +7.3,0.65,0.0,1.2,0.065,15.0,21.0,0.9946,3.39,0.47,10.0 +7.8,0.58,0.02,2.0,0.073,9.0,18.0,0.9968,3.36,0.57,9.5 +7.5,0.5,0.36,6.1,0.071,17.0,102.0,0.9978,3.35,0.8,10.5 +6.7,0.58,0.08,1.8,0.097,15.0,65.0,0.9959,3.28,0.54,9.2 
+7.5,0.5,0.36,6.1,0.071,17.0,102.0,0.9978,3.35,0.8,10.5 +5.6,0.615,0.0,1.6,0.089,16.0,59.0,0.9943,3.58,0.52,9.9 +7.8,0.61,0.29,1.6,0.114,9.0,29.0,0.9974,3.26,1.56,9.1 +8.9,0.62,0.18,3.8,0.176,52.0,145.0,0.9986,3.16,0.88,9.2 +8.9,0.62,0.19,3.9,0.17,51.0,148.0,0.9986,3.17,0.93,9.2 +8.5,0.28,0.56,1.8,0.092,35.0,103.0,0.9969,3.3,0.75,10.5 +8.1,0.56,0.28,1.7,0.368,16.0,56.0,0.9968,3.11,1.28,9.3 +7.4,0.59,0.08,4.4,0.086,6.0,29.0,0.9974,3.38,0.5,9.0 +7.9,0.32,0.51,1.8,0.341,17.0,56.0,0.9969,3.04,1.08,9.2 +8.9,0.22,0.48,1.8,0.077,29.0,60.0,0.9968,3.39,0.53,9.4 +7.6,0.39,0.31,2.3,0.082,23.0,71.0,0.9982,3.52,0.65,9.7 +7.9,0.43,0.21,1.6,0.106,10.0,37.0,0.9966,3.17,0.91,9.5 +8.5,0.49,0.11,2.3,0.084,9.0,67.0,0.9968,3.17,0.53,9.4 +6.9,0.4,0.14,2.4,0.085,21.0,40.0,0.9968,3.43,0.63,9.7 +6.3,0.39,0.16,1.4,0.08,11.0,23.0,0.9955,3.34,0.56,9.3 +7.6,0.41,0.24,1.8,0.08,4.0,11.0,0.9962,3.28,0.59,9.5 +7.9,0.43,0.21,1.6,0.106,10.0,37.0,0.9966,3.17,0.91,9.5 +7.1,0.71,0.0,1.9,0.08,14.0,35.0,0.9972,3.47,0.55,9.4 +7.8,0.645,0.0,2.0,0.082,8.0,16.0,0.9964,3.38,0.59,9.8 +6.7,0.675,0.07,2.4,0.089,17.0,82.0,0.9958,3.35,0.54,10.1 +6.9,0.685,0.0,2.5,0.105,22.0,37.0,0.9966,3.46,0.57,10.6 +8.3,0.655,0.12,2.3,0.083,15.0,113.0,0.9966,3.17,0.66,9.8 +6.9,0.605,0.12,10.7,0.073,40.0,83.0,0.9993,3.45,0.52,9.4 +5.2,0.32,0.25,1.8,0.103,13.0,50.0,0.9957,3.38,0.55,9.2 +7.8,0.645,0.0,5.5,0.086,5.0,18.0,0.9986,3.4,0.55,9.6 +7.8,0.6,0.14,2.4,0.086,3.0,15.0,0.9975,3.42,0.6,10.8 +8.1,0.38,0.28,2.1,0.066,13.0,30.0,0.9968,3.23,0.73,9.7 +5.7,1.13,0.09,1.5,0.172,7.0,19.0,0.994,3.5,0.48,9.8 +7.3,0.45,0.36,5.9,0.074,12.0,87.0,0.9978,3.33,0.83,10.5 +7.3,0.45,0.36,5.9,0.074,12.0,87.0,0.9978,3.33,0.83,10.5 +8.8,0.61,0.3,2.8,0.088,17.0,46.0,0.9976,3.26,0.51,9.3 +7.5,0.49,0.2,2.6,0.332,8.0,14.0,0.9968,3.21,0.9,10.5 +8.1,0.66,0.22,2.2,0.069,9.0,23.0,0.9968,3.3,1.2,10.3 +6.8,0.67,0.02,1.8,0.05,5.0,11.0,0.9962,3.48,0.52,9.5 +4.6,0.52,0.15,2.1,0.054,8.0,65.0,0.9934,3.9,0.56,13.1 +7.7,0.935,0.43,2.2,0.114,22.0,114.0,0.997,3.25,0.73,9.2 +8.7,0.29,0.52,1.6,0.113,12.0,37.0,0.9969,3.25,0.58,9.5 +6.4,0.4,0.23,1.6,0.066,5.0,12.0,0.9958,3.34,0.56,9.2 +5.6,0.31,0.37,1.4,0.074,12.0,96.0,0.9954,3.32,0.58,9.2 +8.8,0.66,0.26,1.7,0.074,4.0,23.0,0.9971,3.15,0.74,9.2 +6.6,0.52,0.04,2.2,0.069,8.0,15.0,0.9956,3.4,0.63,9.4 +6.6,0.5,0.04,2.1,0.068,6.0,14.0,0.9955,3.39,0.64,9.4 +8.6,0.38,0.36,3.0,0.081,30.0,119.0,0.997,3.2,0.56,9.4 +7.6,0.51,0.15,2.8,0.11,33.0,73.0,0.9955,3.17,0.63,10.2 +7.7,0.62,0.04,3.8,0.084,25.0,45.0,0.9978,3.34,0.53,9.5 +10.2,0.42,0.57,3.4,0.07,4.0,10.0,0.9971,3.04,0.63,9.6 +7.5,0.63,0.12,5.1,0.111,50.0,110.0,0.9983,3.26,0.77,9.4 +7.8,0.59,0.18,2.3,0.076,17.0,54.0,0.9975,3.43,0.59,10.0 +7.3,0.39,0.31,2.4,0.074,9.0,46.0,0.9962,3.41,0.54,9.4 +8.8,0.4,0.4,2.2,0.079,19.0,52.0,0.998,3.44,0.64,9.2 +7.7,0.69,0.49,1.8,0.115,20.0,112.0,0.9968,3.21,0.71,9.3 +7.5,0.52,0.16,1.9,0.085,12.0,35.0,0.9968,3.38,0.62,9.5 +7.0,0.735,0.05,2.0,0.081,13.0,54.0,0.9966,3.39,0.57,9.8 +7.2,0.725,0.05,4.65,0.086,4.0,11.0,0.9962,3.41,0.39,10.9 +7.2,0.725,0.05,4.65,0.086,4.0,11.0,0.9962,3.41,0.39,10.9 +7.5,0.52,0.11,1.5,0.079,11.0,39.0,0.9968,3.42,0.58,9.6 +6.6,0.705,0.07,1.6,0.076,6.0,15.0,0.9962,3.44,0.58,10.7 +9.3,0.32,0.57,2.0,0.074,27.0,65.0,0.9969,3.28,0.79,10.7 +8.0,0.705,0.05,1.9,0.074,8.0,19.0,0.9962,3.34,0.95,10.5 +7.7,0.63,0.08,1.9,0.076,15.0,27.0,0.9967,3.32,0.54,9.5 +7.7,0.67,0.23,2.1,0.088,17.0,96.0,0.9962,3.32,0.48,9.5 +7.7,0.69,0.22,1.9,0.084,18.0,94.0,0.9961,3.31,0.48,9.5 +8.3,0.675,0.26,2.1,0.084,11.0,43.0,0.9976,3.31,0.53,9.2 
+9.7,0.32,0.54,2.5,0.094,28.0,83.0,0.9984,3.28,0.82,9.6 +8.8,0.41,0.64,2.2,0.093,9.0,42.0,0.9986,3.54,0.66,10.5 +8.8,0.41,0.64,2.2,0.093,9.0,42.0,0.9986,3.54,0.66,10.5 +6.8,0.785,0.0,2.4,0.104,14.0,30.0,0.9966,3.52,0.55,10.7 +6.7,0.75,0.12,2.0,0.086,12.0,80.0,0.9958,3.38,0.52,10.1 +8.3,0.625,0.2,1.5,0.08,27.0,119.0,0.9972,3.16,1.12,9.1 +6.2,0.45,0.2,1.6,0.069,3.0,15.0,0.9958,3.41,0.56,9.2 +7.8,0.43,0.7,1.9,0.464,22.0,67.0,0.9974,3.13,1.28,9.4 +7.4,0.5,0.47,2.0,0.086,21.0,73.0,0.997,3.36,0.57,9.1 +7.3,0.67,0.26,1.8,0.401,16.0,51.0,0.9969,3.16,1.14,9.4 +6.3,0.3,0.48,1.8,0.069,18.0,61.0,0.9959,3.44,0.78,10.3 +6.9,0.55,0.15,2.2,0.076,19.0,40.0,0.9961,3.41,0.59,10.1 +8.6,0.49,0.28,1.9,0.11,20.0,136.0,0.9972,2.93,1.95,9.9 +7.7,0.49,0.26,1.9,0.062,9.0,31.0,0.9966,3.39,0.64,9.6 +9.3,0.39,0.44,2.1,0.107,34.0,125.0,0.9978,3.14,1.22,9.5 +7.0,0.62,0.08,1.8,0.076,8.0,24.0,0.9978,3.48,0.53,9.0 +7.9,0.52,0.26,1.9,0.079,42.0,140.0,0.9964,3.23,0.54,9.5 +8.6,0.49,0.28,1.9,0.11,20.0,136.0,0.9972,2.93,1.95,9.9 +8.6,0.49,0.29,2.0,0.11,19.0,133.0,0.9972,2.93,1.98,9.8 +7.7,0.49,0.26,1.9,0.062,9.0,31.0,0.9966,3.39,0.64,9.6 +5.0,1.02,0.04,1.4,0.045,41.0,85.0,0.9938,3.75,0.48,10.5 +4.7,0.6,0.17,2.3,0.058,17.0,106.0,0.9932,3.85,0.6,12.9 +6.8,0.775,0.0,3.0,0.102,8.0,23.0,0.9965,3.45,0.56,10.7 +7.0,0.5,0.25,2.0,0.07,3.0,22.0,0.9963,3.25,0.63,9.2 +7.6,0.9,0.06,2.5,0.079,5.0,10.0,0.9967,3.39,0.56,9.8 +8.1,0.545,0.18,1.9,0.08,13.0,35.0,0.9972,3.3,0.59,9.0 +8.3,0.61,0.3,2.1,0.084,11.0,50.0,0.9972,3.4,0.61,10.2 +7.8,0.5,0.3,1.9,0.075,8.0,22.0,0.9959,3.31,0.56,10.4 +8.1,0.545,0.18,1.9,0.08,13.0,35.0,0.9972,3.3,0.59,9.0 +8.1,0.575,0.22,2.1,0.077,12.0,65.0,0.9967,3.29,0.51,9.2 +7.2,0.49,0.24,2.2,0.07,5.0,36.0,0.996,3.33,0.48,9.4 +8.1,0.575,0.22,2.1,0.077,12.0,65.0,0.9967,3.29,0.51,9.2 +7.8,0.41,0.68,1.7,0.467,18.0,69.0,0.9973,3.08,1.31,9.3 +6.2,0.63,0.31,1.7,0.088,15.0,64.0,0.9969,3.46,0.79,9.3 +8.0,0.33,0.53,2.5,0.091,18.0,80.0,0.9976,3.37,0.8,9.6 +8.1,0.785,0.52,2.0,0.122,37.0,153.0,0.9969,3.21,0.69,9.3 +7.8,0.56,0.19,1.8,0.104,12.0,47.0,0.9964,3.19,0.93,9.5 +8.4,0.62,0.09,2.2,0.084,11.0,108.0,0.9964,3.15,0.66,9.8 +8.4,0.6,0.1,2.2,0.085,14.0,111.0,0.9964,3.15,0.66,9.8 +10.1,0.31,0.44,2.3,0.08,22.0,46.0,0.9988,3.32,0.67,9.7 +7.8,0.56,0.19,1.8,0.104,12.0,47.0,0.9964,3.19,0.93,9.5 +9.4,0.4,0.31,2.2,0.09,13.0,62.0,0.9966,3.07,0.63,10.5 +8.3,0.54,0.28,1.9,0.077,11.0,40.0,0.9978,3.39,0.61,10.0 +7.8,0.56,0.12,2.0,0.082,7.0,28.0,0.997,3.37,0.5,9.4 +8.8,0.55,0.04,2.2,0.119,14.0,56.0,0.9962,3.21,0.6,10.9 +7.0,0.69,0.08,1.8,0.097,22.0,89.0,0.9959,3.34,0.54,9.2 +7.3,1.07,0.09,1.7,0.178,10.0,89.0,0.9962,3.3,0.57,9.0 +8.8,0.55,0.04,2.2,0.119,14.0,56.0,0.9962,3.21,0.6,10.9 +7.3,0.695,0.0,2.5,0.075,3.0,13.0,0.998,3.49,0.52,9.2 +8.0,0.71,0.0,2.6,0.08,11.0,34.0,0.9976,3.44,0.53,9.5 +7.8,0.5,0.17,1.6,0.082,21.0,102.0,0.996,3.39,0.48,9.5 +9.0,0.62,0.04,1.9,0.146,27.0,90.0,0.9984,3.16,0.7,9.4 +8.2,1.33,0.0,1.7,0.081,3.0,12.0,0.9964,3.53,0.49,10.9 +8.1,1.33,0.0,1.8,0.082,3.0,12.0,0.9964,3.54,0.48,10.9 +8.0,0.59,0.16,1.8,0.065,3.0,16.0,0.9962,3.42,0.92,10.5 +6.1,0.38,0.15,1.8,0.072,6.0,19.0,0.9955,3.42,0.57,9.4 +8.0,0.745,0.56,2.0,0.118,30.0,134.0,0.9968,3.24,0.66,9.4 +5.6,0.5,0.09,2.3,0.049,17.0,99.0,0.9937,3.63,0.63,13.0 +5.6,0.5,0.09,2.3,0.049,17.0,99.0,0.9937,3.63,0.63,13.0 +6.6,0.5,0.01,1.5,0.06,17.0,26.0,0.9952,3.4,0.58,9.8 +7.9,1.04,0.05,2.2,0.084,13.0,29.0,0.9959,3.22,0.55,9.9 +8.4,0.745,0.11,1.9,0.09,16.0,63.0,0.9965,3.19,0.82,9.6 +8.3,0.715,0.15,1.8,0.089,10.0,52.0,0.9968,3.23,0.77,9.5 
+7.2,0.415,0.36,2.0,0.081,13.0,45.0,0.9972,3.48,0.64,9.2 +7.8,0.56,0.19,2.1,0.081,15.0,105.0,0.9962,3.33,0.54,9.5 +7.8,0.56,0.19,2.0,0.081,17.0,108.0,0.9962,3.32,0.54,9.5 +8.4,0.745,0.11,1.9,0.09,16.0,63.0,0.9965,3.19,0.82,9.6 +8.3,0.715,0.15,1.8,0.089,10.0,52.0,0.9968,3.23,0.77,9.5 +5.2,0.34,0.0,1.8,0.05,27.0,63.0,0.9916,3.68,0.79,14.0 +6.3,0.39,0.08,1.7,0.066,3.0,20.0,0.9954,3.34,0.58,9.4 +5.2,0.34,0.0,1.8,0.05,27.0,63.0,0.9916,3.68,0.79,14.0 +8.1,0.67,0.55,1.8,0.117,32.0,141.0,0.9968,3.17,0.62,9.4 +5.8,0.68,0.02,1.8,0.087,21.0,94.0,0.9944,3.54,0.52,10.0 +7.6,0.49,0.26,1.6,0.236,10.0,88.0,0.9968,3.11,0.8,9.3 +6.9,0.49,0.1,2.3,0.074,12.0,30.0,0.9959,3.42,0.58,10.2 +8.2,0.4,0.44,2.8,0.089,11.0,43.0,0.9975,3.53,0.61,10.5 +7.3,0.33,0.47,2.1,0.077,5.0,11.0,0.9958,3.33,0.53,10.3 +9.2,0.52,1.0,3.4,0.61,32.0,69.0,0.9996,2.74,2.0,9.4 +7.5,0.6,0.03,1.8,0.095,25.0,99.0,0.995,3.35,0.54,10.1 +7.5,0.6,0.03,1.8,0.095,25.0,99.0,0.995,3.35,0.54,10.1 +7.1,0.43,0.42,5.5,0.07,29.0,129.0,0.9973,3.42,0.72,10.5 +7.1,0.43,0.42,5.5,0.071,28.0,128.0,0.9973,3.42,0.71,10.5 +7.1,0.43,0.42,5.5,0.07,29.0,129.0,0.9973,3.42,0.72,10.5 +7.1,0.43,0.42,5.5,0.071,28.0,128.0,0.9973,3.42,0.71,10.5 +7.1,0.68,0.0,2.2,0.073,12.0,22.0,0.9969,3.48,0.5,9.3 +6.8,0.6,0.18,1.9,0.079,18.0,86.0,0.9968,3.59,0.57,9.3 +7.6,0.95,0.03,2.0,0.09,7.0,20.0,0.9959,3.2,0.56,9.6 +7.6,0.68,0.02,1.3,0.072,9.0,20.0,0.9965,3.17,1.08,9.2 +7.8,0.53,0.04,1.7,0.076,17.0,31.0,0.9964,3.33,0.56,10.0 +7.4,0.6,0.26,7.3,0.07,36.0,121.0,0.9982,3.37,0.49,9.4 +7.3,0.59,0.26,7.2,0.07,35.0,121.0,0.9981,3.37,0.49,9.4 +7.8,0.63,0.48,1.7,0.1,14.0,96.0,0.9961,3.19,0.62,9.5 +6.8,0.64,0.1,2.1,0.085,18.0,101.0,0.9956,3.34,0.52,10.2 +7.3,0.55,0.03,1.6,0.072,17.0,42.0,0.9956,3.37,0.48,9.0 +6.8,0.63,0.07,2.1,0.089,11.0,44.0,0.9953,3.47,0.55,10.4 +7.5,0.705,0.24,1.8,0.36,15.0,63.0,0.9964,3.0,1.59,9.5 +7.9,0.885,0.03,1.8,0.058,4.0,8.0,0.9972,3.36,0.33,9.1 +8.0,0.42,0.17,2.0,0.073,6.0,18.0,0.9972,3.29,0.61,9.2 +8.0,0.42,0.17,2.0,0.073,6.0,18.0,0.9972,3.29,0.61,9.2 +7.4,0.62,0.05,1.9,0.068,24.0,42.0,0.9961,3.42,0.57,11.5 +7.3,0.38,0.21,2.0,0.08,7.0,35.0,0.9961,3.33,0.47,9.5 +6.9,0.5,0.04,1.5,0.085,19.0,49.0,0.9958,3.35,0.78,9.5 +7.3,0.38,0.21,2.0,0.08,7.0,35.0,0.9961,3.33,0.47,9.5 +7.5,0.52,0.42,2.3,0.087,8.0,38.0,0.9972,3.58,0.61,10.5 +7.0,0.805,0.0,2.5,0.068,7.0,20.0,0.9969,3.48,0.56,9.6 +8.8,0.61,0.14,2.4,0.067,10.0,42.0,0.9969,3.19,0.59,9.5 +8.8,0.61,0.14,2.4,0.067,10.0,42.0,0.9969,3.19,0.59,9.5 +8.9,0.61,0.49,2.0,0.27,23.0,110.0,0.9972,3.12,1.02,9.3 +7.2,0.73,0.02,2.5,0.076,16.0,42.0,0.9972,3.44,0.52,9.3 +6.8,0.61,0.2,1.8,0.077,11.0,65.0,0.9971,3.54,0.58,9.3 +6.7,0.62,0.21,1.9,0.079,8.0,62.0,0.997,3.52,0.58,9.3 +8.9,0.31,0.57,2.0,0.111,26.0,85.0,0.9971,3.26,0.53,9.7 +7.4,0.39,0.48,2.0,0.082,14.0,67.0,0.9972,3.34,0.55,9.2 +7.7,0.705,0.1,2.6,0.084,9.0,26.0,0.9976,3.39,0.49,9.7 +7.9,0.5,0.33,2.0,0.084,15.0,143.0,0.9968,3.2,0.55,9.5 +7.9,0.49,0.32,1.9,0.082,17.0,144.0,0.9968,3.2,0.55,9.5 +8.2,0.5,0.35,2.9,0.077,21.0,127.0,0.9976,3.23,0.62,9.4 +6.4,0.37,0.25,1.9,0.074,21.0,49.0,0.9974,3.57,0.62,9.8 +6.8,0.63,0.12,3.8,0.099,16.0,126.0,0.9969,3.28,0.61,9.5 +7.6,0.55,0.21,2.2,0.071,7.0,28.0,0.9964,3.28,0.55,9.7 +7.6,0.55,0.21,2.2,0.071,7.0,28.0,0.9964,3.28,0.55,9.7 +7.8,0.59,0.33,2.0,0.074,24.0,120.0,0.9968,3.25,0.54,9.4 +7.3,0.58,0.3,2.4,0.074,15.0,55.0,0.9968,3.46,0.59,10.2 +11.5,0.3,0.6,2.0,0.067,12.0,27.0,0.9981,3.11,0.97,10.1 +5.4,0.835,0.08,1.2,0.046,13.0,93.0,0.9924,3.57,0.85,13.0 +6.9,1.09,0.06,2.1,0.061,12.0,31.0,0.9948,3.51,0.43,11.4 
+9.6,0.32,0.47,1.4,0.056,9.0,24.0,0.99695,3.22,0.82,10.3 +8.8,0.37,0.48,2.1,0.097,39.0,145.0,0.9975,3.04,1.03,9.3 +6.8,0.5,0.11,1.5,0.075,16.0,49.0,0.99545,3.36,0.79,9.5 +7.0,0.42,0.35,1.6,0.088,16.0,39.0,0.9961,3.34,0.55,9.2 +7.0,0.43,0.36,1.6,0.089,14.0,37.0,0.99615,3.34,0.56,9.2 +12.8,0.3,0.74,2.6,0.095,9.0,28.0,0.9994,3.2,0.77,10.8 +12.8,0.3,0.74,2.6,0.095,9.0,28.0,0.9994,3.2,0.77,10.8 +7.8,0.57,0.31,1.8,0.069,26.0,120.0,0.99625,3.29,0.53,9.3 +7.8,0.44,0.28,2.7,0.1,18.0,95.0,0.9966,3.22,0.67,9.4 +11.0,0.3,0.58,2.1,0.054,7.0,19.0,0.998,3.31,0.88,10.5 +9.7,0.53,0.6,2.0,0.039,5.0,19.0,0.99585,3.3,0.86,12.4 +8.0,0.725,0.24,2.8,0.083,10.0,62.0,0.99685,3.35,0.56,10.0 +11.6,0.44,0.64,2.1,0.059,5.0,15.0,0.998,3.21,0.67,10.2 +8.2,0.57,0.26,2.2,0.06,28.0,65.0,0.9959,3.3,0.43,10.1 +7.8,0.735,0.08,2.4,0.092,10.0,41.0,0.9974,3.24,0.71,9.8 +7.0,0.49,0.49,5.6,0.06,26.0,121.0,0.9974,3.34,0.76,10.5 +8.7,0.625,0.16,2.0,0.101,13.0,49.0,0.9962,3.14,0.57,11.0 +8.1,0.725,0.22,2.2,0.072,11.0,41.0,0.9967,3.36,0.55,9.1 +7.5,0.49,0.19,1.9,0.076,10.0,44.0,0.9957,3.39,0.54,9.7 +7.8,0.53,0.33,2.4,0.08,24.0,144.0,0.99655,3.3,0.6,9.5 +7.8,0.34,0.37,2.0,0.082,24.0,58.0,0.9964,3.34,0.59,9.4 +7.4,0.53,0.26,2.0,0.101,16.0,72.0,0.9957,3.15,0.57,9.4 +6.8,0.61,0.04,1.5,0.057,5.0,10.0,0.99525,3.42,0.6,9.5 +8.6,0.645,0.25,2.0,0.083,8.0,28.0,0.99815,3.28,0.6,10.0 +8.4,0.635,0.36,2.0,0.089,15.0,55.0,0.99745,3.31,0.57,10.4 +7.7,0.43,0.25,2.6,0.073,29.0,63.0,0.99615,3.37,0.58,10.5 +8.9,0.59,0.5,2.0,0.337,27.0,81.0,0.9964,3.04,1.61,9.5 +9.0,0.82,0.14,2.6,0.089,9.0,23.0,0.9984,3.39,0.63,9.8 +7.7,0.43,0.25,2.6,0.073,29.0,63.0,0.99615,3.37,0.58,10.5 +6.9,0.52,0.25,2.6,0.081,10.0,37.0,0.99685,3.46,0.5,11.0 +5.2,0.48,0.04,1.6,0.054,19.0,106.0,0.9927,3.54,0.62,12.2 +8.0,0.38,0.06,1.8,0.078,12.0,49.0,0.99625,3.37,0.52,9.9 +8.5,0.37,0.2,2.8,0.09,18.0,58.0,0.998,3.34,0.7,9.6 +6.9,0.52,0.25,2.6,0.081,10.0,37.0,0.99685,3.46,0.5,11.0 +8.2,1.0,0.09,2.3,0.065,7.0,37.0,0.99685,3.32,0.55,9.0 +7.2,0.63,0.0,1.9,0.097,14.0,38.0,0.99675,3.37,0.58,9.0 +7.2,0.63,0.0,1.9,0.097,14.0,38.0,0.99675,3.37,0.58,9.0 +7.2,0.645,0.0,1.9,0.097,15.0,39.0,0.99675,3.37,0.58,9.2 +7.2,0.63,0.0,1.9,0.097,14.0,38.0,0.99675,3.37,0.58,9.0 +8.2,1.0,0.09,2.3,0.065,7.0,37.0,0.99685,3.32,0.55,9.0 +8.9,0.635,0.37,1.7,0.263,5.0,62.0,0.9971,3.0,1.09,9.3 +12.0,0.38,0.56,2.1,0.093,6.0,24.0,0.99925,3.14,0.71,10.9 +7.7,0.58,0.1,1.8,0.102,28.0,109.0,0.99565,3.08,0.49,9.8 +15.0,0.21,0.44,2.2,0.075,10.0,24.0,1.00005,3.07,0.84,9.2 +15.0,0.21,0.44,2.2,0.075,10.0,24.0,1.00005,3.07,0.84,9.2 +7.3,0.66,0.0,2.0,0.084,6.0,23.0,0.9983,3.61,0.96,9.9 +7.1,0.68,0.07,1.9,0.075,16.0,51.0,0.99685,3.38,0.52,9.5 +8.2,0.6,0.17,2.3,0.072,11.0,73.0,0.9963,3.2,0.45,9.3 +7.7,0.53,0.06,1.7,0.074,9.0,39.0,0.99615,3.35,0.48,9.8 +7.3,0.66,0.0,2.0,0.084,6.0,23.0,0.9983,3.61,0.96,9.9 +10.8,0.32,0.44,1.6,0.063,16.0,37.0,0.9985,3.22,0.78,10.0 +7.1,0.6,0.0,1.8,0.074,16.0,34.0,0.9972,3.47,0.7,9.9 +11.1,0.35,0.48,3.1,0.09,5.0,21.0,0.9986,3.17,0.53,10.5 +7.7,0.775,0.42,1.9,0.092,8.0,86.0,0.9959,3.23,0.59,9.5 +7.1,0.6,0.0,1.8,0.074,16.0,34.0,0.9972,3.47,0.7,9.9 +8.0,0.57,0.23,3.2,0.073,17.0,119.0,0.99675,3.26,0.57,9.3 +9.4,0.34,0.37,2.2,0.075,5.0,13.0,0.998,3.22,0.62,9.2 +6.6,0.695,0.0,2.1,0.075,12.0,56.0,0.9968,3.49,0.67,9.2 +7.7,0.41,0.76,1.8,0.611,8.0,45.0,0.9968,3.06,1.26,9.4 +10.0,0.31,0.47,2.6,0.085,14.0,33.0,0.99965,3.36,0.8,10.5 +7.9,0.33,0.23,1.7,0.077,18.0,45.0,0.99625,3.29,0.65,9.3 +7.0,0.975,0.04,2.0,0.087,12.0,67.0,0.99565,3.35,0.6,9.4 +8.0,0.52,0.03,1.7,0.07,10.0,35.0,0.99575,3.34,0.57,10.0 
+7.9,0.37,0.23,1.8,0.077,23.0,49.0,0.9963,3.28,0.67,9.3 +12.5,0.56,0.49,2.4,0.064,5.0,27.0,0.9999,3.08,0.87,10.9 +11.8,0.26,0.52,1.8,0.071,6.0,10.0,0.9968,3.2,0.72,10.2 +8.1,0.87,0.0,3.3,0.096,26.0,61.0,1.00025,3.6,0.72,9.8 +7.9,0.35,0.46,3.6,0.078,15.0,37.0,0.9973,3.35,0.86,12.8 +6.9,0.54,0.04,3.0,0.077,7.0,27.0,0.9987,3.69,0.91,9.4 +11.5,0.18,0.51,4.0,0.104,4.0,23.0,0.9996,3.28,0.97,10.1 +7.9,0.545,0.06,4.0,0.087,27.0,61.0,0.9965,3.36,0.67,10.7 +11.5,0.18,0.51,4.0,0.104,4.0,23.0,0.9996,3.28,0.97,10.1 +10.9,0.37,0.58,4.0,0.071,17.0,65.0,0.99935,3.22,0.78,10.1 +8.4,0.715,0.2,2.4,0.076,10.0,38.0,0.99735,3.31,0.64,9.4 +7.5,0.65,0.18,7.0,0.088,27.0,94.0,0.99915,3.38,0.77,9.4 +7.9,0.545,0.06,4.0,0.087,27.0,61.0,0.9965,3.36,0.67,10.7 +6.9,0.54,0.04,3.0,0.077,7.0,27.0,0.9987,3.69,0.91,9.4 +11.5,0.18,0.51,4.0,0.104,4.0,23.0,0.9996,3.28,0.97,10.1 +10.3,0.32,0.45,6.4,0.073,5.0,13.0,0.9976,3.23,0.82,12.6 +8.9,0.4,0.32,5.6,0.087,10.0,47.0,0.9991,3.38,0.77,10.5 +11.4,0.26,0.44,3.6,0.071,6.0,19.0,0.9986,3.12,0.82,9.3 +7.7,0.27,0.68,3.5,0.358,5.0,10.0,0.9972,3.25,1.08,9.9 +7.6,0.52,0.12,3.0,0.067,12.0,53.0,0.9971,3.36,0.57,9.1 +8.9,0.4,0.32,5.6,0.087,10.0,47.0,0.9991,3.38,0.77,10.5 +9.9,0.59,0.07,3.4,0.102,32.0,71.0,1.00015,3.31,0.71,9.8 +9.9,0.59,0.07,3.4,0.102,32.0,71.0,1.00015,3.31,0.71,9.8 +12.0,0.45,0.55,2.0,0.073,25.0,49.0,0.9997,3.1,0.76,10.3 +7.5,0.4,0.12,3.0,0.092,29.0,53.0,0.9967,3.37,0.7,10.3 +8.7,0.52,0.09,2.5,0.091,20.0,49.0,0.9976,3.34,0.86,10.6 +11.6,0.42,0.53,3.3,0.105,33.0,98.0,1.001,3.2,0.95,9.2 +8.7,0.52,0.09,2.5,0.091,20.0,49.0,0.9976,3.34,0.86,10.6 +11.0,0.2,0.48,2.0,0.343,6.0,18.0,0.9979,3.3,0.71,10.5 +10.4,0.55,0.23,2.7,0.091,18.0,48.0,0.9994,3.22,0.64,10.3 +6.9,0.36,0.25,2.4,0.098,5.0,16.0,0.9964,3.41,0.6,10.1 +13.3,0.34,0.52,3.2,0.094,17.0,53.0,1.0014,3.05,0.81,9.5 +10.8,0.5,0.46,2.5,0.073,5.0,27.0,1.0001,3.05,0.64,9.5 +10.6,0.83,0.37,2.6,0.086,26.0,70.0,0.9981,3.16,0.52,9.9 +7.1,0.63,0.06,2.0,0.083,8.0,29.0,0.99855,3.67,0.73,9.6 +7.2,0.65,0.02,2.3,0.094,5.0,31.0,0.9993,3.67,0.8,9.7 +6.9,0.67,0.06,2.1,0.08,8.0,33.0,0.99845,3.68,0.71,9.6 +7.5,0.53,0.06,2.6,0.086,20.0,44.0,0.9965,3.38,0.59,10.7 +11.1,0.18,0.48,1.5,0.068,7.0,15.0,0.9973,3.22,0.64,10.1 +8.3,0.705,0.12,2.6,0.092,12.0,28.0,0.9994,3.51,0.72,10.0 +7.4,0.67,0.12,1.6,0.186,5.0,21.0,0.996,3.39,0.54,9.5 +8.4,0.65,0.6,2.1,0.112,12.0,90.0,0.9973,3.2,0.52,9.2 +10.3,0.53,0.48,2.5,0.063,6.0,25.0,0.9998,3.12,0.59,9.3 +7.6,0.62,0.32,2.2,0.082,7.0,54.0,0.9966,3.36,0.52,9.4 +10.3,0.41,0.42,2.4,0.213,6.0,14.0,0.9994,3.19,0.62,9.5 +10.3,0.43,0.44,2.4,0.214,5.0,12.0,0.9994,3.19,0.63,9.5 +7.4,0.29,0.38,1.7,0.062,9.0,30.0,0.9968,3.41,0.53,9.5 +10.3,0.53,0.48,2.5,0.063,6.0,25.0,0.9998,3.12,0.59,9.3 +7.9,0.53,0.24,2.0,0.072,15.0,105.0,0.996,3.27,0.54,9.4 +9.0,0.46,0.31,2.8,0.093,19.0,98.0,0.99815,3.32,0.63,9.5 +8.6,0.47,0.3,3.0,0.076,30.0,135.0,0.9976,3.3,0.53,9.4 +7.4,0.36,0.29,2.6,0.087,26.0,72.0,0.99645,3.39,0.68,11.0 +7.1,0.35,0.29,2.5,0.096,20.0,53.0,0.9962,3.42,0.65,11.0 +9.6,0.56,0.23,3.4,0.102,37.0,92.0,0.9996,3.3,0.65,10.1 +9.6,0.77,0.12,2.9,0.082,30.0,74.0,0.99865,3.3,0.64,10.4 +9.8,0.66,0.39,3.2,0.083,21.0,59.0,0.9989,3.37,0.71,11.5 +9.6,0.77,0.12,2.9,0.082,30.0,74.0,0.99865,3.3,0.64,10.4 +9.8,0.66,0.39,3.2,0.083,21.0,59.0,0.9989,3.37,0.71,11.5 +9.3,0.61,0.26,3.4,0.09,25.0,87.0,0.99975,3.24,0.62,9.7 +7.8,0.62,0.05,2.3,0.079,6.0,18.0,0.99735,3.29,0.63,9.3 +10.3,0.59,0.42,2.8,0.09,35.0,73.0,0.999,3.28,0.7,9.5 +10.0,0.49,0.2,11.0,0.071,13.0,50.0,1.0015,3.16,0.69,9.2 +10.0,0.49,0.2,11.0,0.071,13.0,50.0,1.0015,3.16,0.69,9.2 
+11.6,0.53,0.66,3.65,0.121,6.0,14.0,0.9978,3.05,0.74,11.5 +10.3,0.44,0.5,4.5,0.107,5.0,13.0,0.998,3.28,0.83,11.5 +13.4,0.27,0.62,2.6,0.082,6.0,21.0,1.0002,3.16,0.67,9.7 +10.7,0.46,0.39,2.0,0.061,7.0,15.0,0.9981,3.18,0.62,9.5 +10.2,0.36,0.64,2.9,0.122,10.0,41.0,0.998,3.23,0.66,12.5 +10.2,0.36,0.64,2.9,0.122,10.0,41.0,0.998,3.23,0.66,12.5 +8.0,0.58,0.28,3.2,0.066,21.0,114.0,0.9973,3.22,0.54,9.4 +8.4,0.56,0.08,2.1,0.105,16.0,44.0,0.9958,3.13,0.52,11.0 +7.9,0.65,0.01,2.5,0.078,17.0,38.0,0.9963,3.34,0.74,11.7 +11.9,0.695,0.53,3.4,0.128,7.0,21.0,0.9992,3.17,0.84,12.2 +8.9,0.43,0.45,1.9,0.052,6.0,16.0,0.9948,3.35,0.7,12.5 +7.8,0.43,0.32,2.8,0.08,29.0,58.0,0.9974,3.31,0.64,10.3 +12.4,0.49,0.58,3.0,0.103,28.0,99.0,1.0008,3.16,1.0,11.5 +12.5,0.28,0.54,2.3,0.082,12.0,29.0,0.9997,3.11,1.36,9.8 +12.2,0.34,0.5,2.4,0.066,10.0,21.0,1.0,3.12,1.18,9.2 +10.6,0.42,0.48,2.7,0.065,5.0,18.0,0.9972,3.21,0.87,11.3 +10.9,0.39,0.47,1.8,0.118,6.0,14.0,0.9982,3.3,0.75,9.8 +10.9,0.39,0.47,1.8,0.118,6.0,14.0,0.9982,3.3,0.75,9.8 +11.9,0.57,0.5,2.6,0.082,6.0,32.0,1.0006,3.12,0.78,10.7 +7.0,0.685,0.0,1.9,0.067,40.0,63.0,0.9979,3.6,0.81,9.9 +6.6,0.815,0.02,2.7,0.072,17.0,34.0,0.9955,3.58,0.89,12.3 +13.8,0.49,0.67,3.0,0.093,6.0,15.0,0.9986,3.02,0.93,12.0 +9.6,0.56,0.31,2.8,0.089,15.0,46.0,0.9979,3.11,0.92,10.0 +9.1,0.785,0.0,2.6,0.093,11.0,28.0,0.9994,3.36,0.86,9.4 +10.7,0.67,0.22,2.7,0.107,17.0,34.0,1.0004,3.28,0.98,9.9 +9.1,0.795,0.0,2.6,0.096,11.0,26.0,0.9994,3.35,0.83,9.4 +7.7,0.665,0.0,2.4,0.09,8.0,19.0,0.9974,3.27,0.73,9.3 +13.5,0.53,0.79,4.8,0.12,23.0,77.0,1.0018,3.18,0.77,13.0 +6.1,0.21,0.4,1.4,0.066,40.5,165.0,0.9912,3.25,0.59,11.9 +6.7,0.75,0.01,2.4,0.078,17.0,32.0,0.9955,3.55,0.61,12.8 +11.5,0.41,0.52,3.0,0.08,29.0,55.0,1.0001,3.26,0.88,11.0 +10.5,0.42,0.66,2.95,0.116,12.0,29.0,0.997,3.24,0.75,11.7 +11.9,0.43,0.66,3.1,0.109,10.0,23.0,1.0,3.15,0.85,10.4 +12.6,0.38,0.66,2.6,0.088,10.0,41.0,1.001,3.17,0.68,9.8 +8.2,0.7,0.23,2.0,0.099,14.0,81.0,0.9973,3.19,0.7,9.4 +8.6,0.45,0.31,2.6,0.086,21.0,50.0,0.9982,3.37,0.91,9.9 +11.9,0.58,0.66,2.5,0.072,6.0,37.0,0.9992,3.05,0.56,10.0 +12.5,0.46,0.63,2.0,0.071,6.0,15.0,0.9988,2.99,0.87,10.2 +12.8,0.615,0.66,5.8,0.083,7.0,42.0,1.0022,3.07,0.73,10.0 +10.0,0.42,0.5,3.4,0.107,7.0,21.0,0.9979,3.26,0.93,11.8 +12.8,0.615,0.66,5.8,0.083,7.0,42.0,1.0022,3.07,0.73,10.0 +10.4,0.575,0.61,2.6,0.076,11.0,24.0,1.0,3.16,0.69,9.0 +10.3,0.34,0.52,2.8,0.159,15.0,75.0,0.9998,3.18,0.64,9.4 +9.4,0.27,0.53,2.4,0.074,6.0,18.0,0.9962,3.2,1.13,12.0 +6.9,0.765,0.02,2.3,0.063,35.0,63.0,0.9975,3.57,0.78,9.9 +7.9,0.24,0.4,1.6,0.056,11.0,25.0,0.9967,3.32,0.87,8.7 +9.1,0.28,0.48,1.8,0.067,26.0,46.0,0.9967,3.32,1.04,10.6 +7.4,0.55,0.22,2.2,0.106,12.0,72.0,0.9959,3.05,0.63,9.2 +14.0,0.41,0.63,3.8,0.089,6.0,47.0,1.0014,3.01,0.81,10.8 +11.5,0.54,0.71,4.4,0.124,6.0,15.0,0.9984,3.01,0.83,11.8 +11.5,0.45,0.5,3.0,0.078,19.0,47.0,1.0003,3.26,1.11,11.0 +9.4,0.27,0.53,2.4,0.074,6.0,18.0,0.9962,3.2,1.13,12.0 +11.4,0.625,0.66,6.2,0.088,6.0,24.0,0.9988,3.11,0.99,13.3 +8.3,0.42,0.38,2.5,0.094,24.0,60.0,0.9979,3.31,0.7,10.8 +8.3,0.26,0.42,2.0,0.08,11.0,27.0,0.9974,3.21,0.8,9.4 +13.7,0.415,0.68,2.9,0.085,17.0,43.0,1.0014,3.06,0.8,10.0 +8.3,0.26,0.42,2.0,0.08,11.0,27.0,0.9974,3.21,0.8,9.4 +8.3,0.26,0.42,2.0,0.08,11.0,27.0,0.9974,3.21,0.8,9.4 +7.7,0.51,0.28,2.1,0.087,23.0,54.0,0.998,3.42,0.74,9.2 +7.4,0.63,0.07,2.4,0.09,11.0,37.0,0.9979,3.43,0.76,9.7 +7.8,0.54,0.26,2.0,0.088,23.0,48.0,0.9981,3.41,0.74,9.2 +8.3,0.66,0.15,1.9,0.079,17.0,42.0,0.9972,3.31,0.54,9.6 +7.8,0.46,0.26,1.9,0.088,23.0,53.0,0.9981,3.43,0.74,9.2 
+9.6,0.38,0.31,2.5,0.096,16.0,49.0,0.9982,3.19,0.7,10.0 +5.6,0.85,0.05,1.4,0.045,12.0,88.0,0.9924,3.56,0.82,12.9 +13.7,0.415,0.68,2.9,0.085,17.0,43.0,1.0014,3.06,0.8,10.0 +9.5,0.37,0.52,2.0,0.082,6.0,26.0,0.998,3.18,0.51,9.5 +8.4,0.665,0.61,2.0,0.112,13.0,95.0,0.997,3.16,0.54,9.1 +12.7,0.6,0.65,2.3,0.063,6.0,25.0,0.9997,3.03,0.57,9.9 +12.0,0.37,0.76,4.2,0.066,7.0,38.0,1.0004,3.22,0.6,13.0 +6.6,0.735,0.02,7.9,0.122,68.0,124.0,0.9994,3.47,0.53,9.9 +11.5,0.59,0.59,2.6,0.087,13.0,49.0,0.9988,3.18,0.65,11.0 +11.5,0.59,0.59,2.6,0.087,13.0,49.0,0.9988,3.18,0.65,11.0 +8.7,0.765,0.22,2.3,0.064,9.0,42.0,0.9963,3.1,0.55,9.4 +6.6,0.735,0.02,7.9,0.122,68.0,124.0,0.9994,3.47,0.53,9.9 +7.7,0.26,0.3,1.7,0.059,20.0,38.0,0.9949,3.29,0.47,10.8 +12.2,0.48,0.54,2.6,0.085,19.0,64.0,1.0,3.1,0.61,10.5 +11.4,0.6,0.49,2.7,0.085,10.0,41.0,0.9994,3.15,0.63,10.5 +7.7,0.69,0.05,2.7,0.075,15.0,27.0,0.9974,3.26,0.61,9.1 +8.7,0.31,0.46,1.4,0.059,11.0,25.0,0.9966,3.36,0.76,10.1 +9.8,0.44,0.47,2.5,0.063,9.0,28.0,0.9981,3.24,0.65,10.8 +12.0,0.39,0.66,3.0,0.093,12.0,30.0,0.9996,3.18,0.63,10.8 +10.4,0.34,0.58,3.7,0.174,6.0,16.0,0.997,3.19,0.7,11.3 +12.5,0.46,0.49,4.5,0.07,26.0,49.0,0.9981,3.05,0.57,9.6 +9.0,0.43,0.34,2.5,0.08,26.0,86.0,0.9987,3.38,0.62,9.5 +9.1,0.45,0.35,2.4,0.08,23.0,78.0,0.9987,3.38,0.62,9.5 +7.1,0.735,0.16,1.9,0.1,15.0,77.0,0.9966,3.27,0.64,9.3 +9.9,0.4,0.53,6.7,0.097,6.0,19.0,0.9986,3.27,0.82,11.7 +8.8,0.52,0.34,2.7,0.087,24.0,122.0,0.9982,3.26,0.61,9.5 +8.6,0.725,0.24,6.6,0.117,31.0,134.0,1.0014,3.32,1.07,9.3 +10.6,0.48,0.64,2.2,0.111,6.0,20.0,0.997,3.26,0.66,11.7 +7.0,0.58,0.12,1.9,0.091,34.0,124.0,0.9956,3.44,0.48,10.5 +11.9,0.38,0.51,2.0,0.121,7.0,20.0,0.9996,3.24,0.76,10.4 +6.8,0.77,0.0,1.8,0.066,34.0,52.0,0.9976,3.62,0.68,9.9 +9.5,0.56,0.33,2.4,0.089,35.0,67.0,0.9972,3.28,0.73,11.8 +6.6,0.84,0.03,2.3,0.059,32.0,48.0,0.9952,3.52,0.56,12.3 +7.7,0.96,0.2,2.0,0.047,15.0,60.0,0.9955,3.36,0.44,10.9 +10.5,0.24,0.47,2.1,0.066,6.0,24.0,0.9978,3.15,0.9,11.0 +7.7,0.96,0.2,2.0,0.047,15.0,60.0,0.9955,3.36,0.44,10.9 +6.6,0.84,0.03,2.3,0.059,32.0,48.0,0.9952,3.52,0.56,12.3 +6.4,0.67,0.08,2.1,0.045,19.0,48.0,0.9949,3.49,0.49,11.4 +9.5,0.78,0.22,1.9,0.077,6.0,32.0,0.9988,3.26,0.56,10.6 +9.1,0.52,0.33,1.3,0.07,9.0,30.0,0.9978,3.24,0.6,9.3 +12.8,0.84,0.63,2.4,0.088,13.0,35.0,0.9997,3.1,0.6,10.4 +10.5,0.24,0.47,2.1,0.066,6.0,24.0,0.9978,3.15,0.9,11.0 +7.8,0.55,0.35,2.2,0.074,21.0,66.0,0.9974,3.25,0.56,9.2 +11.9,0.37,0.69,2.3,0.078,12.0,24.0,0.9958,3.0,0.65,12.8 +12.3,0.39,0.63,2.3,0.091,6.0,18.0,1.0004,3.16,0.49,9.5 +10.4,0.41,0.55,3.2,0.076,22.0,54.0,0.9996,3.15,0.89,9.9 +12.3,0.39,0.63,2.3,0.091,6.0,18.0,1.0004,3.16,0.49,9.5 +8.0,0.67,0.3,2.0,0.06,38.0,62.0,0.9958,3.26,0.56,10.2 +11.1,0.45,0.73,3.2,0.066,6.0,22.0,0.9986,3.17,0.66,11.2 +10.4,0.41,0.55,3.2,0.076,22.0,54.0,0.9996,3.15,0.89,9.9 +7.0,0.62,0.18,1.5,0.062,7.0,50.0,0.9951,3.08,0.6,9.3 +12.6,0.31,0.72,2.2,0.072,6.0,29.0,0.9987,2.88,0.82,9.8 +11.9,0.4,0.65,2.15,0.068,7.0,27.0,0.9988,3.06,0.68,11.3 +15.6,0.685,0.76,3.7,0.1,6.0,43.0,1.0032,2.95,0.68,11.2 +10.0,0.44,0.49,2.7,0.077,11.0,19.0,0.9963,3.23,0.63,11.6 +5.3,0.57,0.01,1.7,0.054,5.0,27.0,0.9934,3.57,0.84,12.5 +9.5,0.735,0.1,2.1,0.079,6.0,31.0,0.9986,3.23,0.56,10.1 +12.5,0.38,0.6,2.6,0.081,31.0,72.0,0.9996,3.1,0.73,10.5 +9.3,0.48,0.29,2.1,0.127,6.0,16.0,0.9968,3.22,0.72,11.2 +8.6,0.53,0.22,2.0,0.1,7.0,27.0,0.9967,3.2,0.56,10.2 +11.9,0.39,0.69,2.8,0.095,17.0,35.0,0.9994,3.1,0.61,10.8 +11.9,0.39,0.69,2.8,0.095,17.0,35.0,0.9994,3.1,0.61,10.8 +8.4,0.37,0.53,1.8,0.413,9.0,26.0,0.9979,3.06,1.06,9.1 
+6.8,0.56,0.03,1.7,0.084,18.0,35.0,0.9968,3.44,0.63,10.0 +10.4,0.33,0.63,2.8,0.084,5.0,22.0,0.9998,3.26,0.74,11.2 +7.0,0.23,0.4,1.6,0.063,21.0,67.0,0.9952,3.5,0.63,11.1 +11.3,0.62,0.67,5.2,0.086,6.0,19.0,0.9988,3.22,0.69,13.4 +8.9,0.59,0.39,2.3,0.095,5.0,22.0,0.9986,3.37,0.58,10.3 +9.2,0.63,0.21,2.7,0.097,29.0,65.0,0.9988,3.28,0.58,9.6 +10.4,0.33,0.63,2.8,0.084,5.0,22.0,0.9998,3.26,0.74,11.2 +11.6,0.58,0.66,2.2,0.074,10.0,47.0,1.0008,3.25,0.57,9.0 +9.2,0.43,0.52,2.3,0.083,14.0,23.0,0.9976,3.35,0.61,11.3 +8.3,0.615,0.22,2.6,0.087,6.0,19.0,0.9982,3.26,0.61,9.3 +11.0,0.26,0.68,2.55,0.085,10.0,25.0,0.997,3.18,0.61,11.8 +8.1,0.66,0.7,2.2,0.098,25.0,129.0,0.9972,3.08,0.53,9.0 +11.5,0.315,0.54,2.1,0.084,5.0,15.0,0.9987,2.98,0.7,9.2 +10.0,0.29,0.4,2.9,0.098,10.0,26.0,1.0006,3.48,0.91,9.7 +10.3,0.5,0.42,2.0,0.069,21.0,51.0,0.9982,3.16,0.72,11.5 +8.8,0.46,0.45,2.6,0.065,7.0,18.0,0.9947,3.32,0.79,14.0 +11.4,0.36,0.69,2.1,0.09,6.0,21.0,1.0,3.17,0.62,9.2 +8.7,0.82,0.02,1.2,0.07,36.0,48.0,0.9952,3.2,0.58,9.8 +13.0,0.32,0.65,2.6,0.093,15.0,47.0,0.9996,3.05,0.61,10.6 +9.6,0.54,0.42,2.4,0.081,25.0,52.0,0.997,3.2,0.71,11.4 +12.5,0.37,0.55,2.6,0.083,25.0,68.0,0.9995,3.15,0.82,10.4 +9.9,0.35,0.55,2.1,0.062,5.0,14.0,0.9971,3.26,0.79,10.6 +10.5,0.28,0.51,1.7,0.08,10.0,24.0,0.9982,3.2,0.89,9.4 +9.6,0.68,0.24,2.2,0.087,5.0,28.0,0.9988,3.14,0.6,10.2 +9.3,0.27,0.41,2.0,0.091,6.0,16.0,0.998,3.28,0.7,9.7 +10.4,0.24,0.49,1.8,0.075,6.0,20.0,0.9977,3.18,1.06,11.0 +9.6,0.68,0.24,2.2,0.087,5.0,28.0,0.9988,3.14,0.6,10.2 +9.4,0.685,0.11,2.7,0.077,6.0,31.0,0.9984,3.19,0.7,10.1 +10.6,0.28,0.39,15.5,0.069,6.0,23.0,1.0026,3.12,0.66,9.2 +9.4,0.3,0.56,2.8,0.08,6.0,17.0,0.9964,3.15,0.92,11.7 +10.6,0.36,0.59,2.2,0.152,6.0,18.0,0.9986,3.04,1.05,9.4 +10.6,0.36,0.6,2.2,0.152,7.0,18.0,0.9986,3.04,1.06,9.4 +10.6,0.44,0.68,4.1,0.114,6.0,24.0,0.997,3.06,0.66,13.4 +10.2,0.67,0.39,1.9,0.054,6.0,17.0,0.9976,3.17,0.47,10.0 +10.2,0.67,0.39,1.9,0.054,6.0,17.0,0.9976,3.17,0.47,10.0 +10.2,0.645,0.36,1.8,0.053,5.0,14.0,0.9982,3.17,0.42,10.0 +11.6,0.32,0.55,2.8,0.081,35.0,67.0,1.0002,3.32,0.92,10.8 +9.3,0.39,0.4,2.6,0.073,10.0,26.0,0.9984,3.34,0.75,10.2 +9.3,0.775,0.27,2.8,0.078,24.0,56.0,0.9984,3.31,0.67,10.6 +9.2,0.41,0.5,2.5,0.055,12.0,25.0,0.9952,3.34,0.79,13.3 +8.9,0.4,0.51,2.6,0.052,13.0,27.0,0.995,3.32,0.9,13.4 +8.7,0.69,0.31,3.0,0.086,23.0,81.0,1.0002,3.48,0.74,11.6 +6.5,0.39,0.23,8.3,0.051,28.0,91.0,0.9952,3.44,0.55,12.1 +10.7,0.35,0.53,2.6,0.07,5.0,16.0,0.9972,3.15,0.65,11.0 +7.8,0.52,0.25,1.9,0.081,14.0,38.0,0.9984,3.43,0.65,9.0 +7.2,0.34,0.32,2.5,0.09,43.0,113.0,0.9966,3.32,0.79,11.1 +10.7,0.35,0.53,2.6,0.07,5.0,16.0,0.9972,3.15,0.65,11.0 +8.7,0.69,0.31,3.0,0.086,23.0,81.0,1.0002,3.48,0.74,11.6 +7.8,0.52,0.25,1.9,0.081,14.0,38.0,0.9984,3.43,0.65,9.0 +10.4,0.44,0.73,6.55,0.074,38.0,76.0,0.999,3.17,0.85,12.0 +10.4,0.44,0.73,6.55,0.074,38.0,76.0,0.999,3.17,0.85,12.0 +10.5,0.26,0.47,1.9,0.078,6.0,24.0,0.9976,3.18,1.04,10.9 +10.5,0.24,0.42,1.8,0.077,6.0,22.0,0.9976,3.21,1.05,10.8 +10.2,0.49,0.63,2.9,0.072,10.0,26.0,0.9968,3.16,0.78,12.5 +10.4,0.24,0.46,1.8,0.075,6.0,21.0,0.9976,3.25,1.02,10.8 +11.2,0.67,0.55,2.3,0.084,6.0,13.0,1.0,3.17,0.71,9.5 +10.0,0.59,0.31,2.2,0.09,26.0,62.0,0.9994,3.18,0.63,10.2 +13.3,0.29,0.75,2.8,0.084,23.0,43.0,0.9986,3.04,0.68,11.4 +12.4,0.42,0.49,4.6,0.073,19.0,43.0,0.9978,3.02,0.61,9.5 +10.0,0.59,0.31,2.2,0.09,26.0,62.0,0.9994,3.18,0.63,10.2 +10.7,0.4,0.48,2.1,0.125,15.0,49.0,0.998,3.03,0.81,9.7 +10.5,0.51,0.64,2.4,0.107,6.0,15.0,0.9973,3.09,0.66,11.8 +10.5,0.51,0.64,2.4,0.107,6.0,15.0,0.9973,3.09,0.66,11.8 
+8.5,0.655,0.49,6.1,0.122,34.0,151.0,1.001,3.31,1.14,9.3 +12.5,0.6,0.49,4.3,0.1,5.0,14.0,1.001,3.25,0.74,11.9 +10.4,0.61,0.49,2.1,0.2,5.0,16.0,0.9994,3.16,0.63,8.4 +10.9,0.21,0.49,2.8,0.088,11.0,32.0,0.9972,3.22,0.68,11.7 +7.3,0.365,0.49,2.5,0.088,39.0,106.0,0.9966,3.36,0.78,11.0 +9.8,0.25,0.49,2.7,0.088,15.0,33.0,0.9982,3.42,0.9,10.0 +7.6,0.41,0.49,2.0,0.088,16.0,43.0,0.998,3.48,0.64,9.1 +8.2,0.39,0.49,2.3,0.099,47.0,133.0,0.9979,3.38,0.99,9.8 +9.3,0.4,0.49,2.5,0.085,38.0,142.0,0.9978,3.22,0.55,9.4 +9.2,0.43,0.49,2.4,0.086,23.0,116.0,0.9976,3.23,0.64,9.5 +10.4,0.64,0.24,2.8,0.105,29.0,53.0,0.9998,3.24,0.67,9.9 +7.3,0.365,0.49,2.5,0.088,39.0,106.0,0.9966,3.36,0.78,11.0 +7.0,0.38,0.49,2.5,0.097,33.0,85.0,0.9962,3.39,0.77,11.4 +8.2,0.42,0.49,2.6,0.084,32.0,55.0,0.9988,3.34,0.75,8.7 +9.9,0.63,0.24,2.4,0.077,6.0,33.0,0.9974,3.09,0.57,9.4 +9.1,0.22,0.24,2.1,0.078,1.0,28.0,0.999,3.41,0.87,10.3 +11.9,0.38,0.49,2.7,0.098,12.0,42.0,1.0004,3.16,0.61,10.3 +11.9,0.38,0.49,2.7,0.098,12.0,42.0,1.0004,3.16,0.61,10.3 +10.3,0.27,0.24,2.1,0.072,15.0,33.0,0.9956,3.22,0.66,12.8 +10.0,0.48,0.24,2.7,0.102,13.0,32.0,1.0,3.28,0.56,10.0 +9.1,0.22,0.24,2.1,0.078,1.0,28.0,0.999,3.41,0.87,10.3 +9.9,0.63,0.24,2.4,0.077,6.0,33.0,0.9974,3.09,0.57,9.4 +8.1,0.825,0.24,2.1,0.084,5.0,13.0,0.9972,3.37,0.77,10.7 +12.9,0.35,0.49,5.8,0.066,5.0,35.0,1.0014,3.2,0.66,12.0 +11.2,0.5,0.74,5.15,0.1,5.0,17.0,0.9996,3.22,0.62,11.2 +9.2,0.59,0.24,3.3,0.101,20.0,47.0,0.9988,3.26,0.67,9.6 +9.5,0.46,0.49,6.3,0.064,5.0,17.0,0.9988,3.21,0.73,11.0 +9.3,0.715,0.24,2.1,0.07,5.0,20.0,0.9966,3.12,0.59,9.9 +11.2,0.66,0.24,2.5,0.085,16.0,53.0,0.9993,3.06,0.72,11.0 +14.3,0.31,0.74,1.8,0.075,6.0,15.0,1.0008,2.86,0.79,8.4 +9.1,0.47,0.49,2.6,0.094,38.0,106.0,0.9982,3.08,0.59,9.1 +7.5,0.55,0.24,2.0,0.078,10.0,28.0,0.9983,3.45,0.78,9.5 +10.6,0.31,0.49,2.5,0.067,6.0,21.0,0.9987,3.26,0.86,10.7 +12.4,0.35,0.49,2.6,0.079,27.0,69.0,0.9994,3.12,0.75,10.4 +9.0,0.53,0.49,1.9,0.171,6.0,25.0,0.9975,3.27,0.61,9.4 +6.8,0.51,0.01,2.1,0.074,9.0,25.0,0.9958,3.33,0.56,9.5 +9.4,0.43,0.24,2.8,0.092,14.0,45.0,0.998,3.19,0.73,10.0 +9.5,0.46,0.24,2.7,0.092,14.0,44.0,0.998,3.12,0.74,10.0 +5.0,1.04,0.24,1.6,0.05,32.0,96.0,0.9934,3.74,0.62,11.5 +15.5,0.645,0.49,4.2,0.095,10.0,23.0,1.00315,2.92,0.74,11.1 +15.5,0.645,0.49,4.2,0.095,10.0,23.0,1.00315,2.92,0.74,11.1 +10.9,0.53,0.49,4.6,0.118,10.0,17.0,1.0002,3.07,0.56,11.7 +15.6,0.645,0.49,4.2,0.095,10.0,23.0,1.00315,2.92,0.74,11.1 +10.9,0.53,0.49,4.6,0.118,10.0,17.0,1.0002,3.07,0.56,11.7 +13.0,0.47,0.49,4.3,0.085,6.0,47.0,1.0021,3.3,0.68,12.7 +12.7,0.6,0.49,2.8,0.075,5.0,19.0,0.9994,3.14,0.57,11.4 +9.0,0.44,0.49,2.4,0.078,26.0,121.0,0.9978,3.23,0.58,9.2 +9.0,0.54,0.49,2.9,0.094,41.0,110.0,0.9982,3.08,0.61,9.2 +7.6,0.29,0.49,2.7,0.092,25.0,60.0,0.9971,3.31,0.61,10.1 +13.0,0.47,0.49,4.3,0.085,6.0,47.0,1.0021,3.3,0.68,12.7 +12.7,0.6,0.49,2.8,0.075,5.0,19.0,0.9994,3.14,0.57,11.4 +8.7,0.7,0.24,2.5,0.226,5.0,15.0,0.9991,3.32,0.6,9.0 +8.7,0.7,0.24,2.5,0.226,5.0,15.0,0.9991,3.32,0.6,9.0 +9.8,0.5,0.49,2.6,0.25,5.0,20.0,0.999,3.31,0.79,10.7 +6.2,0.36,0.24,2.2,0.095,19.0,42.0,0.9946,3.57,0.57,11.7 +11.5,0.35,0.49,3.3,0.07,10.0,37.0,1.0003,3.32,0.91,11.0 +6.2,0.36,0.24,2.2,0.095,19.0,42.0,0.9946,3.57,0.57,11.7 +10.2,0.24,0.49,2.4,0.075,10.0,28.0,0.9978,3.14,0.61,10.4 +10.5,0.59,0.49,2.1,0.07,14.0,47.0,0.9991,3.3,0.56,9.6 +10.6,0.34,0.49,3.2,0.078,20.0,78.0,0.9992,3.19,0.7,10.0 +12.3,0.27,0.49,3.1,0.079,28.0,46.0,0.9993,3.2,0.8,10.2 +9.9,0.5,0.24,2.3,0.103,6.0,14.0,0.9978,3.34,0.52,10.0 +8.8,0.44,0.49,2.8,0.083,18.0,111.0,0.9982,3.3,0.6,9.5 
+8.8,0.47,0.49,2.9,0.085,17.0,110.0,0.9982,3.29,0.6,9.8 +10.6,0.31,0.49,2.2,0.063,18.0,40.0,0.9976,3.14,0.51,9.8 +12.3,0.5,0.49,2.2,0.089,5.0,14.0,1.0002,3.19,0.44,9.6 +12.3,0.5,0.49,2.2,0.089,5.0,14.0,1.0002,3.19,0.44,9.6 +11.7,0.49,0.49,2.2,0.083,5.0,15.0,1.0,3.19,0.43,9.2 +12.0,0.28,0.49,1.9,0.074,10.0,21.0,0.9976,2.98,0.66,9.9 +11.8,0.33,0.49,3.4,0.093,54.0,80.0,1.0002,3.3,0.76,10.7 +7.6,0.51,0.24,2.4,0.091,8.0,38.0,0.998,3.47,0.66,9.6 +11.1,0.31,0.49,2.7,0.094,16.0,47.0,0.9986,3.12,1.02,10.6 +7.3,0.73,0.24,1.9,0.108,18.0,102.0,0.9967,3.26,0.59,9.3 +5.0,0.42,0.24,2.0,0.06,19.0,50.0,0.9917,3.72,0.74,14.0 +10.2,0.29,0.49,2.6,0.059,5.0,13.0,0.9976,3.05,0.74,10.5 +9.0,0.45,0.49,2.6,0.084,21.0,75.0,0.9987,3.35,0.57,9.7 +6.6,0.39,0.49,1.7,0.07,23.0,149.0,0.9922,3.12,0.5,11.5 +9.0,0.45,0.49,2.6,0.084,21.0,75.0,0.9987,3.35,0.57,9.7 +9.9,0.49,0.58,3.5,0.094,9.0,43.0,1.0004,3.29,0.58,9.0 +7.9,0.72,0.17,2.6,0.096,20.0,38.0,0.9978,3.4,0.53,9.5 +8.9,0.595,0.41,7.9,0.086,30.0,109.0,0.9998,3.27,0.57,9.3 +12.4,0.4,0.51,2.0,0.059,6.0,24.0,0.9994,3.04,0.6,9.3 +11.9,0.58,0.58,1.9,0.071,5.0,18.0,0.998,3.09,0.63,10.0 +8.5,0.585,0.18,2.1,0.078,5.0,30.0,0.9967,3.2,0.48,9.8 +12.7,0.59,0.45,2.3,0.082,11.0,22.0,1.0,3.0,0.7,9.3 +8.2,0.915,0.27,2.1,0.088,7.0,23.0,0.9962,3.26,0.47,10.0 +13.2,0.46,0.52,2.2,0.071,12.0,35.0,1.0006,3.1,0.56,9.0 +7.7,0.835,0.0,2.6,0.081,6.0,14.0,0.9975,3.3,0.52,9.3 +13.2,0.46,0.52,2.2,0.071,12.0,35.0,1.0006,3.1,0.56,9.0 +8.3,0.58,0.13,2.9,0.096,14.0,63.0,0.9984,3.17,0.62,9.1 +8.3,0.6,0.13,2.6,0.085,6.0,24.0,0.9984,3.31,0.59,9.2 +9.4,0.41,0.48,4.6,0.072,10.0,20.0,0.9973,3.34,0.79,12.2 +8.8,0.48,0.41,3.3,0.092,26.0,52.0,0.9982,3.31,0.53,10.5 +10.1,0.65,0.37,5.1,0.11,11.0,65.0,1.0026,3.32,0.64,10.4 +6.3,0.36,0.19,3.2,0.075,15.0,39.0,0.9956,3.56,0.52,12.7 +8.8,0.24,0.54,2.5,0.083,25.0,57.0,0.9983,3.39,0.54,9.2 +13.2,0.38,0.55,2.7,0.081,5.0,16.0,1.0006,2.98,0.54,9.4 +7.5,0.64,0.0,2.4,0.077,18.0,29.0,0.9965,3.32,0.6,10.0 +8.2,0.39,0.38,1.5,0.058,10.0,29.0,0.9962,3.26,0.74,9.8 +9.2,0.755,0.18,2.2,0.148,10.0,103.0,0.9969,2.87,1.36,10.2 +9.6,0.6,0.5,2.3,0.079,28.0,71.0,0.9997,3.5,0.57,9.7 +9.6,0.6,0.5,2.3,0.079,28.0,71.0,0.9997,3.5,0.57,9.7 +11.5,0.31,0.51,2.2,0.079,14.0,28.0,0.9982,3.03,0.93,9.8 +11.4,0.46,0.5,2.7,0.122,4.0,17.0,1.0006,3.13,0.7,10.2 +11.3,0.37,0.41,2.3,0.088,6.0,16.0,0.9988,3.09,0.8,9.3 +8.3,0.54,0.24,3.4,0.076,16.0,112.0,0.9976,3.27,0.61,9.4 +8.2,0.56,0.23,3.4,0.078,14.0,104.0,0.9976,3.28,0.62,9.4 +10.0,0.58,0.22,1.9,0.08,9.0,32.0,0.9974,3.13,0.55,9.5 +7.9,0.51,0.25,2.9,0.077,21.0,45.0,0.9974,3.49,0.96,12.1 +6.8,0.69,0.0,5.6,0.124,21.0,58.0,0.9997,3.46,0.72,10.2 +6.8,0.69,0.0,5.6,0.124,21.0,58.0,0.9997,3.46,0.72,10.2 +8.8,0.6,0.29,2.2,0.098,5.0,15.0,0.9988,3.36,0.49,9.1 +8.8,0.6,0.29,2.2,0.098,5.0,15.0,0.9988,3.36,0.49,9.1 +8.7,0.54,0.26,2.5,0.097,7.0,31.0,0.9976,3.27,0.6,9.3 +7.6,0.685,0.23,2.3,0.111,20.0,84.0,0.9964,3.21,0.61,9.3 +8.7,0.54,0.26,2.5,0.097,7.0,31.0,0.9976,3.27,0.6,9.3 +10.4,0.28,0.54,2.7,0.105,5.0,19.0,0.9988,3.25,0.63,9.5 +7.6,0.41,0.14,3.0,0.087,21.0,43.0,0.9964,3.32,0.57,10.5 +10.1,0.935,0.22,3.4,0.105,11.0,86.0,1.001,3.43,0.64,11.3 +7.9,0.35,0.21,1.9,0.073,46.0,102.0,0.9964,3.27,0.58,9.5 +8.7,0.84,0.0,1.4,0.065,24.0,33.0,0.9954,3.27,0.55,9.7 +9.6,0.88,0.28,2.4,0.086,30.0,147.0,0.9979,3.24,0.53,9.4 +9.5,0.885,0.27,2.3,0.084,31.0,145.0,0.9978,3.24,0.53,9.4 +7.7,0.915,0.12,2.2,0.143,7.0,23.0,0.9964,3.35,0.65,10.2 +8.9,0.29,0.35,1.9,0.067,25.0,57.0,0.997,3.18,1.36,10.3 +9.9,0.54,0.45,2.3,0.071,16.0,40.0,0.9991,3.39,0.62,9.4 
+9.5,0.59,0.44,2.3,0.071,21.0,68.0,0.9992,3.46,0.63,9.5 +9.9,0.54,0.45,2.3,0.071,16.0,40.0,0.9991,3.39,0.62,9.4 +9.5,0.59,0.44,2.3,0.071,21.0,68.0,0.9992,3.46,0.63,9.5 +9.9,0.54,0.45,2.3,0.071,16.0,40.0,0.9991,3.39,0.62,9.4 +7.8,0.64,0.1,6.0,0.115,5.0,11.0,0.9984,3.37,0.69,10.1 +7.3,0.67,0.05,3.6,0.107,6.0,20.0,0.9972,3.4,0.63,10.1 +8.3,0.845,0.01,2.2,0.07,5.0,14.0,0.9967,3.32,0.58,11.0 +8.7,0.48,0.3,2.8,0.066,10.0,28.0,0.9964,3.33,0.67,11.2 +6.7,0.42,0.27,8.6,0.068,24.0,148.0,0.9948,3.16,0.57,11.3 +10.7,0.43,0.39,2.2,0.106,8.0,32.0,0.9986,2.89,0.5,9.6 +9.8,0.88,0.25,2.5,0.104,35.0,155.0,1.001,3.41,0.67,11.2 +15.9,0.36,0.65,7.5,0.096,22.0,71.0,0.9976,2.98,0.84,14.9 +9.4,0.33,0.59,2.8,0.079,9.0,30.0,0.9976,3.12,0.54,12.0 +8.6,0.47,0.47,2.4,0.074,7.0,29.0,0.9979,3.08,0.46,9.5 +9.7,0.55,0.17,2.9,0.087,20.0,53.0,1.0004,3.14,0.61,9.4 +10.7,0.43,0.39,2.2,0.106,8.0,32.0,0.9986,2.89,0.5,9.6 +12.0,0.5,0.59,1.4,0.073,23.0,42.0,0.998,2.92,0.68,10.5 +7.2,0.52,0.07,1.4,0.074,5.0,20.0,0.9973,3.32,0.81,9.6 +7.1,0.84,0.02,4.4,0.096,5.0,13.0,0.997,3.41,0.57,11.0 +7.2,0.52,0.07,1.4,0.074,5.0,20.0,0.9973,3.32,0.81,9.6 +7.5,0.42,0.31,1.6,0.08,15.0,42.0,0.9978,3.31,0.64,9.0 +7.2,0.57,0.06,1.6,0.076,9.0,27.0,0.9972,3.36,0.7,9.6 +10.1,0.28,0.46,1.8,0.05,5.0,13.0,0.9974,3.04,0.79,10.2 +12.1,0.4,0.52,2.0,0.092,15.0,54.0,1.0,3.03,0.66,10.2 +9.4,0.59,0.14,2.0,0.084,25.0,48.0,0.9981,3.14,0.56,9.7 +8.3,0.49,0.36,1.8,0.222,6.0,16.0,0.998,3.18,0.6,9.5 +11.3,0.34,0.45,2.0,0.082,6.0,15.0,0.9988,2.94,0.66,9.2 +10.0,0.73,0.43,2.3,0.059,15.0,31.0,0.9966,3.15,0.57,11.0 +11.3,0.34,0.45,2.0,0.082,6.0,15.0,0.9988,2.94,0.66,9.2 +6.9,0.4,0.24,2.5,0.083,30.0,45.0,0.9959,3.26,0.58,10.0 +8.2,0.73,0.21,1.7,0.074,5.0,13.0,0.9968,3.2,0.52,9.5 +9.8,1.24,0.34,2.0,0.079,32.0,151.0,0.998,3.15,0.53,9.5 +8.2,0.73,0.21,1.7,0.074,5.0,13.0,0.9968,3.2,0.52,9.5 +10.8,0.4,0.41,2.2,0.084,7.0,17.0,0.9984,3.08,0.67,9.3 +9.3,0.41,0.39,2.2,0.064,12.0,31.0,0.9984,3.26,0.65,10.2 +10.8,0.4,0.41,2.2,0.084,7.0,17.0,0.9984,3.08,0.67,9.3 +8.6,0.8,0.11,2.3,0.084,12.0,31.0,0.9979,3.4,0.48,9.9 +8.3,0.78,0.1,2.6,0.081,45.0,87.0,0.9983,3.48,0.53,10.0 +10.8,0.26,0.45,3.3,0.06,20.0,49.0,0.9972,3.13,0.54,9.6 +13.3,0.43,0.58,1.9,0.07,15.0,40.0,1.0004,3.06,0.49,9.0 +8.0,0.45,0.23,2.2,0.094,16.0,29.0,0.9962,3.21,0.49,10.2 +8.5,0.46,0.31,2.25,0.078,32.0,58.0,0.998,3.33,0.54,9.8 +8.1,0.78,0.23,2.6,0.059,5.0,15.0,0.997,3.37,0.56,11.3 +9.8,0.98,0.32,2.3,0.078,35.0,152.0,0.998,3.25,0.48,9.4 +8.1,0.78,0.23,2.6,0.059,5.0,15.0,0.997,3.37,0.56,11.3 +7.1,0.65,0.18,1.8,0.07,13.0,40.0,0.997,3.44,0.6,9.1 +9.1,0.64,0.23,3.1,0.095,13.0,38.0,0.9998,3.28,0.59,9.7 +7.7,0.66,0.04,1.6,0.039,4.0,9.0,0.9962,3.4,0.47,9.4 +8.1,0.38,0.48,1.8,0.157,5.0,17.0,0.9976,3.3,1.05,9.4 +7.4,1.185,0.0,4.25,0.097,5.0,14.0,0.9966,3.63,0.54,10.7 +9.2,0.92,0.24,2.6,0.087,12.0,93.0,0.9998,3.48,0.54,9.8 +8.6,0.49,0.51,2.0,0.422,16.0,62.0,0.9979,3.03,1.17,9.0 +9.0,0.48,0.32,2.8,0.084,21.0,122.0,0.9984,3.32,0.62,9.4 +9.0,0.47,0.31,2.7,0.084,24.0,125.0,0.9984,3.31,0.61,9.4 +5.1,0.47,0.02,1.3,0.034,18.0,44.0,0.9921,3.9,0.62,12.8 +7.0,0.65,0.02,2.1,0.066,8.0,25.0,0.9972,3.47,0.67,9.5 +7.0,0.65,0.02,2.1,0.066,8.0,25.0,0.9972,3.47,0.67,9.5 +9.4,0.615,0.28,3.2,0.087,18.0,72.0,1.0001,3.31,0.53,9.7 +11.8,0.38,0.55,2.1,0.071,5.0,19.0,0.9986,3.11,0.62,10.8 +10.6,1.02,0.43,2.9,0.076,26.0,88.0,0.9984,3.08,0.57,10.1 +7.0,0.65,0.02,2.1,0.066,8.0,25.0,0.9972,3.47,0.67,9.5 +7.0,0.64,0.02,2.1,0.067,9.0,23.0,0.997,3.47,0.67,9.4 +7.5,0.38,0.48,2.6,0.073,22.0,84.0,0.9972,3.32,0.7,9.6 
+9.1,0.765,0.04,1.6,0.078,4.0,14.0,0.998,3.29,0.54,9.7 +8.4,1.035,0.15,6.0,0.073,11.0,54.0,0.999,3.37,0.49,9.9 +7.0,0.78,0.08,2.0,0.093,10.0,19.0,0.9956,3.4,0.47,10.0 +7.4,0.49,0.19,3.0,0.077,16.0,37.0,0.9966,3.37,0.51,10.5 +7.8,0.545,0.12,2.5,0.068,11.0,35.0,0.996,3.34,0.61,11.6 +9.7,0.31,0.47,1.6,0.062,13.0,33.0,0.9983,3.27,0.66,10.0 +10.6,1.025,0.43,2.8,0.08,21.0,84.0,0.9985,3.06,0.57,10.1 +8.9,0.565,0.34,3.0,0.093,16.0,112.0,0.9998,3.38,0.61,9.5 +8.7,0.69,0.0,3.2,0.084,13.0,33.0,0.9992,3.36,0.45,9.4 +8.0,0.43,0.36,2.3,0.075,10.0,48.0,0.9976,3.34,0.46,9.4 +9.9,0.74,0.28,2.6,0.078,21.0,77.0,0.998,3.28,0.51,9.8 +7.2,0.49,0.18,2.7,0.069,13.0,34.0,0.9967,3.29,0.48,9.2 +8.0,0.43,0.36,2.3,0.075,10.0,48.0,0.9976,3.34,0.46,9.4 +7.6,0.46,0.11,2.6,0.079,12.0,49.0,0.9968,3.21,0.57,10.0 +8.4,0.56,0.04,2.0,0.082,10.0,22.0,0.9976,3.22,0.44,9.6 +7.1,0.66,0.0,3.9,0.086,17.0,45.0,0.9976,3.46,0.54,9.5 +8.4,0.56,0.04,2.0,0.082,10.0,22.0,0.9976,3.22,0.44,9.6 +8.9,0.48,0.24,2.85,0.094,35.0,106.0,0.9982,3.1,0.53,9.2 +7.6,0.42,0.08,2.7,0.084,15.0,48.0,0.9968,3.21,0.59,10.0 +7.1,0.31,0.3,2.2,0.053,36.0,127.0,0.9965,2.94,1.62,9.5 +7.5,1.115,0.1,3.1,0.086,5.0,12.0,0.9958,3.54,0.6,11.2 +9.0,0.66,0.17,3.0,0.077,5.0,13.0,0.9976,3.29,0.55,10.4 +8.1,0.72,0.09,2.8,0.084,18.0,49.0,0.9994,3.43,0.72,11.1 +6.4,0.57,0.02,1.8,0.067,4.0,11.0,0.997,3.46,0.68,9.5 +6.4,0.57,0.02,1.8,0.067,4.0,11.0,0.997,3.46,0.68,9.5 +6.4,0.865,0.03,3.2,0.071,27.0,58.0,0.995,3.61,0.49,12.7 +9.5,0.55,0.66,2.3,0.387,12.0,37.0,0.9982,3.17,0.67,9.6 +8.9,0.875,0.13,3.45,0.088,4.0,14.0,0.9994,3.44,0.52,11.5 +7.3,0.835,0.03,2.1,0.092,10.0,19.0,0.9966,3.39,0.47,9.6 +7.0,0.45,0.34,2.7,0.082,16.0,72.0,0.998,3.55,0.6,9.5 +7.7,0.56,0.2,2.0,0.075,9.0,39.0,0.9987,3.48,0.62,9.3 +7.7,0.965,0.1,2.1,0.112,11.0,22.0,0.9963,3.26,0.5,9.5 +7.7,0.965,0.1,2.1,0.112,11.0,22.0,0.9963,3.26,0.5,9.5 +8.2,0.59,0.0,2.5,0.093,19.0,58.0,1.0002,3.5,0.65,9.3 +9.0,0.46,0.23,2.8,0.092,28.0,104.0,0.9983,3.1,0.56,9.2 +9.0,0.69,0.0,2.4,0.088,19.0,38.0,0.999,3.35,0.6,9.3 +8.3,0.76,0.29,4.2,0.075,12.0,16.0,0.9965,3.45,0.68,11.5 +9.2,0.53,0.24,2.6,0.078,28.0,139.0,0.99788,3.21,0.57,9.5 +6.5,0.615,0.0,1.9,0.065,9.0,18.0,0.9972,3.46,0.65,9.2 +11.6,0.41,0.58,2.8,0.096,25.0,101.0,1.00024,3.13,0.53,10.0 +11.1,0.39,0.54,2.7,0.095,21.0,101.0,1.0001,3.13,0.51,9.5 +7.3,0.51,0.18,2.1,0.07,12.0,28.0,0.99768,3.52,0.73,9.5 +8.2,0.34,0.38,2.5,0.08,12.0,57.0,0.9978,3.3,0.47,9.0 +8.6,0.33,0.4,2.6,0.083,16.0,68.0,0.99782,3.3,0.48,9.4 +7.2,0.5,0.18,2.1,0.071,12.0,31.0,0.99761,3.52,0.72,9.6 +7.3,0.51,0.18,2.1,0.07,12.0,28.0,0.99768,3.52,0.73,9.5 +8.3,0.65,0.1,2.9,0.089,17.0,40.0,0.99803,3.29,0.55,9.5 +8.3,0.65,0.1,2.9,0.089,17.0,40.0,0.99803,3.29,0.55,9.5 +7.6,0.54,0.13,2.5,0.097,24.0,66.0,0.99785,3.39,0.61,9.4 +8.3,0.65,0.1,2.9,0.089,17.0,40.0,0.99803,3.29,0.55,9.5 +7.8,0.48,0.68,1.7,0.415,14.0,32.0,0.99656,3.09,1.06,9.1 +7.8,0.91,0.07,1.9,0.058,22.0,47.0,0.99525,3.51,0.43,10.7 +6.3,0.98,0.01,2.0,0.057,15.0,33.0,0.99488,3.6,0.46,11.2 +8.1,0.87,0.0,2.2,0.084,10.0,31.0,0.99656,3.25,0.5,9.8 +8.1,0.87,0.0,2.2,0.084,10.0,31.0,0.99656,3.25,0.5,9.8 +8.8,0.42,0.21,2.5,0.092,33.0,88.0,0.99823,3.19,0.52,9.2 +9.0,0.58,0.25,2.8,0.075,9.0,104.0,0.99779,3.23,0.57,9.7 +9.3,0.655,0.26,2.0,0.096,5.0,35.0,0.99738,3.25,0.42,9.6 +8.8,0.7,0.0,1.7,0.069,8.0,19.0,0.99701,3.31,0.53,10.0 +9.3,0.655,0.26,2.0,0.096,5.0,35.0,0.99738,3.25,0.42,9.6 +9.1,0.68,0.11,2.8,0.093,11.0,44.0,0.99888,3.31,0.55,9.5 +9.2,0.67,0.1,3.0,0.091,12.0,48.0,0.99888,3.31,0.54,9.5 +8.8,0.59,0.18,2.9,0.089,12.0,74.0,0.99738,3.14,0.54,9.4 
+7.5,0.6,0.32,2.7,0.103,13.0,98.0,0.99938,3.45,0.62,9.5 +7.1,0.59,0.02,2.3,0.082,24.0,94.0,0.99744,3.55,0.53,9.7 +7.9,0.72,0.01,1.9,0.076,7.0,32.0,0.99668,3.39,0.54,9.6 +7.1,0.59,0.02,2.3,0.082,24.0,94.0,0.99744,3.55,0.53,9.7 +9.4,0.685,0.26,2.4,0.082,23.0,143.0,0.9978,3.28,0.55,9.4 +9.5,0.57,0.27,2.3,0.082,23.0,144.0,0.99782,3.27,0.55,9.4 +7.9,0.4,0.29,1.8,0.157,1.0,44.0,0.9973,3.3,0.92,9.5 +7.9,0.4,0.3,1.8,0.157,2.0,45.0,0.99727,3.31,0.91,9.5 +7.2,1.0,0.0,3.0,0.102,7.0,16.0,0.99586,3.43,0.46,10.0 +6.9,0.765,0.18,2.4,0.243,5.5,48.0,0.99612,3.4,0.6,10.3 +6.9,0.635,0.17,2.4,0.241,6.0,18.0,0.9961,3.4,0.59,10.3 +8.3,0.43,0.3,3.4,0.079,7.0,34.0,0.99788,3.36,0.61,10.5 +7.1,0.52,0.03,2.6,0.076,21.0,92.0,0.99745,3.5,0.6,9.8 +7.0,0.57,0.0,2.0,0.19,12.0,45.0,0.99676,3.31,0.6,9.4 +6.5,0.46,0.14,2.4,0.114,9.0,37.0,0.99732,3.66,0.65,9.8 +9.0,0.82,0.05,2.4,0.081,26.0,96.0,0.99814,3.36,0.53,10.0 +6.5,0.46,0.14,2.4,0.114,9.0,37.0,0.99732,3.66,0.65,9.8 +7.1,0.59,0.01,2.5,0.077,20.0,85.0,0.99746,3.55,0.59,9.8 +9.9,0.35,0.41,2.3,0.083,11.0,61.0,0.9982,3.21,0.5,9.5 +9.9,0.35,0.41,2.3,0.083,11.0,61.0,0.9982,3.21,0.5,9.5 +10.0,0.56,0.24,2.2,0.079,19.0,58.0,0.9991,3.18,0.56,10.1 +10.0,0.56,0.24,2.2,0.079,19.0,58.0,0.9991,3.18,0.56,10.1 +8.6,0.63,0.17,2.9,0.099,21.0,119.0,0.998,3.09,0.52,9.3 +7.4,0.37,0.43,2.6,0.082,18.0,82.0,0.99708,3.33,0.68,9.7 +8.8,0.64,0.17,2.9,0.084,25.0,130.0,0.99818,3.23,0.54,9.6 +7.1,0.61,0.02,2.5,0.081,17.0,87.0,0.99745,3.48,0.6,9.7 +7.7,0.6,0.0,2.6,0.055,7.0,13.0,0.99639,3.38,0.56,10.8 +10.1,0.27,0.54,2.3,0.065,7.0,26.0,0.99531,3.17,0.53,12.5 +10.8,0.89,0.3,2.6,0.132,7.0,60.0,0.99786,2.99,1.18,10.2 +8.7,0.46,0.31,2.5,0.126,24.0,64.0,0.99746,3.1,0.74,9.6 +9.3,0.37,0.44,1.6,0.038,21.0,42.0,0.99526,3.24,0.81,10.8 +9.4,0.5,0.34,3.6,0.082,5.0,14.0,0.9987,3.29,0.52,10.7 +9.4,0.5,0.34,3.6,0.082,5.0,14.0,0.9987,3.29,0.52,10.7 +7.2,0.61,0.08,4.0,0.082,26.0,108.0,0.99641,3.25,0.51,9.4 +8.6,0.55,0.09,3.3,0.068,8.0,17.0,0.99735,3.23,0.44,10.0 +5.1,0.585,0.0,1.7,0.044,14.0,86.0,0.99264,3.56,0.94,12.9 +7.7,0.56,0.08,2.5,0.114,14.0,46.0,0.9971,3.24,0.66,9.6 +8.4,0.52,0.22,2.7,0.084,4.0,18.0,0.99682,3.26,0.57,9.9 +8.2,0.28,0.4,2.4,0.052,4.0,10.0,0.99356,3.33,0.7,12.8 +8.4,0.25,0.39,2.0,0.041,4.0,10.0,0.99386,3.27,0.71,12.5 +8.2,0.28,0.4,2.4,0.052,4.0,10.0,0.99356,3.33,0.7,12.8 +7.4,0.53,0.12,1.9,0.165,4.0,12.0,0.99702,3.26,0.86,9.2 +7.6,0.48,0.31,2.8,0.07,4.0,15.0,0.99693,3.22,0.55,10.3 +7.3,0.49,0.1,2.6,0.068,4.0,14.0,0.99562,3.3,0.47,10.5 +12.9,0.5,0.55,2.8,0.072,7.0,24.0,1.00012,3.09,0.68,10.9 +10.8,0.45,0.33,2.5,0.099,20.0,38.0,0.99818,3.24,0.71,10.8 +6.9,0.39,0.24,2.1,0.102,4.0,7.0,0.99462,3.44,0.58,11.4 +12.6,0.41,0.54,2.8,0.103,19.0,41.0,0.99939,3.21,0.76,11.3 +10.8,0.45,0.33,2.5,0.099,20.0,38.0,0.99818,3.24,0.71,10.8 +9.8,0.51,0.19,3.2,0.081,8.0,30.0,0.9984,3.23,0.58,10.5 +10.8,0.29,0.42,1.6,0.084,19.0,27.0,0.99545,3.28,0.73,11.9 +7.1,0.715,0.0,2.35,0.071,21.0,47.0,0.99632,3.29,0.45,9.4 +9.1,0.66,0.15,3.2,0.097,9.0,59.0,0.99976,3.28,0.54,9.6 +7.0,0.685,0.0,1.9,0.099,9.0,22.0,0.99606,3.34,0.6,9.7 +4.9,0.42,0.0,2.1,0.048,16.0,42.0,0.99154,3.71,0.74,14.0 +6.7,0.54,0.13,2.0,0.076,15.0,36.0,0.9973,3.61,0.64,9.8 +6.7,0.54,0.13,2.0,0.076,15.0,36.0,0.9973,3.61,0.64,9.8 +7.1,0.48,0.28,2.8,0.068,6.0,16.0,0.99682,3.24,0.53,10.3 +7.1,0.46,0.14,2.8,0.076,15.0,37.0,0.99624,3.36,0.49,10.7 +7.5,0.27,0.34,2.3,0.05,4.0,8.0,0.9951,3.4,0.64,11.0 +7.1,0.46,0.14,2.8,0.076,15.0,37.0,0.99624,3.36,0.49,10.7 +7.8,0.57,0.09,2.3,0.065,34.0,45.0,0.99417,3.46,0.74,12.7 
+5.9,0.61,0.08,2.1,0.071,16.0,24.0,0.99376,3.56,0.77,11.1 +7.5,0.685,0.07,2.5,0.058,5.0,9.0,0.99632,3.38,0.55,10.9 +5.9,0.61,0.08,2.1,0.071,16.0,24.0,0.99376,3.56,0.77,11.1 +10.4,0.44,0.42,1.5,0.145,34.0,48.0,0.99832,3.38,0.86,9.9 +11.6,0.47,0.44,1.6,0.147,36.0,51.0,0.99836,3.38,0.86,9.9 +8.8,0.685,0.26,1.6,0.088,16.0,23.0,0.99694,3.32,0.47,9.4 +7.6,0.665,0.1,1.5,0.066,27.0,55.0,0.99655,3.39,0.51,9.3 +6.7,0.28,0.28,2.4,0.012,36.0,100.0,0.99064,3.26,0.39,11.7 +6.7,0.28,0.28,2.4,0.012,36.0,100.0,0.99064,3.26,0.39,11.7 +10.1,0.31,0.35,1.6,0.075,9.0,28.0,0.99672,3.24,0.83,11.2 +6.0,0.5,0.04,2.2,0.092,13.0,26.0,0.99647,3.46,0.47,10.0 +11.1,0.42,0.47,2.65,0.085,9.0,34.0,0.99736,3.24,0.77,12.1 +6.6,0.66,0.0,3.0,0.115,21.0,31.0,0.99629,3.45,0.63,10.3 +10.6,0.5,0.45,2.6,0.119,34.0,68.0,0.99708,3.23,0.72,10.9 +7.1,0.685,0.35,2.0,0.088,9.0,92.0,0.9963,3.28,0.62,9.4 +9.9,0.25,0.46,1.7,0.062,26.0,42.0,0.9959,3.18,0.83,10.6 +6.4,0.64,0.21,1.8,0.081,14.0,31.0,0.99689,3.59,0.66,9.8 +6.4,0.64,0.21,1.8,0.081,14.0,31.0,0.99689,3.59,0.66,9.8 +7.4,0.68,0.16,1.8,0.078,12.0,39.0,0.9977,3.5,0.7,9.9 +6.4,0.64,0.21,1.8,0.081,14.0,31.0,0.99689,3.59,0.66,9.8 +6.4,0.63,0.21,1.6,0.08,12.0,32.0,0.99689,3.58,0.66,9.8 +9.3,0.43,0.44,1.9,0.085,9.0,22.0,0.99708,3.28,0.55,9.5 +9.3,0.43,0.44,1.9,0.085,9.0,22.0,0.99708,3.28,0.55,9.5 +8.0,0.42,0.32,2.5,0.08,26.0,122.0,0.99801,3.22,1.07,9.7 +9.3,0.36,0.39,1.5,0.08,41.0,55.0,0.99652,3.47,0.73,10.9 +9.3,0.36,0.39,1.5,0.08,41.0,55.0,0.99652,3.47,0.73,10.9 +7.6,0.735,0.02,2.5,0.071,10.0,14.0,0.99538,3.51,0.71,11.7 +9.3,0.36,0.39,1.5,0.08,41.0,55.0,0.99652,3.47,0.73,10.9 +8.2,0.26,0.34,2.5,0.073,16.0,47.0,0.99594,3.4,0.78,11.3 +11.7,0.28,0.47,1.7,0.054,17.0,32.0,0.99686,3.15,0.67,10.6 +6.8,0.56,0.22,1.8,0.074,15.0,24.0,0.99438,3.4,0.82,11.2 +7.2,0.62,0.06,2.7,0.077,15.0,85.0,0.99746,3.51,0.54,9.5 +5.8,1.01,0.66,2.0,0.039,15.0,88.0,0.99357,3.66,0.6,11.5 +7.5,0.42,0.32,2.7,0.067,7.0,25.0,0.99628,3.24,0.44,10.4 +7.2,0.62,0.06,2.5,0.078,17.0,84.0,0.99746,3.51,0.53,9.7 +7.2,0.62,0.06,2.7,0.077,15.0,85.0,0.99746,3.51,0.54,9.5 +7.2,0.635,0.07,2.6,0.077,16.0,86.0,0.99748,3.51,0.54,9.7 +6.8,0.49,0.22,2.3,0.071,13.0,24.0,0.99438,3.41,0.83,11.3 +6.9,0.51,0.23,2.0,0.072,13.0,22.0,0.99438,3.4,0.84,11.2 +6.8,0.56,0.22,1.8,0.074,15.0,24.0,0.99438,3.4,0.82,11.2 +7.6,0.63,0.03,2.0,0.08,27.0,43.0,0.99578,3.44,0.64,10.9 +7.7,0.715,0.01,2.1,0.064,31.0,43.0,0.99371,3.41,0.57,11.8 +6.9,0.56,0.03,1.5,0.086,36.0,46.0,0.99522,3.53,0.57,10.6 +7.3,0.35,0.24,2.0,0.067,28.0,48.0,0.99576,3.43,0.54,10.0 +9.1,0.21,0.37,1.6,0.067,6.0,10.0,0.99552,3.23,0.58,11.1 +10.4,0.38,0.46,2.1,0.104,6.0,10.0,0.99664,3.12,0.65,11.8 +8.8,0.31,0.4,2.8,0.109,7.0,16.0,0.99614,3.31,0.79,11.8 +7.1,0.47,0.0,2.2,0.067,7.0,14.0,0.99517,3.4,0.58,10.9 +7.7,0.715,0.01,2.1,0.064,31.0,43.0,0.99371,3.41,0.57,11.8 +8.8,0.61,0.19,4.0,0.094,30.0,69.0,0.99787,3.22,0.5,10.0 +7.2,0.6,0.04,2.5,0.076,18.0,88.0,0.99745,3.53,0.55,9.5 +9.2,0.56,0.18,1.6,0.078,10.0,21.0,0.99576,3.15,0.49,9.9 +7.6,0.715,0.0,2.1,0.068,30.0,35.0,0.99533,3.48,0.65,11.4 +8.4,0.31,0.29,3.1,0.194,14.0,26.0,0.99536,3.22,0.78,12.0 +7.2,0.6,0.04,2.5,0.076,18.0,88.0,0.99745,3.53,0.55,9.5 +8.8,0.61,0.19,4.0,0.094,30.0,69.0,0.99787,3.22,0.5,10.0 +8.9,0.75,0.14,2.5,0.086,9.0,30.0,0.99824,3.34,0.64,10.5 +9.0,0.8,0.12,2.4,0.083,8.0,28.0,0.99836,3.33,0.65,10.4 +10.7,0.52,0.38,2.6,0.066,29.0,56.0,0.99577,3.15,0.79,12.1 +6.8,0.57,0.0,2.5,0.072,32.0,64.0,0.99491,3.43,0.56,11.2 +10.7,0.9,0.34,6.6,0.112,23.0,99.0,1.00289,3.22,0.68,9.3 +7.2,0.34,0.24,2.0,0.071,30.0,52.0,0.99576,3.44,0.58,10.1 
+7.2,0.66,0.03,2.3,0.078,16.0,86.0,0.99743,3.53,0.57,9.7 +10.1,0.45,0.23,1.9,0.082,10.0,18.0,0.99774,3.22,0.65,9.3 +7.2,0.66,0.03,2.3,0.078,16.0,86.0,0.99743,3.53,0.57,9.7 +7.2,0.63,0.03,2.2,0.08,17.0,88.0,0.99745,3.53,0.58,9.8 +7.1,0.59,0.01,2.3,0.08,27.0,43.0,0.9955,3.42,0.58,10.7 +8.3,0.31,0.39,2.4,0.078,17.0,43.0,0.99444,3.31,0.77,12.5 +7.1,0.59,0.01,2.3,0.08,27.0,43.0,0.9955,3.42,0.58,10.7 +8.3,0.31,0.39,2.4,0.078,17.0,43.0,0.99444,3.31,0.77,12.5 +8.3,1.02,0.02,3.4,0.084,6.0,11.0,0.99892,3.48,0.49,11.0 +8.9,0.31,0.36,2.6,0.056,10.0,39.0,0.99562,3.4,0.69,11.8 +7.4,0.635,0.1,2.4,0.08,16.0,33.0,0.99736,3.58,0.69,10.8 +7.4,0.635,0.1,2.4,0.08,16.0,33.0,0.99736,3.58,0.69,10.8 +6.8,0.59,0.06,6.0,0.06,11.0,18.0,0.9962,3.41,0.59,10.8 +6.8,0.59,0.06,6.0,0.06,11.0,18.0,0.9962,3.41,0.59,10.8 +9.2,0.58,0.2,3.0,0.081,15.0,115.0,0.998,3.23,0.59,9.5 +7.2,0.54,0.27,2.6,0.084,12.0,78.0,0.9964,3.39,0.71,11.0 +6.1,0.56,0.0,2.2,0.079,6.0,9.0,0.9948,3.59,0.54,11.5 +7.4,0.52,0.13,2.4,0.078,34.0,61.0,0.99528,3.43,0.59,10.8 +7.3,0.305,0.39,1.2,0.059,7.0,11.0,0.99331,3.29,0.52,11.5 +9.3,0.38,0.48,3.8,0.132,3.0,11.0,0.99577,3.23,0.57,13.2 +9.1,0.28,0.46,9.0,0.114,3.0,9.0,0.99901,3.18,0.6,10.9 +10.0,0.46,0.44,2.9,0.065,4.0,8.0,0.99674,3.33,0.62,12.2 +9.4,0.395,0.46,4.6,0.094,3.0,10.0,0.99639,3.27,0.64,12.2 +7.3,0.305,0.39,1.2,0.059,7.0,11.0,0.99331,3.29,0.52,11.5 +8.6,0.315,0.4,2.2,0.079,3.0,6.0,0.99512,3.27,0.67,11.9 +5.3,0.715,0.19,1.5,0.161,7.0,62.0,0.99395,3.62,0.61,11.0 +6.8,0.41,0.31,8.8,0.084,26.0,45.0,0.99824,3.38,0.64,10.1 +8.4,0.36,0.32,2.2,0.081,32.0,79.0,0.9964,3.3,0.72,11.0 +8.4,0.62,0.12,1.8,0.072,38.0,46.0,0.99504,3.38,0.89,11.8 +9.6,0.41,0.37,2.3,0.091,10.0,23.0,0.99786,3.24,0.56,10.5 +8.4,0.36,0.32,2.2,0.081,32.0,79.0,0.9964,3.3,0.72,11.0 +8.4,0.62,0.12,1.8,0.072,38.0,46.0,0.99504,3.38,0.89,11.8 +6.8,0.41,0.31,8.8,0.084,26.0,45.0,0.99824,3.38,0.64,10.1 +8.6,0.47,0.27,2.3,0.055,14.0,28.0,0.99516,3.18,0.8,11.2 +8.6,0.22,0.36,1.9,0.064,53.0,77.0,0.99604,3.47,0.87,11.0 +9.4,0.24,0.33,2.3,0.061,52.0,73.0,0.99786,3.47,0.9,10.2 +8.4,0.67,0.19,2.2,0.093,11.0,75.0,0.99736,3.2,0.59,9.2 +8.6,0.47,0.27,2.3,0.055,14.0,28.0,0.99516,3.18,0.8,11.2 +8.7,0.33,0.38,3.3,0.063,10.0,19.0,0.99468,3.3,0.73,12.0 +6.6,0.61,0.01,1.9,0.08,8.0,25.0,0.99746,3.69,0.73,10.5 +7.4,0.61,0.01,2.0,0.074,13.0,38.0,0.99748,3.48,0.65,9.8 +7.6,0.4,0.29,1.9,0.078,29.0,66.0,0.9971,3.45,0.59,9.5 +7.4,0.61,0.01,2.0,0.074,13.0,38.0,0.99748,3.48,0.65,9.8 +6.6,0.61,0.01,1.9,0.08,8.0,25.0,0.99746,3.69,0.73,10.5 +8.8,0.3,0.38,2.3,0.06,19.0,72.0,0.99543,3.39,0.72,11.8 +8.8,0.3,0.38,2.3,0.06,19.0,72.0,0.99543,3.39,0.72,11.8 +12.0,0.63,0.5,1.4,0.071,6.0,26.0,0.99791,3.07,0.6,10.4 +7.2,0.38,0.38,2.8,0.068,23.0,42.0,0.99356,3.34,0.72,12.9 +6.2,0.46,0.17,1.6,0.073,7.0,11.0,0.99425,3.61,0.54,11.4 +9.6,0.33,0.52,2.2,0.074,13.0,25.0,0.99509,3.36,0.76,12.4 +9.9,0.27,0.49,5.0,0.082,9.0,17.0,0.99484,3.19,0.52,12.5 +10.1,0.43,0.4,2.6,0.092,13.0,52.0,0.99834,3.22,0.64,10.0 +9.8,0.5,0.34,2.3,0.094,10.0,45.0,0.99864,3.24,0.6,9.7 +8.3,0.3,0.49,3.8,0.09,11.0,24.0,0.99498,3.27,0.64,12.1 +10.2,0.44,0.42,2.0,0.071,7.0,20.0,0.99566,3.14,0.79,11.1 +10.2,0.44,0.58,4.1,0.092,11.0,24.0,0.99745,3.29,0.99,12.0 +8.3,0.28,0.48,2.1,0.093,6.0,12.0,0.99408,3.26,0.62,12.4 +8.9,0.12,0.45,1.8,0.075,10.0,21.0,0.99552,3.41,0.76,11.9 +8.9,0.12,0.45,1.8,0.075,10.0,21.0,0.99552,3.41,0.76,11.9 +8.9,0.12,0.45,1.8,0.075,10.0,21.0,0.99552,3.41,0.76,11.9 +8.3,0.28,0.48,2.1,0.093,6.0,12.0,0.99408,3.26,0.62,12.4 +8.2,0.31,0.4,2.2,0.058,6.0,10.0,0.99536,3.31,0.68,11.2 
+10.2,0.34,0.48,2.1,0.052,5.0,9.0,0.99458,3.2,0.69,12.1 +7.6,0.43,0.4,2.7,0.082,6.0,11.0,0.99538,3.44,0.54,12.2 +8.5,0.21,0.52,1.9,0.09,9.0,23.0,0.99648,3.36,0.67,10.4 +9.0,0.36,0.52,2.1,0.111,5.0,10.0,0.99568,3.31,0.62,11.3 +9.5,0.37,0.52,2.0,0.088,12.0,51.0,0.99613,3.29,0.58,11.1 +6.4,0.57,0.12,2.3,0.12,25.0,36.0,0.99519,3.47,0.71,11.3 +8.0,0.59,0.05,2.0,0.089,12.0,32.0,0.99735,3.36,0.61,10.0 +8.5,0.47,0.27,1.9,0.058,18.0,38.0,0.99518,3.16,0.85,11.1 +7.1,0.56,0.14,1.6,0.078,7.0,18.0,0.99592,3.27,0.62,9.3 +6.6,0.57,0.02,2.1,0.115,6.0,16.0,0.99654,3.38,0.69,9.5 +8.8,0.27,0.39,2.0,0.1,20.0,27.0,0.99546,3.15,0.69,11.2 +8.5,0.47,0.27,1.9,0.058,18.0,38.0,0.99518,3.16,0.85,11.1 +8.3,0.34,0.4,2.4,0.065,24.0,48.0,0.99554,3.34,0.86,11.0 +9.0,0.38,0.41,2.4,0.103,6.0,10.0,0.99604,3.13,0.58,11.9 +8.5,0.66,0.2,2.1,0.097,23.0,113.0,0.99733,3.13,0.48,9.2 +9.0,0.4,0.43,2.4,0.068,29.0,46.0,0.9943,3.2,0.6,12.2 +6.7,0.56,0.09,2.9,0.079,7.0,22.0,0.99669,3.46,0.61,10.2 +10.4,0.26,0.48,1.9,0.066,6.0,10.0,0.99724,3.33,0.87,10.9 +10.4,0.26,0.48,1.9,0.066,6.0,10.0,0.99724,3.33,0.87,10.9 +10.1,0.38,0.5,2.4,0.104,6.0,13.0,0.99643,3.22,0.65,11.6 +8.5,0.34,0.44,1.7,0.079,6.0,12.0,0.99605,3.52,0.63,10.7 +8.8,0.33,0.41,5.9,0.073,7.0,13.0,0.99658,3.3,0.62,12.1 +7.2,0.41,0.3,2.1,0.083,35.0,72.0,0.997,3.44,0.52,9.4 +7.2,0.41,0.3,2.1,0.083,35.0,72.0,0.997,3.44,0.52,9.4 +8.4,0.59,0.29,2.6,0.109,31.0,119.0,0.99801,3.15,0.5,9.1 +7.0,0.4,0.32,3.6,0.061,9.0,29.0,0.99416,3.28,0.49,11.3 +12.2,0.45,0.49,1.4,0.075,3.0,6.0,0.9969,3.13,0.63,10.4 +9.1,0.5,0.3,1.9,0.065,8.0,17.0,0.99774,3.32,0.71,10.5 +9.5,0.86,0.26,1.9,0.079,13.0,28.0,0.99712,3.25,0.62,10.0 +7.3,0.52,0.32,2.1,0.07,51.0,70.0,0.99418,3.34,0.82,12.9 +9.1,0.5,0.3,1.9,0.065,8.0,17.0,0.99774,3.32,0.71,10.5 +12.2,0.45,0.49,1.4,0.075,3.0,6.0,0.9969,3.13,0.63,10.4 +7.4,0.58,0.0,2.0,0.064,7.0,11.0,0.99562,3.45,0.58,11.3 +9.8,0.34,0.39,1.4,0.066,3.0,7.0,0.9947,3.19,0.55,11.4 +7.1,0.36,0.3,1.6,0.08,35.0,70.0,0.99693,3.44,0.5,9.4 +7.7,0.39,0.12,1.7,0.097,19.0,27.0,0.99596,3.16,0.49,9.4 +9.7,0.295,0.4,1.5,0.073,14.0,21.0,0.99556,3.14,0.51,10.9 +7.7,0.39,0.12,1.7,0.097,19.0,27.0,0.99596,3.16,0.49,9.4 +7.1,0.34,0.28,2.0,0.082,31.0,68.0,0.99694,3.45,0.48,9.4 +6.5,0.4,0.1,2.0,0.076,30.0,47.0,0.99554,3.36,0.48,9.4 +7.1,0.34,0.28,2.0,0.082,31.0,68.0,0.99694,3.45,0.48,9.4 +10.0,0.35,0.45,2.5,0.092,20.0,88.0,0.99918,3.15,0.43,9.4 +7.7,0.6,0.06,2.0,0.079,19.0,41.0,0.99697,3.39,0.62,10.1 +5.6,0.66,0.0,2.2,0.087,3.0,11.0,0.99378,3.71,0.63,12.8 +5.6,0.66,0.0,2.2,0.087,3.0,11.0,0.99378,3.71,0.63,12.8 +8.9,0.84,0.34,1.4,0.05,4.0,10.0,0.99554,3.12,0.48,9.1 +6.4,0.69,0.0,1.65,0.055,7.0,12.0,0.99162,3.47,0.53,12.9 +7.5,0.43,0.3,2.2,0.062,6.0,12.0,0.99495,3.44,0.72,11.5 +9.9,0.35,0.38,1.5,0.058,31.0,47.0,0.99676,3.26,0.82,10.6 +9.1,0.29,0.33,2.05,0.063,13.0,27.0,0.99516,3.26,0.84,11.7 +6.8,0.36,0.32,1.8,0.067,4.0,8.0,0.9928,3.36,0.55,12.8 +8.2,0.43,0.29,1.6,0.081,27.0,45.0,0.99603,3.25,0.54,10.3 +6.8,0.36,0.32,1.8,0.067,4.0,8.0,0.9928,3.36,0.55,12.8 +9.1,0.29,0.33,2.05,0.063,13.0,27.0,0.99516,3.26,0.84,11.7 +9.1,0.3,0.34,2.0,0.064,12.0,25.0,0.99516,3.26,0.84,11.7 +8.9,0.35,0.4,3.6,0.11,12.0,24.0,0.99549,3.23,0.7,12.0 +9.6,0.5,0.36,2.8,0.116,26.0,55.0,0.99722,3.18,0.68,10.9 +8.9,0.28,0.45,1.7,0.067,7.0,12.0,0.99354,3.25,0.55,12.3 +8.9,0.32,0.31,2.0,0.088,12.0,19.0,0.9957,3.17,0.55,10.4 +7.7,1.005,0.15,2.1,0.102,11.0,32.0,0.99604,3.23,0.48,10.0 +7.5,0.71,0.0,1.6,0.092,22.0,31.0,0.99635,3.38,0.58,10.0 +8.0,0.58,0.16,2.0,0.12,3.0,7.0,0.99454,3.22,0.58,11.2 
+10.5,0.39,0.46,2.2,0.075,14.0,27.0,0.99598,3.06,0.84,11.4 +8.9,0.38,0.4,2.2,0.068,12.0,28.0,0.99486,3.27,0.75,12.6 +8.0,0.18,0.37,0.9,0.049,36.0,109.0,0.99007,2.89,0.44,12.7 +8.0,0.18,0.37,0.9,0.049,36.0,109.0,0.99007,2.89,0.44,12.7 +7.0,0.5,0.14,1.8,0.078,10.0,23.0,0.99636,3.53,0.61,10.4 +11.3,0.36,0.66,2.4,0.123,3.0,8.0,0.99642,3.2,0.53,11.9 +11.3,0.36,0.66,2.4,0.123,3.0,8.0,0.99642,3.2,0.53,11.9 +7.0,0.51,0.09,2.1,0.062,4.0,9.0,0.99584,3.35,0.54,10.5 +8.2,0.32,0.42,2.3,0.098,3.0,9.0,0.99506,3.27,0.55,12.3 +7.7,0.58,0.01,1.8,0.088,12.0,18.0,0.99568,3.32,0.56,10.5 +8.6,0.83,0.0,2.8,0.095,17.0,43.0,0.99822,3.33,0.6,10.4 +7.9,0.31,0.32,1.9,0.066,14.0,36.0,0.99364,3.41,0.56,12.6 +6.4,0.795,0.0,2.2,0.065,28.0,52.0,0.99378,3.49,0.52,11.6 +7.2,0.34,0.21,2.5,0.075,41.0,68.0,0.99586,3.37,0.54,10.1 +7.7,0.58,0.01,1.8,0.088,12.0,18.0,0.99568,3.32,0.56,10.5 +7.1,0.59,0.0,2.1,0.091,9.0,14.0,0.99488,3.42,0.55,11.5 +7.3,0.55,0.01,1.8,0.093,9.0,15.0,0.99514,3.35,0.58,11.0 +8.1,0.82,0.0,4.1,0.095,5.0,14.0,0.99854,3.36,0.53,9.6 +7.5,0.57,0.08,2.6,0.089,14.0,27.0,0.99592,3.3,0.59,10.4 +8.9,0.745,0.18,2.5,0.077,15.0,48.0,0.99739,3.2,0.47,9.7 +10.1,0.37,0.34,2.4,0.085,5.0,17.0,0.99683,3.17,0.65,10.6 +7.6,0.31,0.34,2.5,0.082,26.0,35.0,0.99356,3.22,0.59,12.5 +7.3,0.91,0.1,1.8,0.074,20.0,56.0,0.99672,3.35,0.56,9.2 +8.7,0.41,0.41,6.2,0.078,25.0,42.0,0.9953,3.24,0.77,12.6 +8.9,0.5,0.21,2.2,0.088,21.0,39.0,0.99692,3.33,0.83,11.1 +7.4,0.965,0.0,2.2,0.088,16.0,32.0,0.99756,3.58,0.67,10.2 +6.9,0.49,0.19,1.7,0.079,13.0,26.0,0.99547,3.38,0.64,9.8 +8.9,0.5,0.21,2.2,0.088,21.0,39.0,0.99692,3.33,0.83,11.1 +9.5,0.39,0.41,8.9,0.069,18.0,39.0,0.99859,3.29,0.81,10.9 +6.4,0.39,0.33,3.3,0.046,12.0,53.0,0.99294,3.36,0.62,12.2 +6.9,0.44,0.0,1.4,0.07,32.0,38.0,0.99438,3.32,0.58,11.4 +7.6,0.78,0.0,1.7,0.076,33.0,45.0,0.99612,3.31,0.62,10.7 +7.1,0.43,0.17,1.8,0.082,27.0,51.0,0.99634,3.49,0.64,10.4 +9.3,0.49,0.36,1.7,0.081,3.0,14.0,0.99702,3.27,0.78,10.9 +9.3,0.5,0.36,1.8,0.084,6.0,17.0,0.99704,3.27,0.77,10.8 +7.1,0.43,0.17,1.8,0.082,27.0,51.0,0.99634,3.49,0.64,10.4 +8.5,0.46,0.59,1.4,0.414,16.0,45.0,0.99702,3.03,1.34,9.2 +5.6,0.605,0.05,2.4,0.073,19.0,25.0,0.99258,3.56,0.55,12.9 +8.3,0.33,0.42,2.3,0.07,9.0,20.0,0.99426,3.38,0.77,12.7 +8.2,0.64,0.27,2.0,0.095,5.0,77.0,0.99747,3.13,0.62,9.1 +8.2,0.64,0.27,2.0,0.095,5.0,77.0,0.99747,3.13,0.62,9.1 +8.9,0.48,0.53,4.0,0.101,3.0,10.0,0.99586,3.21,0.59,12.1 +7.6,0.42,0.25,3.9,0.104,28.0,90.0,0.99784,3.15,0.57,9.1 +9.9,0.53,0.57,2.4,0.093,30.0,52.0,0.9971,3.19,0.76,11.6 +8.9,0.48,0.53,4.0,0.101,3.0,10.0,0.99586,3.21,0.59,12.1 +11.6,0.23,0.57,1.8,0.074,3.0,8.0,0.9981,3.14,0.7,9.9 +9.1,0.4,0.5,1.8,0.071,7.0,16.0,0.99462,3.21,0.69,12.5 +8.0,0.38,0.44,1.9,0.098,6.0,15.0,0.9956,3.3,0.64,11.4 +10.2,0.29,0.65,2.4,0.075,6.0,17.0,0.99565,3.22,0.63,11.8 +8.2,0.74,0.09,2.0,0.067,5.0,10.0,0.99418,3.28,0.57,11.8 +7.7,0.61,0.18,2.4,0.083,6.0,20.0,0.9963,3.29,0.6,10.2 +6.6,0.52,0.08,2.4,0.07,13.0,26.0,0.99358,3.4,0.72,12.5 +11.1,0.31,0.53,2.2,0.06,3.0,10.0,0.99572,3.02,0.83,10.9 +11.1,0.31,0.53,2.2,0.06,3.0,10.0,0.99572,3.02,0.83,10.9 +8.0,0.62,0.35,2.8,0.086,28.0,52.0,0.997,3.31,0.62,10.8 +9.3,0.33,0.45,1.5,0.057,19.0,37.0,0.99498,3.18,0.89,11.1 +7.5,0.77,0.2,8.1,0.098,30.0,92.0,0.99892,3.2,0.58,9.2 +7.2,0.35,0.26,1.8,0.083,33.0,75.0,0.9968,3.4,0.58,9.5 +8.0,0.62,0.33,2.7,0.088,16.0,37.0,0.9972,3.31,0.58,10.7 +7.5,0.77,0.2,8.1,0.098,30.0,92.0,0.99892,3.2,0.58,9.2 +9.1,0.25,0.34,2.0,0.071,45.0,67.0,0.99769,3.44,0.86,10.2 +9.9,0.32,0.56,2.0,0.073,3.0,8.0,0.99534,3.15,0.73,11.4 
+8.6,0.37,0.65,6.4,0.08,3.0,8.0,0.99817,3.27,0.58,11.0 +8.6,0.37,0.65,6.4,0.08,3.0,8.0,0.99817,3.27,0.58,11.0 +7.9,0.3,0.68,8.3,0.05,37.5,278.0,0.99316,3.01,0.51,12.3 +10.3,0.27,0.56,1.4,0.047,3.0,8.0,0.99471,3.16,0.51,11.8 +7.9,0.3,0.68,8.3,0.05,37.5,289.0,0.99316,3.01,0.51,12.3 +7.2,0.38,0.3,1.8,0.073,31.0,70.0,0.99685,3.42,0.59,9.5 +8.7,0.42,0.45,2.4,0.072,32.0,59.0,0.99617,3.33,0.77,12.0 +7.2,0.38,0.3,1.8,0.073,31.0,70.0,0.99685,3.42,0.59,9.5 +6.8,0.48,0.08,1.8,0.074,40.0,64.0,0.99529,3.12,0.49,9.6 +8.5,0.34,0.4,4.7,0.055,3.0,9.0,0.99738,3.38,0.66,11.6 +7.9,0.19,0.42,1.6,0.057,18.0,30.0,0.994,3.29,0.69,11.2 +11.6,0.41,0.54,1.5,0.095,22.0,41.0,0.99735,3.02,0.76,9.9 +11.6,0.41,0.54,1.5,0.095,22.0,41.0,0.99735,3.02,0.76,9.9 +10.0,0.26,0.54,1.9,0.083,42.0,74.0,0.99451,2.98,0.63,11.8 +7.9,0.34,0.42,2.0,0.086,8.0,19.0,0.99546,3.35,0.6,11.4 +7.0,0.54,0.09,2.0,0.081,10.0,16.0,0.99479,3.43,0.59,11.5 +9.2,0.31,0.36,2.2,0.079,11.0,31.0,0.99615,3.33,0.86,12.0 +6.6,0.725,0.09,5.5,0.117,9.0,17.0,0.99655,3.35,0.49,10.8 +9.4,0.4,0.47,2.5,0.087,6.0,20.0,0.99772,3.15,0.5,10.5 +6.6,0.725,0.09,5.5,0.117,9.0,17.0,0.99655,3.35,0.49,10.8 +8.6,0.52,0.38,1.5,0.096,5.0,18.0,0.99666,3.2,0.52,9.4 +8.0,0.31,0.45,2.1,0.216,5.0,16.0,0.99358,3.15,0.81,12.5 +8.6,0.52,0.38,1.5,0.096,5.0,18.0,0.99666,3.2,0.52,9.4 +8.4,0.34,0.42,2.1,0.072,23.0,36.0,0.99392,3.11,0.78,12.4 +7.4,0.49,0.27,2.1,0.071,14.0,25.0,0.99388,3.35,0.63,12.0 +6.1,0.48,0.09,1.7,0.078,18.0,30.0,0.99402,3.45,0.54,11.2 +7.4,0.49,0.27,2.1,0.071,14.0,25.0,0.99388,3.35,0.63,12.0 +8.0,0.48,0.34,2.2,0.073,16.0,25.0,0.9936,3.28,0.66,12.4 +6.3,0.57,0.28,2.1,0.048,13.0,49.0,0.99374,3.41,0.6,12.8 +8.2,0.23,0.42,1.9,0.069,9.0,17.0,0.99376,3.21,0.54,12.3 +9.1,0.3,0.41,2.0,0.068,10.0,24.0,0.99523,3.27,0.85,11.7 +8.1,0.78,0.1,3.3,0.09,4.0,13.0,0.99855,3.36,0.49,9.5 +10.8,0.47,0.43,2.1,0.171,27.0,66.0,0.9982,3.17,0.76,10.8 +8.3,0.53,0.0,1.4,0.07,6.0,14.0,0.99593,3.25,0.64,10.0 +5.4,0.42,0.27,2.0,0.092,23.0,55.0,0.99471,3.78,0.64,12.3 +7.9,0.33,0.41,1.5,0.056,6.0,35.0,0.99396,3.29,0.71,11.0 +8.9,0.24,0.39,1.6,0.074,3.0,10.0,0.99698,3.12,0.59,9.5 +5.0,0.4,0.5,4.3,0.046,29.0,80.0,0.9902,3.49,0.66,13.6 +7.0,0.69,0.07,2.5,0.091,15.0,21.0,0.99572,3.38,0.6,11.3 +7.0,0.69,0.07,2.5,0.091,15.0,21.0,0.99572,3.38,0.6,11.3 +7.0,0.69,0.07,2.5,0.091,15.0,21.0,0.99572,3.38,0.6,11.3 +7.1,0.39,0.12,2.1,0.065,14.0,24.0,0.99252,3.3,0.53,13.3 +5.6,0.66,0.0,2.5,0.066,7.0,15.0,0.99256,3.52,0.58,12.9 +7.9,0.54,0.34,2.5,0.076,8.0,17.0,0.99235,3.2,0.72,13.1 +6.6,0.5,0.0,1.8,0.062,21.0,28.0,0.99352,3.44,0.55,12.3 +6.3,0.47,0.0,1.4,0.055,27.0,33.0,0.9922,3.45,0.48,12.3 +10.7,0.4,0.37,1.9,0.081,17.0,29.0,0.99674,3.12,0.65,11.2 +6.5,0.58,0.0,2.2,0.096,3.0,13.0,0.99557,3.62,0.62,11.5 +8.8,0.24,0.35,1.7,0.055,13.0,27.0,0.99394,3.14,0.59,11.3 +5.8,0.29,0.26,1.7,0.063,3.0,11.0,0.9915,3.39,0.54,13.5 +6.3,0.76,0.0,2.9,0.072,26.0,52.0,0.99379,3.51,0.6,11.5 +10.0,0.43,0.33,2.7,0.095,28.0,89.0,0.9984,3.22,0.68,10.0 +10.5,0.43,0.35,3.3,0.092,24.0,70.0,0.99798,3.21,0.69,10.5 +9.1,0.6,0.0,1.9,0.058,5.0,10.0,0.9977,3.18,0.63,10.4 +5.9,0.19,0.21,1.7,0.045,57.0,135.0,0.99341,3.32,0.44,9.5 +7.4,0.36,0.34,1.8,0.075,18.0,38.0,0.9933,3.38,0.88,13.6 +7.2,0.48,0.07,5.5,0.089,10.0,18.0,0.99684,3.37,0.68,11.2 +8.5,0.28,0.35,1.7,0.061,6.0,15.0,0.99524,3.3,0.74,11.8 +8.0,0.25,0.43,1.7,0.067,22.0,50.0,0.9946,3.38,0.6,11.9 +10.4,0.52,0.45,2.0,0.08,6.0,13.0,0.99774,3.22,0.76,11.4 +10.4,0.52,0.45,2.0,0.08,6.0,13.0,0.99774,3.22,0.76,11.4 +7.5,0.41,0.15,3.7,0.104,29.0,94.0,0.99786,3.14,0.58,9.1 
+8.2,0.51,0.24,2.0,0.079,16.0,86.0,0.99764,3.34,0.64,9.5 +7.3,0.4,0.3,1.7,0.08,33.0,79.0,0.9969,3.41,0.65,9.5 +8.2,0.38,0.32,2.5,0.08,24.0,71.0,0.99624,3.27,0.85,11.0 +6.9,0.45,0.11,2.4,0.043,6.0,12.0,0.99354,3.3,0.65,11.4 +7.0,0.22,0.3,1.8,0.065,16.0,20.0,0.99672,3.61,0.82,10.0 +7.3,0.32,0.23,2.3,0.066,35.0,70.0,0.99588,3.43,0.62,10.1 +8.2,0.2,0.43,2.5,0.076,31.0,51.0,0.99672,3.53,0.81,10.4 +7.8,0.5,0.12,1.8,0.178,6.0,21.0,0.996,3.28,0.87,9.8 +10.0,0.41,0.45,6.2,0.071,6.0,14.0,0.99702,3.21,0.49,11.8 +7.8,0.39,0.42,2.0,0.086,9.0,21.0,0.99526,3.39,0.66,11.6 +10.0,0.35,0.47,2.0,0.061,6.0,11.0,0.99585,3.23,0.52,12.0 +8.2,0.33,0.32,2.8,0.067,4.0,12.0,0.99473,3.3,0.76,12.8 +6.1,0.58,0.23,2.5,0.044,16.0,70.0,0.99352,3.46,0.65,12.5 +8.3,0.6,0.25,2.2,0.118,9.0,38.0,0.99616,3.15,0.53,9.8 +9.6,0.42,0.35,2.1,0.083,17.0,38.0,0.99622,3.23,0.66,11.1 +6.6,0.58,0.0,2.2,0.1,50.0,63.0,0.99544,3.59,0.68,11.4 +8.3,0.6,0.25,2.2,0.118,9.0,38.0,0.99616,3.15,0.53,9.8 +8.5,0.18,0.51,1.75,0.071,45.0,88.0,0.99524,3.33,0.76,11.8 +5.1,0.51,0.18,2.1,0.042,16.0,101.0,0.9924,3.46,0.87,12.9 +6.7,0.41,0.43,2.8,0.076,22.0,54.0,0.99572,3.42,1.16,10.6 +10.2,0.41,0.43,2.2,0.11,11.0,37.0,0.99728,3.16,0.67,10.8 +10.6,0.36,0.57,2.3,0.087,6.0,20.0,0.99676,3.14,0.72,11.1 +8.8,0.45,0.43,1.4,0.076,12.0,21.0,0.99551,3.21,0.75,10.2 +8.5,0.32,0.42,2.3,0.075,12.0,19.0,0.99434,3.14,0.71,11.8 +9.0,0.785,0.24,1.7,0.078,10.0,21.0,0.99692,3.29,0.67,10.0 +9.0,0.785,0.24,1.7,0.078,10.0,21.0,0.99692,3.29,0.67,10.0 +8.5,0.44,0.5,1.9,0.369,15.0,38.0,0.99634,3.01,1.1,9.4 +9.9,0.54,0.26,2.0,0.111,7.0,60.0,0.99709,2.94,0.98,10.2 +8.2,0.33,0.39,2.5,0.074,29.0,48.0,0.99528,3.32,0.88,12.4 +6.5,0.34,0.27,2.8,0.067,8.0,44.0,0.99384,3.21,0.56,12.0 +7.6,0.5,0.29,2.3,0.086,5.0,14.0,0.99502,3.32,0.62,11.5 +9.2,0.36,0.34,1.6,0.062,5.0,12.0,0.99667,3.2,0.67,10.5 +7.1,0.59,0.0,2.2,0.078,26.0,44.0,0.99522,3.42,0.68,10.8 +9.7,0.42,0.46,2.1,0.074,5.0,16.0,0.99649,3.27,0.74,12.3 +7.6,0.36,0.31,1.7,0.079,26.0,65.0,0.99716,3.46,0.62,9.5 +7.6,0.36,0.31,1.7,0.079,26.0,65.0,0.99716,3.46,0.62,9.5 +6.5,0.61,0.0,2.2,0.095,48.0,59.0,0.99541,3.61,0.7,11.5 +6.5,0.88,0.03,5.6,0.079,23.0,47.0,0.99572,3.58,0.5,11.2 +7.1,0.66,0.0,2.4,0.052,6.0,11.0,0.99318,3.35,0.66,12.7 +5.6,0.915,0.0,2.1,0.041,17.0,78.0,0.99346,3.68,0.73,11.4 +8.2,0.35,0.33,2.4,0.076,11.0,47.0,0.99599,3.27,0.81,11.0 +8.2,0.35,0.33,2.4,0.076,11.0,47.0,0.99599,3.27,0.81,11.0 +9.8,0.39,0.43,1.65,0.068,5.0,11.0,0.99478,3.19,0.46,11.4 +10.2,0.4,0.4,2.5,0.068,41.0,54.0,0.99754,3.38,0.86,10.5 +6.8,0.66,0.07,1.6,0.07,16.0,61.0,0.99572,3.29,0.6,9.3 +6.7,0.64,0.23,2.1,0.08,11.0,119.0,0.99538,3.36,0.7,10.9 +7.0,0.43,0.3,2.0,0.085,6.0,39.0,0.99346,3.33,0.46,11.9 +6.6,0.8,0.03,7.8,0.079,6.0,12.0,0.9963,3.52,0.5,12.2 +7.0,0.43,0.3,2.0,0.085,6.0,39.0,0.99346,3.33,0.46,11.9 +6.7,0.64,0.23,2.1,0.08,11.0,119.0,0.99538,3.36,0.7,10.9 +8.8,0.955,0.05,1.8,0.075,5.0,19.0,0.99616,3.3,0.44,9.6 +9.1,0.4,0.57,4.6,0.08,6.0,20.0,0.99652,3.28,0.57,12.5 +6.5,0.885,0.0,2.3,0.166,6.0,12.0,0.99551,3.56,0.51,10.8 +7.2,0.25,0.37,2.5,0.063,11.0,41.0,0.99439,3.52,0.8,12.4 +6.4,0.885,0.0,2.3,0.166,6.0,12.0,0.99551,3.56,0.51,10.8 +7.0,0.745,0.12,1.8,0.114,15.0,64.0,0.99588,3.22,0.59,9.5 +6.2,0.43,0.22,1.8,0.078,21.0,56.0,0.99633,3.52,0.6,9.5 +7.9,0.58,0.23,2.3,0.076,23.0,94.0,0.99686,3.21,0.58,9.5 +7.7,0.57,0.21,1.5,0.069,4.0,9.0,0.99458,3.16,0.54,9.8 +7.7,0.26,0.26,2.0,0.052,19.0,77.0,0.9951,3.15,0.79,10.9 +7.9,0.58,0.23,2.3,0.076,23.0,94.0,0.99686,3.21,0.58,9.5 +7.7,0.57,0.21,1.5,0.069,4.0,9.0,0.99458,3.16,0.54,9.8 
+7.9,0.34,0.36,1.9,0.065,5.0,10.0,0.99419,3.27,0.54,11.2 +8.6,0.42,0.39,1.8,0.068,6.0,12.0,0.99516,3.35,0.69,11.7 +9.9,0.74,0.19,5.8,0.111,33.0,76.0,0.99878,3.14,0.55,9.4 +7.2,0.36,0.46,2.1,0.074,24.0,44.0,0.99534,3.4,0.85,11.0 +7.2,0.36,0.46,2.1,0.074,24.0,44.0,0.99534,3.4,0.85,11.0 +7.2,0.36,0.46,2.1,0.074,24.0,44.0,0.99534,3.4,0.85,11.0 +9.9,0.72,0.55,1.7,0.136,24.0,52.0,0.99752,3.35,0.94,10.0 +7.2,0.36,0.46,2.1,0.074,24.0,44.0,0.99534,3.4,0.85,11.0 +6.2,0.39,0.43,2.0,0.071,14.0,24.0,0.99428,3.45,0.87,11.2 +6.8,0.65,0.02,2.1,0.078,8.0,15.0,0.99498,3.35,0.62,10.4 +6.6,0.44,0.15,2.1,0.076,22.0,53.0,0.9957,3.32,0.62,9.3 +6.8,0.65,0.02,2.1,0.078,8.0,15.0,0.99498,3.35,0.62,10.4 +9.6,0.38,0.42,1.9,0.071,5.0,13.0,0.99659,3.15,0.75,10.5 +10.2,0.33,0.46,1.9,0.081,6.0,9.0,0.99628,3.1,0.48,10.4 +8.8,0.27,0.46,2.1,0.095,20.0,29.0,0.99488,3.26,0.56,11.3 +7.9,0.57,0.31,2.0,0.079,10.0,79.0,0.99677,3.29,0.69,9.5 +8.2,0.34,0.37,1.9,0.057,43.0,74.0,0.99408,3.23,0.81,12.0 +8.2,0.4,0.31,1.9,0.082,8.0,24.0,0.996,3.24,0.69,10.6 +9.0,0.39,0.4,1.3,0.044,25.0,50.0,0.99478,3.2,0.83,10.9 +10.9,0.32,0.52,1.8,0.132,17.0,44.0,0.99734,3.28,0.77,11.5 +10.9,0.32,0.52,1.8,0.132,17.0,44.0,0.99734,3.28,0.77,11.5 +8.1,0.53,0.22,2.2,0.078,33.0,89.0,0.99678,3.26,0.46,9.6 +10.5,0.36,0.47,2.2,0.074,9.0,23.0,0.99638,3.23,0.76,12.0 +12.6,0.39,0.49,2.5,0.08,8.0,20.0,0.9992,3.07,0.82,10.3 +9.2,0.46,0.23,2.6,0.091,18.0,77.0,0.99922,3.15,0.51,9.4 +7.5,0.58,0.03,4.1,0.08,27.0,46.0,0.99592,3.02,0.47,9.2 +9.0,0.58,0.25,2.0,0.104,8.0,21.0,0.99769,3.27,0.72,9.6 +5.1,0.42,0.0,1.8,0.044,18.0,88.0,0.99157,3.68,0.73,13.6 +7.6,0.43,0.29,2.1,0.075,19.0,66.0,0.99718,3.4,0.64,9.5 +7.7,0.18,0.34,2.7,0.066,15.0,58.0,0.9947,3.37,0.78,11.8 +7.8,0.815,0.01,2.6,0.074,48.0,90.0,0.99621,3.38,0.62,10.8 +7.6,0.43,0.29,2.1,0.075,19.0,66.0,0.99718,3.4,0.64,9.5 +10.2,0.23,0.37,2.2,0.057,14.0,36.0,0.99614,3.23,0.49,9.3 +7.1,0.75,0.01,2.2,0.059,11.0,18.0,0.99242,3.39,0.4,12.8 +6.0,0.33,0.32,12.9,0.054,6.0,113.0,0.99572,3.3,0.56,11.5 +7.8,0.55,0.0,1.7,0.07,7.0,17.0,0.99659,3.26,0.64,9.4 +7.1,0.75,0.01,2.2,0.059,11.0,18.0,0.99242,3.39,0.4,12.8 +8.1,0.73,0.0,2.5,0.081,12.0,24.0,0.99798,3.38,0.46,9.6 +6.5,0.67,0.0,4.3,0.057,11.0,20.0,0.99488,3.45,0.56,11.8 +7.5,0.61,0.2,1.7,0.076,36.0,60.0,0.99494,3.1,0.4,9.3 +9.8,0.37,0.39,2.5,0.079,28.0,65.0,0.99729,3.16,0.59,9.8 +9.0,0.4,0.41,2.0,0.058,15.0,40.0,0.99414,3.22,0.6,12.2 +8.3,0.56,0.22,2.4,0.082,10.0,86.0,0.9983,3.37,0.62,9.5 +5.9,0.29,0.25,13.4,0.067,72.0,160.0,0.99721,3.33,0.54,10.3 +7.4,0.55,0.19,1.8,0.082,15.0,34.0,0.99655,3.49,0.68,10.5 +7.4,0.74,0.07,1.7,0.086,15.0,48.0,0.99502,3.12,0.48,10.0 +7.4,0.55,0.19,1.8,0.082,15.0,34.0,0.99655,3.49,0.68,10.5 +6.9,0.41,0.33,2.2,0.081,22.0,36.0,0.9949,3.41,0.75,11.1 +7.1,0.6,0.01,2.3,0.079,24.0,37.0,0.99514,3.4,0.61,10.9 +7.1,0.6,0.01,2.3,0.079,24.0,37.0,0.99514,3.4,0.61,10.9 +7.5,0.58,0.14,2.2,0.077,27.0,60.0,0.9963,3.28,0.59,9.8 +7.1,0.72,0.0,1.8,0.123,6.0,14.0,0.99627,3.45,0.58,9.8 +7.9,0.66,0.0,1.4,0.096,6.0,13.0,0.99569,3.43,0.58,9.5 +7.8,0.7,0.06,1.9,0.079,20.0,35.0,0.99628,3.4,0.69,10.9 +6.1,0.64,0.02,2.4,0.069,26.0,46.0,0.99358,3.47,0.45,11.0 +7.5,0.59,0.22,1.8,0.082,43.0,60.0,0.99499,3.1,0.42,9.2 +7.0,0.58,0.28,4.8,0.085,12.0,69.0,0.99633,3.32,0.7,11.0 +6.8,0.64,0.0,2.7,0.123,15.0,33.0,0.99538,3.44,0.63,11.3 +6.8,0.64,0.0,2.7,0.123,15.0,33.0,0.99538,3.44,0.63,11.3 +8.6,0.635,0.68,1.8,0.403,19.0,56.0,0.99632,3.02,1.15,9.3 +6.3,1.02,0.0,2.0,0.083,17.0,24.0,0.99437,3.59,0.55,11.2 +9.8,0.45,0.38,2.5,0.081,34.0,66.0,0.99726,3.15,0.58,9.8 
+8.2,0.78,0.0,2.2,0.089,13.0,26.0,0.9978,3.37,0.46,9.6 +8.5,0.37,0.32,1.8,0.066,26.0,51.0,0.99456,3.38,0.72,11.8 +7.2,0.57,0.05,2.3,0.081,16.0,36.0,0.99564,3.38,0.6,10.3 +7.2,0.57,0.05,2.3,0.081,16.0,36.0,0.99564,3.38,0.6,10.3 +10.4,0.43,0.5,2.3,0.068,13.0,19.0,0.996,3.1,0.87,11.4 +6.9,0.41,0.31,2.0,0.079,21.0,51.0,0.99668,3.47,0.55,9.5 +5.5,0.49,0.03,1.8,0.044,28.0,87.0,0.9908,3.5,0.82,14.0 +5.0,0.38,0.01,1.6,0.048,26.0,60.0,0.99084,3.7,0.75,14.0 +7.3,0.44,0.2,1.6,0.049,24.0,64.0,0.9935,3.38,0.57,11.7 +5.9,0.46,0.0,1.9,0.077,25.0,44.0,0.99385,3.5,0.53,11.2 +7.5,0.58,0.2,2.0,0.073,34.0,44.0,0.99494,3.1,0.43,9.3 +7.8,0.58,0.13,2.1,0.102,17.0,36.0,0.9944,3.24,0.53,11.2 +8.0,0.715,0.22,2.3,0.075,13.0,81.0,0.99688,3.24,0.54,9.5 +8.5,0.4,0.4,6.3,0.05,3.0,10.0,0.99566,3.28,0.56,12.0 +7.0,0.69,0.0,1.9,0.114,3.0,10.0,0.99636,3.35,0.6,9.7 +8.0,0.715,0.22,2.3,0.075,13.0,81.0,0.99688,3.24,0.54,9.5 +9.8,0.3,0.39,1.7,0.062,3.0,9.0,0.9948,3.14,0.57,11.5 +7.1,0.46,0.2,1.9,0.077,28.0,54.0,0.9956,3.37,0.64,10.4 +7.1,0.46,0.2,1.9,0.077,28.0,54.0,0.9956,3.37,0.64,10.4 +7.9,0.765,0.0,2.0,0.084,9.0,22.0,0.99619,3.33,0.68,10.9 +8.7,0.63,0.28,2.7,0.096,17.0,69.0,0.99734,3.26,0.63,10.2 +7.0,0.42,0.19,2.3,0.071,18.0,36.0,0.99476,3.39,0.56,10.9 +11.3,0.37,0.5,1.8,0.09,20.0,47.0,0.99734,3.15,0.57,10.5 +7.1,0.16,0.44,2.5,0.068,17.0,31.0,0.99328,3.35,0.54,12.4 +8.0,0.6,0.08,2.6,0.056,3.0,7.0,0.99286,3.22,0.37,13.0 +7.0,0.6,0.3,4.5,0.068,20.0,110.0,0.99914,3.3,1.17,10.2 +7.0,0.6,0.3,4.5,0.068,20.0,110.0,0.99914,3.3,1.17,10.2 +7.6,0.74,0.0,1.9,0.1,6.0,12.0,0.99521,3.36,0.59,11.0 +8.2,0.635,0.1,2.1,0.073,25.0,60.0,0.99638,3.29,0.75,10.9 +5.9,0.395,0.13,2.4,0.056,14.0,28.0,0.99362,3.62,0.67,12.4 +7.5,0.755,0.0,1.9,0.084,6.0,12.0,0.99672,3.34,0.49,9.7 +8.2,0.635,0.1,2.1,0.073,25.0,60.0,0.99638,3.29,0.75,10.9 +6.6,0.63,0.0,4.3,0.093,51.0,77.5,0.99558,3.2,0.45,9.5 +6.6,0.63,0.0,4.3,0.093,51.0,77.5,0.99558,3.2,0.45,9.5 +7.2,0.53,0.14,2.1,0.064,15.0,29.0,0.99323,3.35,0.61,12.1 +5.7,0.6,0.0,1.4,0.063,11.0,18.0,0.99191,3.45,0.56,12.2 +7.6,1.58,0.0,2.1,0.137,5.0,9.0,0.99476,3.5,0.4,10.9 +5.2,0.645,0.0,2.15,0.08,15.0,28.0,0.99444,3.78,0.61,12.5 +6.7,0.86,0.07,2.0,0.1,20.0,57.0,0.99598,3.6,0.74,11.7 +9.1,0.37,0.32,2.1,0.064,4.0,15.0,0.99576,3.3,0.8,11.2 +8.0,0.28,0.44,1.8,0.081,28.0,68.0,0.99501,3.36,0.66,11.2 +7.6,0.79,0.21,2.3,0.087,21.0,68.0,0.9955,3.12,0.44,9.2 +7.5,0.61,0.26,1.9,0.073,24.0,88.0,0.99612,3.3,0.53,9.8 +9.7,0.69,0.32,2.5,0.088,22.0,91.0,0.9979,3.29,0.62,10.1 +6.8,0.68,0.09,3.9,0.068,15.0,29.0,0.99524,3.41,0.52,11.1 +9.7,0.69,0.32,2.5,0.088,22.0,91.0,0.9979,3.29,0.62,10.1 +7.0,0.62,0.1,1.4,0.071,27.0,63.0,0.996,3.28,0.61,9.2 +7.5,0.61,0.26,1.9,0.073,24.0,88.0,0.99612,3.3,0.53,9.8 +6.5,0.51,0.15,3.0,0.064,12.0,27.0,0.9929,3.33,0.59,12.8 +8.0,1.18,0.21,1.9,0.083,14.0,41.0,0.99532,3.34,0.47,10.5 +7.0,0.36,0.21,2.3,0.086,20.0,65.0,0.99558,3.4,0.54,10.1 +7.0,0.36,0.21,2.4,0.086,24.0,69.0,0.99556,3.4,0.53,10.1 +7.5,0.63,0.27,2.0,0.083,17.0,91.0,0.99616,3.26,0.58,9.8 +5.4,0.74,0.0,1.2,0.041,16.0,46.0,0.99258,4.01,0.59,12.5 +9.9,0.44,0.46,2.2,0.091,10.0,41.0,0.99638,3.18,0.69,11.9 +7.5,0.63,0.27,2.0,0.083,17.0,91.0,0.99616,3.26,0.58,9.8 +9.1,0.76,0.68,1.7,0.414,18.0,64.0,0.99652,2.9,1.33,9.1 +9.7,0.66,0.34,2.6,0.094,12.0,88.0,0.99796,3.26,0.66,10.1 +5.0,0.74,0.0,1.2,0.041,16.0,46.0,0.99258,4.01,0.59,12.5 +9.1,0.34,0.42,1.8,0.058,9.0,18.0,0.99392,3.18,0.55,11.4 +9.1,0.36,0.39,1.8,0.06,21.0,55.0,0.99495,3.18,0.82,11.0 +6.7,0.46,0.24,1.7,0.077,18.0,34.0,0.9948,3.39,0.6,10.6 
+6.7,0.46,0.24,1.7,0.077,18.0,34.0,0.9948,3.39,0.6,10.6 +6.7,0.46,0.24,1.7,0.077,18.0,34.0,0.9948,3.39,0.6,10.6 +6.7,0.46,0.24,1.7,0.077,18.0,34.0,0.9948,3.39,0.6,10.6 +6.5,0.52,0.11,1.8,0.073,13.0,38.0,0.9955,3.34,0.52,9.3 +7.4,0.6,0.26,2.1,0.083,17.0,91.0,0.99616,3.29,0.56,9.8 +7.4,0.6,0.26,2.1,0.083,17.0,91.0,0.99616,3.29,0.56,9.8 +7.8,0.87,0.26,3.8,0.107,31.0,67.0,0.99668,3.26,0.46,9.2 +8.4,0.39,0.1,1.7,0.075,6.0,25.0,0.99581,3.09,0.43,9.7 +9.1,0.775,0.22,2.2,0.079,12.0,48.0,0.9976,3.18,0.51,9.6 +7.2,0.835,0.0,2.0,0.166,4.0,11.0,0.99608,3.39,0.52,10.0 +6.6,0.58,0.02,2.4,0.069,19.0,40.0,0.99387,3.38,0.66,12.6 +6.0,0.5,0.0,1.4,0.057,15.0,26.0,0.99448,3.36,0.45,9.5 +6.0,0.5,0.0,1.4,0.057,15.0,26.0,0.99448,3.36,0.45,9.5 +6.0,0.5,0.0,1.4,0.057,15.0,26.0,0.99448,3.36,0.45,9.5 +7.5,0.51,0.02,1.7,0.084,13.0,31.0,0.99538,3.36,0.54,10.5 +7.5,0.51,0.02,1.7,0.084,13.0,31.0,0.99538,3.36,0.54,10.5 +7.5,0.51,0.02,1.7,0.084,13.0,31.0,0.99538,3.36,0.54,10.5 +7.6,0.54,0.02,1.7,0.085,17.0,31.0,0.99589,3.37,0.51,10.4 +7.5,0.51,0.02,1.7,0.084,13.0,31.0,0.99538,3.36,0.54,10.5 +11.5,0.42,0.48,2.6,0.077,8.0,20.0,0.99852,3.09,0.53,11.0 +8.2,0.44,0.24,2.3,0.063,10.0,28.0,0.99613,3.25,0.53,10.2 +6.1,0.59,0.01,2.1,0.056,5.0,13.0,0.99472,3.52,0.56,11.4 +7.2,0.655,0.03,1.8,0.078,7.0,12.0,0.99587,3.34,0.39,9.5 +7.2,0.655,0.03,1.8,0.078,7.0,12.0,0.99587,3.34,0.39,9.5 +6.9,0.57,0.0,2.8,0.081,21.0,41.0,0.99518,3.41,0.52,10.8 +9.0,0.6,0.29,2.0,0.069,32.0,73.0,0.99654,3.34,0.57,10.0 +7.2,0.62,0.01,2.3,0.065,8.0,46.0,0.99332,3.32,0.51,11.8 +7.6,0.645,0.03,1.9,0.086,14.0,57.0,0.9969,3.37,0.46,10.3 +7.6,0.645,0.03,1.9,0.086,14.0,57.0,0.9969,3.37,0.46,10.3 +7.2,0.58,0.03,2.3,0.077,7.0,28.0,0.99568,3.35,0.52,10.0 +6.1,0.32,0.25,1.8,0.086,5.0,32.0,0.99464,3.36,0.44,10.1 +6.1,0.34,0.25,1.8,0.084,4.0,28.0,0.99464,3.36,0.44,10.1 +7.3,0.43,0.24,2.5,0.078,27.0,67.0,0.99648,3.6,0.59,11.1 +7.4,0.64,0.17,5.4,0.168,52.0,98.0,0.99736,3.28,0.5,9.5 +11.6,0.475,0.4,1.4,0.091,6.0,28.0,0.99704,3.07,0.65,10.03333333 +9.2,0.54,0.31,2.3,0.112,11.0,38.0,0.99699,3.24,0.56,10.9 +8.3,0.85,0.14,2.5,0.093,13.0,54.0,0.99724,3.36,0.54,10.1 +11.6,0.475,0.4,1.4,0.091,6.0,28.0,0.99704,3.07,0.65,10.03333333 +8.0,0.83,0.27,2.0,0.08,11.0,63.0,0.99652,3.29,0.48,9.8 +7.2,0.605,0.02,1.9,0.096,10.0,31.0,0.995,3.46,0.53,11.8 +7.8,0.5,0.09,2.2,0.115,10.0,42.0,0.9971,3.18,0.62,9.5 +7.3,0.74,0.08,1.7,0.094,10.0,45.0,0.99576,3.24,0.5,9.8 +6.9,0.54,0.3,2.2,0.088,9.0,105.0,0.99725,3.25,1.18,10.5 +8.0,0.77,0.32,2.1,0.079,16.0,74.0,0.99656,3.27,0.5,9.8 +6.6,0.61,0.0,1.6,0.069,4.0,8.0,0.99396,3.33,0.37,10.4 +8.7,0.78,0.51,1.7,0.415,12.0,66.0,0.99623,3.0,1.17,9.2 +7.5,0.58,0.56,3.1,0.153,5.0,14.0,0.99476,3.21,1.03,11.6 +8.7,0.78,0.51,1.7,0.415,12.0,66.0,0.99623,3.0,1.17,9.2 +7.7,0.75,0.27,3.8,0.11,34.0,89.0,0.99664,3.24,0.45,9.3 +6.8,0.815,0.0,1.2,0.267,16.0,29.0,0.99471,3.32,0.51,9.8 +7.2,0.56,0.26,2.0,0.083,13.0,100.0,0.99586,3.26,0.52,9.9 +8.2,0.885,0.2,1.4,0.086,7.0,31.0,0.9946,3.11,0.46,10.0 +5.2,0.49,0.26,2.3,0.09,23.0,74.0,0.9953,3.71,0.62,12.2 +7.2,0.45,0.15,2.0,0.078,10.0,28.0,0.99609,3.29,0.51,9.9 +7.5,0.57,0.02,2.6,0.077,11.0,35.0,0.99557,3.36,0.62,10.8 +7.5,0.57,0.02,2.6,0.077,11.0,35.0,0.99557,3.36,0.62,10.8 +6.8,0.83,0.09,1.8,0.074,4.0,25.0,0.99534,3.38,0.45,9.6 +8.0,0.6,0.22,2.1,0.08,25.0,105.0,0.99613,3.3,0.49,9.9 +8.0,0.6,0.22,2.1,0.08,25.0,105.0,0.99613,3.3,0.49,9.9 +7.1,0.755,0.15,1.8,0.107,20.0,84.0,0.99593,3.19,0.5,9.5 +8.0,0.81,0.25,3.4,0.076,34.0,85.0,0.99668,3.19,0.42,9.2 +7.4,0.64,0.07,1.8,0.1,8.0,23.0,0.9961,3.3,0.58,9.6 
+7.4,0.64,0.07,1.8,0.1,8.0,23.0,0.9961,3.3,0.58,9.6 +6.6,0.64,0.31,6.1,0.083,7.0,49.0,0.99718,3.35,0.68,10.3 +6.7,0.48,0.02,2.2,0.08,36.0,111.0,0.99524,3.1,0.53,9.7 +6.0,0.49,0.0,2.3,0.068,15.0,33.0,0.99292,3.58,0.59,12.5 +8.0,0.64,0.22,2.4,0.094,5.0,33.0,0.99612,3.37,0.58,11.0 +7.1,0.62,0.06,1.3,0.07,5.0,12.0,0.9942,3.17,0.48,9.8 +8.0,0.52,0.25,2.0,0.078,19.0,59.0,0.99612,3.3,0.48,10.2 +6.4,0.57,0.14,3.9,0.07,27.0,73.0,0.99669,3.32,0.48,9.2 +8.6,0.685,0.1,1.6,0.092,3.0,12.0,0.99745,3.31,0.65,9.55 +8.7,0.675,0.1,1.6,0.09,4.0,11.0,0.99745,3.31,0.65,9.55 +7.3,0.59,0.26,2.0,0.08,17.0,104.0,0.99584,3.28,0.52,9.9 +7.0,0.6,0.12,2.2,0.083,13.0,28.0,0.9966,3.52,0.62,10.2 +7.2,0.67,0.0,2.2,0.068,10.0,24.0,0.9956,3.42,0.72,11.1 +7.9,0.69,0.21,2.1,0.08,33.0,141.0,0.9962,3.25,0.51,9.9 +7.9,0.69,0.21,2.1,0.08,33.0,141.0,0.9962,3.25,0.51,9.9 +7.6,0.3,0.42,2.0,0.052,6.0,24.0,0.9963,3.44,0.82,11.9 +7.2,0.33,0.33,1.7,0.061,3.0,13.0,0.996,3.23,1.1,10.0 +8.0,0.5,0.39,2.6,0.082,12.0,46.0,0.9985,3.43,0.62,10.7 +7.7,0.28,0.3,2.0,0.062,18.0,34.0,0.9952,3.28,0.9,11.3 +8.2,0.24,0.34,5.1,0.062,8.0,22.0,0.9974,3.22,0.94,10.9 +6.0,0.51,0.0,2.1,0.064,40.0,54.0,0.995,3.54,0.93,10.7 +8.1,0.29,0.36,2.2,0.048,35.0,53.0,0.995,3.27,1.01,12.4 +6.0,0.51,0.0,2.1,0.064,40.0,54.0,0.995,3.54,0.93,10.7 +6.6,0.96,0.0,1.8,0.082,5.0,16.0,0.9936,3.5,0.44,11.9 +6.4,0.47,0.4,2.4,0.071,8.0,19.0,0.9963,3.56,0.73,10.6 +8.2,0.24,0.34,5.1,0.062,8.0,22.0,0.9974,3.22,0.94,10.9 +9.9,0.57,0.25,2.0,0.104,12.0,89.0,0.9963,3.04,0.9,10.1 +10.0,0.32,0.59,2.2,0.077,3.0,15.0,0.9994,3.2,0.78,9.6 +6.2,0.58,0.0,1.6,0.065,8.0,18.0,0.9966,3.56,0.84,9.4 +10.0,0.32,0.59,2.2,0.077,3.0,15.0,0.9994,3.2,0.78,9.6 +7.3,0.34,0.33,2.5,0.064,21.0,37.0,0.9952,3.35,0.77,12.1 +7.8,0.53,0.01,1.6,0.077,3.0,19.0,0.995,3.16,0.46,9.8 +7.7,0.64,0.21,2.2,0.077,32.0,133.0,0.9956,3.27,0.45,9.9 +7.8,0.53,0.01,1.6,0.077,3.0,19.0,0.995,3.16,0.46,9.8 +7.5,0.4,0.18,1.6,0.079,24.0,58.0,0.9965,3.34,0.58,9.4 +7.0,0.54,0.0,2.1,0.079,39.0,55.0,0.9956,3.39,0.84,11.4 +6.4,0.53,0.09,3.9,0.123,14.0,31.0,0.9968,3.5,0.67,11.0 +8.3,0.26,0.37,1.4,0.076,8.0,23.0,0.9974,3.26,0.7,9.6 +8.3,0.26,0.37,1.4,0.076,8.0,23.0,0.9974,3.26,0.7,9.6 +7.7,0.23,0.37,1.8,0.046,23.0,60.0,0.9971,3.41,0.71,12.1 +7.6,0.41,0.33,2.5,0.078,6.0,23.0,0.9957,3.3,0.58,11.2 +7.8,0.64,0.0,1.9,0.072,27.0,55.0,0.9962,3.31,0.63,11.0 +7.9,0.18,0.4,2.2,0.049,38.0,67.0,0.996,3.33,0.93,11.3 +7.4,0.41,0.24,1.8,0.066,18.0,47.0,0.9956,3.37,0.62,10.4 +7.6,0.43,0.31,2.1,0.069,13.0,74.0,0.9958,3.26,0.54,9.9 +5.9,0.44,0.0,1.6,0.042,3.0,11.0,0.9944,3.48,0.85,11.7 +6.1,0.4,0.16,1.8,0.069,11.0,25.0,0.9955,3.42,0.74,10.1 +10.2,0.54,0.37,15.4,0.214,55.0,95.0,1.00369,3.18,0.77,9.0 +10.2,0.54,0.37,15.4,0.214,55.0,95.0,1.00369,3.18,0.77,9.0 +10.0,0.38,0.38,1.6,0.169,27.0,90.0,0.99914,3.15,0.65,8.5 +6.8,0.915,0.29,4.8,0.07,15.0,39.0,0.99577,3.53,0.54,11.1 +7.0,0.59,0.0,1.7,0.052,3.0,8.0,0.996,3.41,0.47,10.3 +7.3,0.67,0.02,2.2,0.072,31.0,92.0,0.99566,3.32,0.68,11.06666667 +7.2,0.37,0.32,2.0,0.062,15.0,28.0,0.9947,3.23,0.73,11.3 +7.4,0.785,0.19,5.2,0.094,19.0,98.0,0.99713,3.16,0.52,9.566666667 +6.9,0.63,0.02,1.9,0.078,18.0,30.0,0.99712,3.4,0.75,9.8 +6.9,0.58,0.2,1.75,0.058,8.0,22.0,0.99322,3.38,0.49,11.7 +7.3,0.67,0.02,2.2,0.072,31.0,92.0,0.99566,3.32,0.68,11.1 +7.4,0.785,0.19,5.2,0.094,19.0,98.0,0.99713,3.16,0.52,9.6 +6.9,0.63,0.02,1.9,0.078,18.0,30.0,0.99712,3.4,0.75,9.8 +6.8,0.67,0.0,1.9,0.08,22.0,39.0,0.99701,3.4,0.74,9.7 +6.9,0.58,0.01,1.9,0.08,40.0,54.0,0.99683,3.4,0.73,9.7 +7.2,0.38,0.31,2.0,0.056,15.0,29.0,0.99472,3.23,0.76,11.3 
+7.2,0.37,0.32,2.0,0.062,15.0,28.0,0.9947,3.23,0.73,11.3 +7.8,0.32,0.44,2.7,0.104,8.0,17.0,0.99732,3.33,0.78,11.0 +6.6,0.58,0.02,2.0,0.062,37.0,53.0,0.99374,3.35,0.76,11.6 +7.6,0.49,0.33,1.9,0.074,27.0,85.0,0.99706,3.41,0.58,9.0 +11.7,0.45,0.63,2.2,0.073,7.0,23.0,0.99974,3.21,0.69,10.9 +6.5,0.9,0.0,1.6,0.052,9.0,17.0,0.99467,3.5,0.63,10.9 +6.0,0.54,0.06,1.8,0.05,38.0,89.0,0.99236,3.3,0.5,10.55 +7.6,0.49,0.33,1.9,0.074,27.0,85.0,0.99706,3.41,0.58,9.0 +8.4,0.29,0.4,1.7,0.067,8.0,20.0,0.99603,3.39,0.6,10.5 +7.9,0.2,0.35,1.7,0.054,7.0,15.0,0.99458,3.32,0.8,11.9 +6.4,0.42,0.09,2.3,0.054,34.0,64.0,0.99724,3.41,0.68,10.4 +6.2,0.785,0.0,2.1,0.06,6.0,13.0,0.99664,3.59,0.61,10.0 +6.8,0.64,0.03,2.3,0.075,14.0,31.0,0.99545,3.36,0.58,10.4 +6.9,0.63,0.01,2.4,0.076,14.0,39.0,0.99522,3.34,0.53,10.8 +6.8,0.59,0.1,1.7,0.063,34.0,53.0,0.9958,3.41,0.67,9.7 +6.8,0.59,0.1,1.7,0.063,34.0,53.0,0.9958,3.41,0.67,9.7 +7.3,0.48,0.32,2.1,0.062,31.0,54.0,0.99728,3.3,0.65,10.0 +6.7,1.04,0.08,2.3,0.067,19.0,32.0,0.99648,3.52,0.57,11.0 +7.3,0.48,0.32,2.1,0.062,31.0,54.0,0.99728,3.3,0.65,10.0 +7.3,0.98,0.05,2.1,0.061,20.0,49.0,0.99705,3.31,0.55,9.7 +10.0,0.69,0.11,1.4,0.084,8.0,24.0,0.99578,2.88,0.47,9.7 +6.7,0.7,0.08,3.75,0.067,8.0,16.0,0.99334,3.43,0.52,12.6 +7.6,0.35,0.6,2.6,0.073,23.0,44.0,0.99656,3.38,0.79,11.1 +6.1,0.6,0.08,1.8,0.071,14.0,45.0,0.99336,3.38,0.54,11.0 +9.9,0.5,0.5,13.8,0.205,48.0,82.0,1.00242,3.16,0.75,8.8 +5.3,0.47,0.11,2.2,0.048,16.0,89.0,0.99182,3.54,0.88,13.56666667 +9.9,0.5,0.5,13.8,0.205,48.0,82.0,1.00242,3.16,0.75,8.8 +5.3,0.47,0.11,2.2,0.048,16.0,89.0,0.99182,3.54,0.88,13.6 +7.1,0.875,0.05,5.7,0.082,3.0,14.0,0.99808,3.4,0.52,10.2 +8.2,0.28,0.6,3.0,0.104,10.0,22.0,0.99828,3.39,0.68,10.6 +5.6,0.62,0.03,1.5,0.08,6.0,13.0,0.99498,3.66,0.62,10.1 +8.2,0.28,0.6,3.0,0.104,10.0,22.0,0.99828,3.39,0.68,10.6 +7.2,0.58,0.54,2.1,0.114,3.0,9.0,0.99719,3.33,0.57,10.3 +8.1,0.33,0.44,1.5,0.042,6.0,12.0,0.99542,3.35,0.61,10.7 +6.8,0.91,0.06,2.0,0.06,4.0,11.0,0.99592,3.53,0.64,10.9 +7.0,0.655,0.16,2.1,0.074,8.0,25.0,0.99606,3.37,0.55,9.7 +6.8,0.68,0.21,2.1,0.07,9.0,23.0,0.99546,3.38,0.6,10.3 +6.0,0.64,0.05,1.9,0.066,9.0,17.0,0.99496,3.52,0.78,10.6 +5.6,0.54,0.04,1.7,0.049,5.0,13.0,0.9942,3.72,0.58,11.4 +6.2,0.57,0.1,2.1,0.048,4.0,11.0,0.99448,3.44,0.76,10.8 +7.1,0.22,0.49,1.8,0.039,8.0,18.0,0.99344,3.39,0.56,12.4 +5.6,0.54,0.04,1.7,0.049,5.0,13.0,0.9942,3.72,0.58,11.4 +6.2,0.65,0.06,1.6,0.05,6.0,18.0,0.99348,3.57,0.54,11.95 +7.7,0.54,0.26,1.9,0.089,23.0,147.0,0.99636,3.26,0.59,9.7 +6.4,0.31,0.09,1.4,0.066,15.0,28.0,0.99459,3.42,0.7,10.0 +7.0,0.43,0.02,1.9,0.08,15.0,28.0,0.99492,3.35,0.81,10.6 +7.7,0.54,0.26,1.9,0.089,23.0,147.0,0.99636,3.26,0.59,9.7 +6.9,0.74,0.03,2.3,0.054,7.0,16.0,0.99508,3.45,0.63,11.5 +6.6,0.895,0.04,2.3,0.068,7.0,13.0,0.99582,3.53,0.58,10.8 +6.9,0.74,0.03,2.3,0.054,7.0,16.0,0.99508,3.45,0.63,11.5 +7.5,0.725,0.04,1.5,0.076,8.0,15.0,0.99508,3.26,0.53,9.6 +7.8,0.82,0.29,4.3,0.083,21.0,64.0,0.99642,3.16,0.53,9.4 +7.3,0.585,0.18,2.4,0.078,15.0,60.0,0.99638,3.31,0.54,9.8 +6.2,0.44,0.39,2.5,0.077,6.0,14.0,0.99555,3.51,0.69,11.0 +7.5,0.38,0.57,2.3,0.106,5.0,12.0,0.99605,3.36,0.55,11.4 +6.7,0.76,0.02,1.8,0.078,6.0,12.0,0.996,3.55,0.63,9.95 +6.8,0.81,0.05,2.0,0.07,6.0,14.0,0.99562,3.51,0.66,10.8 +7.5,0.38,0.57,2.3,0.106,5.0,12.0,0.99605,3.36,0.55,11.4 +7.1,0.27,0.6,2.1,0.074,17.0,25.0,0.99814,3.38,0.72,10.6 +7.9,0.18,0.4,1.8,0.062,7.0,20.0,0.9941,3.28,0.7,11.1 +6.4,0.36,0.21,2.2,0.047,26.0,48.0,0.99661,3.47,0.77,9.7 +7.1,0.69,0.04,2.1,0.068,19.0,27.0,0.99712,3.44,0.67,9.8 
+6.4,0.79,0.04,2.2,0.061,11.0,17.0,0.99588,3.53,0.65,10.4 +6.4,0.56,0.15,1.8,0.078,17.0,65.0,0.99294,3.33,0.6,10.5 +6.9,0.84,0.21,4.1,0.074,16.0,65.0,0.99842,3.53,0.72,9.233333333 +6.9,0.84,0.21,4.1,0.074,16.0,65.0,0.99842,3.53,0.72,9.25 +6.1,0.32,0.25,2.3,0.071,23.0,58.0,0.99633,3.42,0.97,10.6 +6.5,0.53,0.06,2.0,0.063,29.0,44.0,0.99489,3.38,0.83,10.3 +7.4,0.47,0.46,2.2,0.114,7.0,20.0,0.99647,3.32,0.63,10.5 +6.6,0.7,0.08,2.6,0.106,14.0,27.0,0.99665,3.44,0.58,10.2 +6.5,0.53,0.06,2.0,0.063,29.0,44.0,0.99489,3.38,0.83,10.3 +6.9,0.48,0.2,1.9,0.082,9.0,23.0,0.99585,3.39,0.43,9.05 +6.1,0.32,0.25,2.3,0.071,23.0,58.0,0.99633,3.42,0.97,10.6 +6.8,0.48,0.25,2.0,0.076,29.0,61.0,0.9953,3.34,0.6,10.4 +6.0,0.42,0.19,2.0,0.075,22.0,47.0,0.99522,3.39,0.78,10.0 +6.7,0.48,0.08,2.1,0.064,18.0,34.0,0.99552,3.33,0.64,9.7 +6.8,0.47,0.08,2.2,0.064,18.0,38.0,0.99553,3.3,0.65,9.6 +7.1,0.53,0.07,1.7,0.071,15.0,24.0,0.9951,3.29,0.66,10.8 +7.9,0.29,0.49,2.2,0.096,21.0,59.0,0.99714,3.31,0.67,10.1 +7.1,0.69,0.08,2.1,0.063,42.0,52.0,0.99608,3.42,0.6,10.2 +6.6,0.44,0.09,2.2,0.063,9.0,18.0,0.99444,3.42,0.69,11.3 +6.1,0.705,0.1,2.8,0.081,13.0,28.0,0.99631,3.6,0.66,10.2 +7.2,0.53,0.13,2.0,0.058,18.0,22.0,0.99573,3.21,0.68,9.9 +8.0,0.39,0.3,1.9,0.074,32.0,84.0,0.99717,3.39,0.61,9.0 +6.6,0.56,0.14,2.4,0.064,13.0,29.0,0.99397,3.42,0.62,11.7 +7.0,0.55,0.13,2.2,0.075,15.0,35.0,0.9959,3.36,0.59,9.7 +6.1,0.53,0.08,1.9,0.077,24.0,45.0,0.99528,3.6,0.68,10.3 +5.4,0.58,0.08,1.9,0.059,20.0,31.0,0.99484,3.5,0.64,10.2 +6.2,0.64,0.09,2.5,0.081,15.0,26.0,0.99538,3.57,0.63,12.0 +7.2,0.39,0.32,1.8,0.065,34.0,60.0,0.99714,3.46,0.78,9.9 +6.2,0.52,0.08,4.4,0.071,11.0,32.0,0.99646,3.56,0.63,11.6 +7.4,0.25,0.29,2.2,0.054,19.0,49.0,0.99666,3.4,0.76,10.9 +6.7,0.855,0.02,1.9,0.064,29.0,38.0,0.99472,3.3,0.56,10.75 +11.1,0.44,0.42,2.2,0.064,14.0,19.0,0.99758,3.25,0.57,10.4 +8.4,0.37,0.43,2.3,0.063,12.0,19.0,0.9955,3.17,0.81,11.2 +6.5,0.63,0.33,1.8,0.059,16.0,28.0,0.99531,3.36,0.64,10.1 +7.0,0.57,0.02,2.0,0.072,17.0,26.0,0.99575,3.36,0.61,10.2 +6.3,0.6,0.1,1.6,0.048,12.0,26.0,0.99306,3.55,0.51,12.1 +11.2,0.4,0.5,2.0,0.099,19.0,50.0,0.99783,3.1,0.58,10.4 +7.4,0.36,0.3,1.8,0.074,17.0,24.0,0.99419,3.24,0.7,11.4 +7.1,0.68,0.0,2.3,0.087,17.0,26.0,0.99783,3.45,0.53,9.5 +7.1,0.67,0.0,2.3,0.083,18.0,27.0,0.99768,3.44,0.54,9.4 +6.3,0.68,0.01,3.7,0.103,32.0,54.0,0.99586,3.51,0.66,11.3 +7.3,0.735,0.0,2.2,0.08,18.0,28.0,0.99765,3.41,0.6,9.4 +6.6,0.855,0.02,2.4,0.062,15.0,23.0,0.99627,3.54,0.6,11.0 +7.0,0.56,0.17,1.7,0.065,15.0,24.0,0.99514,3.44,0.68,10.55 +6.6,0.88,0.04,2.2,0.066,12.0,20.0,0.99636,3.53,0.56,9.9 +6.6,0.855,0.02,2.4,0.062,15.0,23.0,0.99627,3.54,0.6,11.0 +6.9,0.63,0.33,6.7,0.235,66.0,115.0,0.99787,3.22,0.56,9.5 +7.8,0.6,0.26,2.0,0.08,31.0,131.0,0.99622,3.21,0.52,9.9 +7.8,0.6,0.26,2.0,0.08,31.0,131.0,0.99622,3.21,0.52,9.9 +7.8,0.6,0.26,2.0,0.08,31.0,131.0,0.99622,3.21,0.52,9.9 +7.2,0.695,0.13,2.0,0.076,12.0,20.0,0.99546,3.29,0.54,10.1 +7.2,0.695,0.13,2.0,0.076,12.0,20.0,0.99546,3.29,0.54,10.1 +7.2,0.695,0.13,2.0,0.076,12.0,20.0,0.99546,3.29,0.54,10.1 +6.7,0.67,0.02,1.9,0.061,26.0,42.0,0.99489,3.39,0.82,10.9 +6.7,0.16,0.64,2.1,0.059,24.0,52.0,0.99494,3.34,0.71,11.2 +7.2,0.695,0.13,2.0,0.076,12.0,20.0,0.99546,3.29,0.54,10.1 +7.0,0.56,0.13,1.6,0.077,25.0,42.0,0.99629,3.34,0.59,9.2 +6.2,0.51,0.14,1.9,0.056,15.0,34.0,0.99396,3.48,0.57,11.5 +6.4,0.36,0.53,2.2,0.23,19.0,35.0,0.9934,3.37,0.93,12.4 +6.4,0.38,0.14,2.2,0.038,15.0,25.0,0.99514,3.44,0.65,11.1 +7.3,0.69,0.32,2.2,0.069,35.0,104.0,0.99632,3.33,0.51,9.5 
+6.0,0.58,0.2,2.4,0.075,15.0,50.0,0.99467,3.58,0.67,12.5 +5.6,0.31,0.78,13.9,0.074,23.0,92.0,0.99677,3.39,0.48,10.5 +7.5,0.52,0.4,2.2,0.06,12.0,20.0,0.99474,3.26,0.64,11.8 +8.0,0.3,0.63,1.6,0.081,16.0,29.0,0.99588,3.3,0.78,10.8 +6.2,0.7,0.15,5.1,0.076,13.0,27.0,0.99622,3.54,0.6,11.9 +6.8,0.67,0.15,1.8,0.118,13.0,20.0,0.9954,3.42,0.67,11.3 +6.2,0.56,0.09,1.7,0.053,24.0,32.0,0.99402,3.54,0.6,11.3 +7.4,0.35,0.33,2.4,0.068,9.0,26.0,0.9947,3.36,0.6,11.9 +6.2,0.56,0.09,1.7,0.053,24.0,32.0,0.99402,3.54,0.6,11.3 +6.1,0.715,0.1,2.6,0.053,13.0,27.0,0.99362,3.57,0.5,11.9 +6.2,0.46,0.29,2.1,0.074,32.0,98.0,0.99578,3.33,0.62,9.8 +6.7,0.32,0.44,2.4,0.061,24.0,34.0,0.99484,3.29,0.8,11.6 +7.2,0.39,0.44,2.6,0.066,22.0,48.0,0.99494,3.3,0.84,11.5 +7.5,0.31,0.41,2.4,0.065,34.0,60.0,0.99492,3.34,0.85,11.4 +5.8,0.61,0.11,1.8,0.066,18.0,28.0,0.99483,3.55,0.66,10.9 +7.2,0.66,0.33,2.5,0.068,34.0,102.0,0.99414,3.27,0.78,12.8 +6.6,0.725,0.2,7.8,0.073,29.0,79.0,0.9977,3.29,0.54,9.2 +6.3,0.55,0.15,1.8,0.077,26.0,35.0,0.99314,3.32,0.82,11.6 +5.4,0.74,0.09,1.7,0.089,16.0,26.0,0.99402,3.67,0.56,11.6 +6.3,0.51,0.13,2.3,0.076,29.0,40.0,0.99574,3.42,0.75,11.0 +6.8,0.62,0.08,1.9,0.068,28.0,38.0,0.99651,3.42,0.82,9.5 +6.2,0.6,0.08,2.0,0.09,32.0,44.0,0.9949,3.45,0.58,10.5 +5.9,0.55,0.1,2.2,0.062,39.0,51.0,0.99512,3.52,0.76,11.2 +6.3,0.51,0.13,2.3,0.076,29.0,40.0,0.99574,3.42,0.75,11.0 +5.9,0.645,0.12,2.0,0.075,32.0,44.0,0.99547,3.57,0.71,10.2 +6.0,0.31,0.47,3.6,0.067,18.0,42.0,0.99549,3.39,0.66,11.0 +7.0,0.27,0.36,20.7,0.045,45.0,170.0,1.001,3.0,0.45,8.8 +6.3,0.3,0.34,1.6,0.049,14.0,132.0,0.994,3.3,0.49,9.5 +8.1,0.28,0.4,6.9,0.05,30.0,97.0,0.9951,3.26,0.44,10.1 +7.2,0.23,0.32,8.5,0.058,47.0,186.0,0.9956,3.19,0.4,9.9 +7.2,0.23,0.32,8.5,0.058,47.0,186.0,0.9956,3.19,0.4,9.9 +8.1,0.28,0.4,6.9,0.05,30.0,97.0,0.9951,3.26,0.44,10.1 +6.2,0.32,0.16,7.0,0.045,30.0,136.0,0.9949,3.18,0.47,9.6 +7.0,0.27,0.36,20.7,0.045,45.0,170.0,1.001,3.0,0.45,8.8 +6.3,0.3,0.34,1.6,0.049,14.0,132.0,0.994,3.3,0.49,9.5 +8.1,0.22,0.43,1.5,0.044,28.0,129.0,0.9938,3.22,0.45,11.0 +8.1,0.27,0.41,1.45,0.033,11.0,63.0,0.9908,2.99,0.56,12.0 +8.6,0.23,0.4,4.2,0.035,17.0,109.0,0.9947,3.14,0.53,9.7 +7.9,0.18,0.37,1.2,0.04,16.0,75.0,0.992,3.18,0.63,10.8 +6.6,0.16,0.4,1.5,0.044,48.0,143.0,0.9912,3.54,0.52,12.4 +8.3,0.42,0.62,19.25,0.04,41.0,172.0,1.0002,2.98,0.67,9.7 +6.6,0.17,0.38,1.5,0.032,28.0,112.0,0.9914,3.25,0.55,11.4 +6.3,0.48,0.04,1.1,0.046,30.0,99.0,0.9928,3.24,0.36,9.6 +6.2,0.66,0.48,1.2,0.029,29.0,75.0,0.9892,3.33,0.39,12.8 +7.4,0.34,0.42,1.1,0.033,17.0,171.0,0.9917,3.12,0.53,11.3 +6.5,0.31,0.14,7.5,0.044,34.0,133.0,0.9955,3.22,0.5,9.5 +6.2,0.66,0.48,1.2,0.029,29.0,75.0,0.9892,3.33,0.39,12.8 +6.4,0.31,0.38,2.9,0.038,19.0,102.0,0.9912,3.17,0.35,11.0 +6.8,0.26,0.42,1.7,0.049,41.0,122.0,0.993,3.47,0.48,10.5 +7.6,0.67,0.14,1.5,0.074,25.0,168.0,0.9937,3.05,0.51,9.3 +6.6,0.27,0.41,1.3,0.052,16.0,142.0,0.9951,3.42,0.47,10.0 +7.0,0.25,0.32,9.0,0.046,56.0,245.0,0.9955,3.25,0.5,10.4 +6.9,0.24,0.35,1.0,0.052,35.0,146.0,0.993,3.45,0.44,10.0 +7.0,0.28,0.39,8.7,0.051,32.0,141.0,0.9961,3.38,0.53,10.5 +7.4,0.27,0.48,1.1,0.047,17.0,132.0,0.9914,3.19,0.49,11.6 +7.2,0.32,0.36,2.0,0.033,37.0,114.0,0.9906,3.1,0.71,12.3 +8.5,0.24,0.39,10.4,0.044,20.0,142.0,0.9974,3.2,0.53,10.0 +8.3,0.14,0.34,1.1,0.042,7.0,47.0,0.9934,3.47,0.4,10.2 +7.4,0.25,0.36,2.05,0.05,31.0,100.0,0.992,3.19,0.44,10.8 +6.2,0.12,0.34,1.5,0.045,43.0,117.0,0.9939,3.42,0.51,9.0 +5.8,0.27,0.2,14.95,0.044,22.0,179.0,0.9962,3.37,0.37,10.2 +7.3,0.28,0.43,1.7,0.08,21.0,123.0,0.9905,3.19,0.42,12.8 
+6.5,0.39,0.23,5.4,0.051,25.0,149.0,0.9934,3.24,0.35,10.0 +7.0,0.33,0.32,1.2,0.053,38.0,138.0,0.9906,3.13,0.28,11.2 +7.3,0.24,0.39,17.95,0.057,45.0,149.0,0.9999,3.21,0.36,8.6 +7.3,0.24,0.39,17.95,0.057,45.0,149.0,0.9999,3.21,0.36,8.6 +6.7,0.23,0.39,2.5,0.172,63.0,158.0,0.9937,3.11,0.36,9.4 +6.7,0.24,0.39,2.9,0.173,63.0,157.0,0.9937,3.1,0.34,9.4 +7.0,0.31,0.26,7.4,0.069,28.0,160.0,0.9954,3.13,0.46,9.8 +6.6,0.24,0.27,1.4,0.057,33.0,152.0,0.9934,3.22,0.56,9.5 +6.7,0.23,0.26,1.4,0.06,33.0,154.0,0.9934,3.24,0.56,9.5 +7.4,0.18,0.31,1.4,0.058,38.0,167.0,0.9931,3.16,0.53,10.0 +6.2,0.45,0.26,4.4,0.063,63.0,206.0,0.994,3.27,0.52,9.8 +6.2,0.46,0.25,4.4,0.066,62.0,207.0,0.9939,3.25,0.52,9.8 +7.0,0.31,0.26,7.4,0.069,28.0,160.0,0.9954,3.13,0.46,9.8 +6.9,0.19,0.35,5.0,0.067,32.0,150.0,0.995,3.36,0.48,9.8 +7.2,0.19,0.31,1.6,0.062,31.0,173.0,0.9917,3.35,0.44,11.7 +6.6,0.25,0.29,1.1,0.068,39.0,124.0,0.9914,3.34,0.58,11.0 +6.2,0.16,0.33,1.1,0.057,21.0,82.0,0.991,3.32,0.46,10.9 +6.4,0.18,0.35,1.0,0.045,39.0,108.0,0.9911,3.31,0.35,10.9 +6.8,0.2,0.59,0.9,0.147,38.0,132.0,0.993,3.05,0.38,9.1 +6.9,0.25,0.35,1.3,0.039,29.0,191.0,0.9908,3.13,0.52,11.0 +7.2,0.21,0.34,11.9,0.043,37.0,213.0,0.9962,3.09,0.5,9.6 +6.0,0.19,0.26,12.4,0.048,50.0,147.0,0.9972,3.3,0.36,8.9 +6.6,0.38,0.15,4.6,0.044,25.0,78.0,0.9931,3.11,0.38,10.2 +7.4,0.2,0.36,1.2,0.038,44.0,111.0,0.9926,3.36,0.34,9.9 +6.8,0.22,0.24,4.9,0.092,30.0,123.0,0.9951,3.03,0.46,8.6 +6.0,0.19,0.26,12.4,0.048,50.0,147.0,0.9972,3.3,0.36,8.9 +7.0,0.47,0.07,1.1,0.035,17.0,151.0,0.991,3.02,0.34,10.5 +6.6,0.38,0.15,4.6,0.044,25.0,78.0,0.9931,3.11,0.38,10.2 +7.2,0.24,0.27,1.4,0.038,31.0,122.0,0.9927,3.15,0.46,10.3 +6.2,0.35,0.03,1.2,0.064,29.0,120.0,0.9934,3.22,0.54,9.1 +6.4,0.26,0.24,6.4,0.04,27.0,124.0,0.9903,3.22,0.49,12.6 +6.7,0.25,0.13,1.2,0.041,81.0,174.0,0.992,3.14,0.42,9.8 +6.7,0.23,0.31,2.1,0.046,30.0,96.0,0.9926,3.33,0.64,10.7 +7.4,0.24,0.29,10.1,0.05,21.0,105.0,0.9962,3.13,0.35,9.5 +6.2,0.27,0.43,7.8,0.056,48.0,244.0,0.9956,3.1,0.51,9.0 +6.8,0.3,0.23,4.6,0.061,50.5,238.5,0.9958,3.32,0.6,9.5 +6.0,0.27,0.28,4.8,0.063,31.0,201.0,0.9964,3.69,0.71,10.0 +8.6,0.23,0.46,1.0,0.054,9.0,72.0,0.9941,2.95,0.49,9.1 +6.7,0.23,0.31,2.1,0.046,30.0,96.0,0.9926,3.33,0.64,10.7 +7.4,0.24,0.29,10.1,0.05,21.0,105.0,0.9962,3.13,0.35,9.5 +7.1,0.18,0.36,1.4,0.043,31.0,87.0,0.9898,3.26,0.37,12.7 +7.0,0.32,0.34,1.3,0.042,20.0,69.0,0.9912,3.31,0.65,12.0 +7.4,0.18,0.3,8.8,0.064,26.0,103.0,0.9961,2.94,0.56,9.3 +6.7,0.54,0.28,5.4,0.06,21.0,105.0,0.9949,3.27,0.37,9.0 +6.8,0.22,0.31,1.4,0.053,34.0,114.0,0.9929,3.39,0.77,10.6 +7.1,0.2,0.34,16.0,0.05,51.0,166.0,0.9985,3.21,0.6,9.2 +7.1,0.34,0.2,6.1,0.063,47.0,164.0,0.9946,3.17,0.42,10.0 +7.3,0.22,0.3,8.2,0.047,42.0,207.0,0.9966,3.33,0.46,9.5 +7.1,0.43,0.61,11.8,0.045,54.0,155.0,0.9974,3.11,0.45,8.7 +7.1,0.44,0.62,11.8,0.044,52.0,152.0,0.9975,3.12,0.46,8.7 +7.2,0.39,0.63,11.0,0.044,55.0,156.0,0.9974,3.09,0.44,8.7 +6.8,0.25,0.31,13.3,0.05,69.0,202.0,0.9972,3.22,0.48,9.7 +7.1,0.43,0.61,11.8,0.045,54.0,155.0,0.9974,3.11,0.45,8.7 +7.1,0.44,0.62,11.8,0.044,52.0,152.0,0.9975,3.12,0.46,8.7 +7.2,0.39,0.63,11.0,0.044,55.0,156.0,0.9974,3.09,0.44,8.7 +6.1,0.27,0.43,7.5,0.049,65.0,243.0,0.9957,3.12,0.47,9.0 +6.9,0.24,0.33,1.7,0.035,47.0,136.0,0.99,3.26,0.4,12.6 +6.9,0.21,0.33,1.8,0.034,48.0,136.0,0.9899,3.25,0.41,12.6 +7.5,0.17,0.32,1.7,0.04,51.0,148.0,0.9916,3.21,0.44,11.5 +7.1,0.26,0.29,12.4,0.044,62.0,240.0,0.9969,3.04,0.42,9.2 +6.0,0.34,0.66,15.9,0.046,26.0,164.0,0.9979,3.14,0.5,8.8 +8.6,0.265,0.36,1.2,0.034,15.0,80.0,0.9913,2.95,0.36,11.4 
+9.8,0.36,0.46,10.5,0.038,4.0,83.0,0.9956,2.89,0.3,10.1 +6.0,0.34,0.66,15.9,0.046,26.0,164.0,0.9979,3.14,0.5,8.8 +7.4,0.25,0.37,13.5,0.06,52.0,192.0,0.9975,3.0,0.44,9.1 +7.1,0.12,0.32,9.6,0.054,64.0,162.0,0.9962,3.4,0.41,9.4 +6.0,0.21,0.24,12.1,0.05,55.0,164.0,0.997,3.34,0.39,9.4 +7.5,0.305,0.4,18.9,0.059,44.0,170.0,1.0,2.99,0.46,9.0 +7.4,0.25,0.37,13.5,0.06,52.0,192.0,0.9975,3.0,0.44,9.1 +7.3,0.13,0.32,14.4,0.051,34.0,109.0,0.9974,3.2,0.35,9.2 +7.1,0.12,0.32,9.6,0.054,64.0,162.0,0.9962,3.4,0.41,9.4 +7.1,0.23,0.35,16.5,0.04,60.0,171.0,0.999,3.16,0.59,9.1 +7.1,0.23,0.35,16.5,0.04,60.0,171.0,0.999,3.16,0.59,9.1 +6.9,0.33,0.28,1.3,0.051,37.0,187.0,0.9927,3.27,0.6,10.3 +6.5,0.17,0.54,8.5,0.082,64.0,163.0,0.9959,2.89,0.39,8.8 +7.2,0.27,0.46,18.75,0.052,45.0,255.0,1.0,3.04,0.52,8.9 +7.2,0.31,0.5,13.3,0.056,68.0,195.0,0.9982,3.01,0.47,9.2 +6.7,0.41,0.34,9.2,0.049,29.0,150.0,0.9968,3.22,0.51,9.1 +6.7,0.41,0.34,9.2,0.049,29.0,150.0,0.9968,3.22,0.51,9.1 +5.5,0.485,0.0,1.5,0.065,8.0,103.0,0.994,3.63,0.4,9.7 +6.0,0.31,0.24,3.3,0.041,25.0,143.0,0.9914,3.31,0.44,11.3 +7.0,0.14,0.4,1.7,0.035,16.0,85.0,0.9911,3.19,0.42,11.8 +7.2,0.31,0.5,13.3,0.056,68.0,195.0,0.9982,3.01,0.47,9.2 +7.3,0.32,0.48,13.3,0.06,57.0,196.0,0.9982,3.04,0.5,9.2 +5.9,0.36,0.04,5.7,0.046,21.0,87.0,0.9934,3.22,0.51,10.2 +7.8,0.24,0.32,12.2,0.054,42.0,138.0,0.9984,3.01,0.54,8.8 +7.4,0.16,0.31,6.85,0.059,31.0,131.0,0.9952,3.29,0.34,9.7 +6.9,0.19,0.28,5.0,0.058,14.0,146.0,0.9952,3.29,0.36,9.1 +6.4,0.13,0.47,1.6,0.092,40.0,158.0,0.9928,3.21,0.36,9.8 +6.7,0.19,0.36,1.1,0.026,63.0,143.0,0.9912,3.27,0.48,11.0 +7.4,0.39,0.23,7.0,0.033,29.0,126.0,0.994,3.14,0.42,10.5 +6.5,0.24,0.32,7.6,0.038,48.0,203.0,0.9958,3.45,0.54,9.7 +6.1,0.3,0.56,2.8,0.044,47.0,179.0,0.9924,3.3,0.57,10.9 +6.1,0.3,0.56,2.7,0.046,46.0,184.0,0.9924,3.31,0.57,10.9 +5.7,0.26,0.25,10.4,0.02,7.0,57.0,0.994,3.39,0.37,10.6 +6.5,0.24,0.32,7.6,0.038,48.0,203.0,0.9958,3.45,0.54,9.7 +6.5,0.425,0.4,13.1,0.038,59.0,241.0,0.9979,3.23,0.57,9.0 +6.6,0.24,0.27,15.8,0.035,46.0,188.0,0.9982,3.24,0.51,9.2 +6.8,0.27,0.22,8.1,0.034,55.0,203.0,0.9961,3.19,0.52,8.9 +6.7,0.27,0.31,15.7,0.036,44.0,179.0,0.9979,3.26,0.56,9.6 +8.2,0.23,0.4,1.2,0.027,36.0,121.0,0.992,3.12,0.38,10.7 +7.1,0.37,0.67,10.5,0.045,49.0,155.0,0.9975,3.16,0.44,8.7 +6.8,0.19,0.36,1.9,0.035,30.0,96.0,0.9917,3.15,0.54,10.8 +8.1,0.28,0.39,1.9,0.029,18.0,79.0,0.9923,3.23,0.52,11.8 +6.3,0.31,0.34,2.2,0.045,20.0,77.0,0.9927,3.3,0.43,10.2 +7.1,0.37,0.67,10.5,0.045,49.0,155.0,0.9975,3.16,0.44,8.7 +7.9,0.21,0.4,1.2,0.039,38.0,107.0,0.992,3.21,0.54,10.8 +8.5,0.21,0.41,4.3,0.036,24.0,99.0,0.9947,3.18,0.53,9.7 +8.1,0.2,0.4,2.0,0.037,19.0,87.0,0.9921,3.12,0.54,11.2 +6.3,0.255,0.37,1.1,0.04,37.0,114.0,0.9905,3.0,0.39,10.9 +5.6,0.16,0.27,1.4,0.044,53.0,168.0,0.9918,3.28,0.37,10.1 +6.4,0.595,0.14,5.2,0.058,15.0,97.0,0.9951,3.38,0.36,9.0 +6.3,0.34,0.33,4.6,0.034,19.0,80.0,0.9917,3.38,0.58,12.0 +6.9,0.25,0.3,4.1,0.054,23.0,116.0,0.994,2.99,0.38,9.4 +7.9,0.22,0.38,8.0,0.043,46.0,152.0,0.9934,3.12,0.32,11.5 +7.6,0.18,0.46,10.2,0.055,58.0,135.0,0.9968,3.14,0.43,9.9 +6.9,0.25,0.3,4.1,0.054,23.0,116.0,0.994,2.99,0.38,9.4 +7.2,0.18,0.41,1.2,0.048,41.0,97.0,0.9919,3.14,0.45,10.4 +8.2,0.23,0.4,7.5,0.049,12.0,76.0,0.9966,3.06,0.84,9.7 +7.4,0.24,0.42,14.0,0.066,48.0,198.0,0.9979,2.89,0.42,8.9 +7.4,0.24,0.42,14.0,0.066,48.0,198.0,0.9979,2.89,0.42,8.9 +6.1,0.32,0.24,1.5,0.036,38.0,124.0,0.9898,3.29,0.42,12.4 +5.2,0.44,0.04,1.4,0.036,43.0,119.0,0.9894,3.36,0.33,12.1 +5.2,0.44,0.04,1.4,0.036,43.0,119.0,0.9894,3.36,0.33,12.1 
+6.1,0.32,0.24,1.5,0.036,38.0,124.0,0.9898,3.29,0.42,12.4 +6.4,0.22,0.56,14.5,0.055,27.0,159.0,0.998,2.98,0.4,9.1 +6.3,0.36,0.3,4.8,0.049,14.0,85.0,0.9932,3.28,0.39,10.6 +7.4,0.24,0.42,14.0,0.066,48.0,198.0,0.9979,2.89,0.42,8.9 +6.7,0.24,0.35,13.1,0.05,64.0,205.0,0.997,3.15,0.5,9.5 +7.0,0.23,0.36,13.0,0.051,72.0,177.0,0.9972,3.16,0.49,9.8 +8.4,0.27,0.46,8.7,0.048,39.0,197.0,0.9974,3.14,0.59,9.6 +6.7,0.46,0.18,2.4,0.034,25.0,98.0,0.9896,3.08,0.44,12.6 +7.5,0.29,0.31,8.95,0.055,20.0,151.0,0.9968,3.08,0.54,9.3 +9.8,0.42,0.48,9.85,0.034,5.0,110.0,0.9958,2.87,0.29,10.0 +7.1,0.3,0.46,1.5,0.066,29.0,133.0,0.9906,3.12,0.54,12.7 +7.9,0.19,0.45,1.5,0.045,17.0,96.0,0.9917,3.13,0.39,11.0 +7.6,0.48,0.37,0.8,0.037,4.0,100.0,0.9902,3.03,0.39,11.4 +6.3,0.22,0.43,4.55,0.038,31.0,130.0,0.9918,3.35,0.33,11.5 +7.5,0.27,0.31,17.7,0.051,33.0,173.0,0.999,3.09,0.64,10.2 +6.9,0.23,0.4,7.5,0.04,50.0,151.0,0.9927,3.11,0.27,11.4 +7.2,0.32,0.47,5.1,0.044,19.0,65.0,0.991,3.03,0.41,12.6 +5.9,0.23,0.3,12.9,0.054,57.0,170.0,0.9972,3.28,0.39,9.4 +6.0,0.67,0.07,1.2,0.06,9.0,108.0,0.9931,3.11,0.35,8.7 +6.4,0.25,0.32,5.5,0.049,41.0,176.0,0.995,3.19,0.68,9.2 +6.4,0.33,0.31,5.5,0.048,42.0,173.0,0.9951,3.19,0.66,9.3 +7.1,0.34,0.15,1.2,0.053,61.0,183.0,0.9936,3.09,0.43,9.2 +6.8,0.28,0.4,22.0,0.048,48.0,167.0,1.001,2.93,0.5,8.7 +6.9,0.27,0.4,14.0,0.05,64.0,227.0,0.9979,3.18,0.58,9.6 +6.8,0.26,0.56,11.9,0.043,64.0,226.0,0.997,3.02,0.63,9.3 +6.8,0.29,0.56,11.9,0.043,66.0,230.0,0.9972,3.02,0.63,9.3 +6.7,0.24,0.41,9.4,0.04,49.0,166.0,0.9954,3.12,0.61,9.9 +5.9,0.3,0.23,4.2,0.038,42.0,119.0,0.9924,3.15,0.5,11.0 +6.8,0.53,0.35,3.8,0.034,26.0,109.0,0.9906,3.26,0.57,12.7 +6.5,0.28,0.28,8.5,0.047,54.0,210.0,0.9962,3.09,0.54,8.9 +6.6,0.28,0.28,8.5,0.052,55.0,211.0,0.9962,3.09,0.55,8.9 +6.8,0.28,0.4,22.0,0.048,48.0,167.0,1.001,2.93,0.5,8.7 +6.8,0.28,0.36,8.0,0.045,28.0,123.0,0.9928,3.02,0.37,11.4 +6.6,0.15,0.34,5.1,0.055,34.0,125.0,0.9942,3.36,0.42,9.6 +6.4,0.29,0.44,3.6,0.2,75.0,181.0,0.9942,3.02,0.41,9.1 +6.4,0.3,0.45,3.5,0.197,76.0,180.0,0.9942,3.02,0.39,9.1 +6.4,0.29,0.44,3.6,0.197,75.0,183.0,0.9942,3.01,0.38,9.1 +6.8,0.26,0.24,7.8,0.052,54.0,214.0,0.9961,3.13,0.47,8.9 +7.1,0.32,0.24,13.1,0.05,52.0,204.0,0.998,3.1,0.49,8.8 +6.8,0.26,0.24,7.8,0.052,54.0,214.0,0.9961,3.13,0.47,8.9 +6.8,0.27,0.26,16.1,0.049,55.0,196.0,0.9984,3.15,0.5,9.3 +7.1,0.32,0.24,13.1,0.05,52.0,204.0,0.998,3.1,0.49,8.8 +6.9,0.54,0.32,13.2,0.05,53.0,236.0,0.9973,3.2,0.5,9.6 +6.8,0.26,0.34,13.9,0.034,39.0,134.0,0.9949,3.33,0.53,12.0 +5.8,0.28,0.35,2.3,0.053,36.0,114.0,0.9924,3.28,0.5,10.2 +6.4,0.21,0.5,11.6,0.042,45.0,153.0,0.9972,3.15,0.43,8.8 +7.0,0.16,0.32,8.3,0.045,38.0,126.0,0.9958,3.21,0.34,9.2 +10.2,0.44,0.88,6.2,0.049,20.0,124.0,0.9968,2.99,0.51,9.9 +6.8,0.57,0.29,2.2,0.04,15.0,77.0,0.9938,3.32,0.74,10.2 +6.1,0.4,0.31,0.9,0.048,23.0,170.0,0.993,3.22,0.77,9.5 +5.6,0.245,0.25,9.7,0.032,12.0,68.0,0.994,3.31,0.34,10.5 +6.8,0.18,0.38,1.4,0.038,35.0,111.0,0.9918,3.32,0.59,11.2 +7.0,0.16,0.32,8.3,0.045,38.0,126.0,0.9958,3.21,0.34,9.2 +6.7,0.13,0.29,5.3,0.051,31.0,122.0,0.9944,3.44,0.37,9.7 +6.2,0.25,0.25,1.4,0.03,35.0,105.0,0.9912,3.3,0.44,11.1 +5.8,0.26,0.24,9.2,0.044,55.0,152.0,0.9961,3.31,0.38,9.4 +7.5,0.27,0.36,7.0,0.036,45.0,164.0,0.9939,3.03,0.33,11.0 +5.8,0.26,0.24,9.2,0.044,55.0,152.0,0.9961,3.31,0.38,9.4 +5.7,0.28,0.24,17.5,0.044,60.0,167.0,0.9989,3.31,0.44,9.4 +7.5,0.23,0.36,7.0,0.036,43.0,161.0,0.9938,3.04,0.32,11.0 +7.5,0.27,0.36,7.0,0.036,45.0,164.0,0.9939,3.03,0.33,11.0 +7.2,0.685,0.21,9.5,0.07,33.0,172.0,0.9971,3.0,0.55,9.1 
+6.2,0.25,0.25,1.4,0.03,35.0,105.0,0.9912,3.3,0.44,11.1 +6.5,0.19,0.3,0.8,0.043,33.0,144.0,0.9936,3.42,0.39,9.1 +6.3,0.495,0.22,1.8,0.046,31.0,140.0,0.9929,3.39,0.54,10.4 +7.1,0.24,0.41,17.8,0.046,39.0,145.0,0.9998,3.32,0.39,8.7 +6.4,0.17,0.32,2.4,0.048,41.0,200.0,0.9938,3.5,0.5,9.7 +7.1,0.25,0.32,10.3,0.041,66.0,272.0,0.9969,3.17,0.52,9.1 +6.4,0.17,0.32,2.4,0.048,41.0,200.0,0.9938,3.5,0.5,9.7 +7.1,0.24,0.41,17.8,0.046,39.0,145.0,0.9998,3.32,0.39,8.7 +6.8,0.64,0.08,9.7,0.062,26.0,142.0,0.9972,3.37,0.46,8.9 +8.3,0.28,0.4,7.8,0.041,38.0,194.0,0.9976,3.34,0.51,9.6 +8.2,0.27,0.39,7.8,0.039,49.0,208.0,0.9976,3.31,0.51,9.5 +7.2,0.23,0.38,14.3,0.058,55.0,194.0,0.9979,3.09,0.44,9.0 +7.2,0.23,0.38,14.3,0.058,55.0,194.0,0.9979,3.09,0.44,9.0 +7.2,0.23,0.38,14.3,0.058,55.0,194.0,0.9979,3.09,0.44,9.0 +7.2,0.23,0.38,14.3,0.058,55.0,194.0,0.9979,3.09,0.44,9.0 +6.8,0.52,0.32,13.2,0.044,54.0,221.0,0.9972,3.27,0.5,9.6 +7.0,0.26,0.59,1.4,0.037,40.0,120.0,0.9918,3.34,0.41,11.1 +6.2,0.25,0.21,15.55,0.039,28.0,159.0,0.9982,3.48,0.64,9.6 +7.3,0.32,0.23,13.7,0.05,49.0,197.0,0.9985,3.2,0.46,8.7 +7.7,0.31,0.26,7.8,0.031,23.0,90.0,0.9944,3.13,0.5,10.4 +7.1,0.21,0.37,2.4,0.026,23.0,100.0,0.9903,3.15,0.38,11.4 +6.8,0.24,0.34,2.7,0.047,64.5,218.5,0.9934,3.3,0.58,9.7 +6.9,0.4,0.56,11.2,0.043,40.0,142.0,0.9975,3.14,0.46,8.7 +6.1,0.18,0.36,2.0,0.038,20.0,249.5,0.9923,3.37,0.79,11.3 +6.8,0.21,0.27,2.1,0.03,26.0,139.0,0.99,3.16,0.61,12.6 +5.8,0.2,0.27,1.4,0.031,12.0,77.0,0.9905,3.25,0.36,10.9 +5.6,0.19,0.26,1.4,0.03,12.0,76.0,0.9905,3.25,0.37,10.9 +6.1,0.41,0.14,10.4,0.037,18.0,119.0,0.996,3.38,0.45,10.0 +5.9,0.21,0.28,4.6,0.053,40.0,199.0,0.9964,3.72,0.7,10.0 +8.5,0.26,0.21,16.2,0.074,41.0,197.0,0.998,3.02,0.5,9.8 +6.9,0.4,0.56,11.2,0.043,40.0,142.0,0.9975,3.14,0.46,8.7 +5.8,0.24,0.44,3.5,0.029,5.0,109.0,0.9913,3.53,0.43,11.7 +5.8,0.24,0.39,1.5,0.054,37.0,158.0,0.9932,3.21,0.52,9.3 +6.7,0.26,0.39,1.1,0.04,45.0,147.0,0.9935,3.32,0.58,9.6 +6.3,0.35,0.3,5.7,0.035,8.0,97.0,0.9927,3.27,0.41,11.0 +6.3,0.35,0.3,5.7,0.035,8.0,97.0,0.9927,3.27,0.41,11.0 +6.4,0.23,0.39,1.8,0.032,23.0,118.0,0.9912,3.32,0.5,11.8 +5.8,0.36,0.38,0.9,0.037,3.0,75.0,0.9904,3.28,0.34,11.4 +6.9,0.115,0.35,5.4,0.048,36.0,108.0,0.9939,3.32,0.42,10.2 +6.9,0.29,0.4,19.45,0.043,36.0,156.0,0.9996,2.93,0.47,8.9 +6.9,0.28,0.4,8.2,0.036,15.0,95.0,0.9944,3.17,0.33,10.2 +7.2,0.29,0.4,13.6,0.045,66.0,231.0,0.9977,3.08,0.59,9.6 +6.2,0.24,0.35,1.2,0.038,22.0,167.0,0.9912,3.1,0.48,10.6 +6.9,0.29,0.4,19.45,0.043,36.0,156.0,0.9996,2.93,0.47,8.9 +6.9,0.32,0.26,8.3,0.053,32.0,180.0,0.9965,3.25,0.51,9.2 +5.3,0.58,0.07,6.9,0.043,34.0,149.0,0.9944,3.34,0.57,9.7 +5.3,0.585,0.07,7.1,0.044,34.0,145.0,0.9945,3.34,0.57,9.7 +5.4,0.59,0.07,7.0,0.045,36.0,147.0,0.9944,3.34,0.57,9.7 +6.9,0.32,0.26,8.3,0.053,32.0,180.0,0.9965,3.25,0.51,9.2 +5.2,0.6,0.07,7.0,0.044,33.0,147.0,0.9944,3.33,0.58,9.7 +5.8,0.25,0.26,13.1,0.051,44.0,148.0,0.9972,3.29,0.38,9.3 +6.6,0.58,0.3,5.1,0.057,30.0,123.0,0.9949,3.24,0.38,9.0 +7.0,0.29,0.54,10.7,0.046,59.0,234.0,0.9966,3.05,0.61,9.5 +6.6,0.19,0.41,8.9,0.046,51.0,169.0,0.9954,3.14,0.57,9.8 +6.7,0.2,0.41,9.1,0.044,50.0,166.0,0.9954,3.14,0.58,9.8 +7.7,0.26,0.4,1.1,0.042,9.0,60.0,0.9915,2.89,0.5,10.6 +6.8,0.32,0.34,1.2,0.044,14.0,67.0,0.9919,3.05,0.47,10.6 +7.0,0.3,0.49,4.7,0.036,17.0,105.0,0.9916,3.26,0.68,12.4 +7.0,0.24,0.36,2.8,0.034,22.0,112.0,0.99,3.19,0.38,12.6 +6.1,0.31,0.58,5.0,0.039,36.0,114.0,0.9909,3.3,0.6,12.3 +6.8,0.44,0.37,5.1,0.047,46.0,201.0,0.9938,3.08,0.65,10.5 +6.7,0.34,0.3,15.6,0.054,51.0,196.0,0.9982,3.19,0.49,9.3 
+7.1,0.35,0.24,15.4,0.055,46.0,198.0,0.9988,3.12,0.49,8.8 +7.3,0.32,0.25,7.2,0.056,47.0,180.0,0.9961,3.08,0.47,8.8 +6.5,0.28,0.33,15.7,0.053,51.0,190.0,0.9978,3.22,0.51,9.7 +7.2,0.23,0.39,14.2,0.058,49.0,192.0,0.9979,2.98,0.48,9.0 +7.2,0.23,0.39,14.2,0.058,49.0,192.0,0.9979,2.98,0.48,9.0 +7.2,0.23,0.39,14.2,0.058,49.0,192.0,0.9979,2.98,0.48,9.0 +7.2,0.23,0.39,14.2,0.058,49.0,192.0,0.9979,2.98,0.48,9.0 +5.9,0.15,0.31,5.8,0.041,53.0,155.0,0.9945,3.52,0.46,10.5 +7.4,0.28,0.42,19.8,0.066,53.0,195.0,1.0,2.96,0.44,9.1 +6.2,0.28,0.22,7.3,0.041,26.0,157.0,0.9957,3.44,0.64,9.8 +9.1,0.59,0.38,1.6,0.066,34.0,182.0,0.9968,3.23,0.38,8.5 +6.3,0.33,0.27,1.2,0.046,34.0,175.0,0.9934,3.37,0.54,9.4 +8.3,0.39,0.7,10.6,0.045,33.0,169.0,0.9976,3.09,0.57,9.4 +7.2,0.19,0.46,3.8,0.041,82.0,187.0,0.9932,3.19,0.6,11.2 +7.5,0.17,0.44,11.3,0.046,65.0,146.0,0.997,3.17,0.45,10.0 +6.7,0.17,0.5,2.1,0.043,27.0,122.0,0.9923,3.15,0.45,10.3 +6.1,0.41,0.0,1.6,0.063,36.0,87.0,0.9914,3.27,0.67,10.8 +8.3,0.2,0.35,0.9,0.05,12.0,74.0,0.992,3.13,0.38,10.5 +6.1,0.41,0.0,1.6,0.063,36.0,87.0,0.9914,3.27,0.67,10.8 +6.0,0.29,0.21,1.3,0.055,42.0,168.0,0.9914,3.32,0.43,11.1 +7.3,0.41,0.24,6.8,0.057,41.0,163.0,0.9949,3.2,0.41,9.9 +7.3,0.41,0.24,6.8,0.057,41.0,163.0,0.9949,3.2,0.41,9.9 +7.2,0.43,0.24,6.7,0.058,40.0,163.0,0.995,3.2,0.41,9.9 +7.3,0.4,0.24,6.7,0.058,41.0,166.0,0.995,3.2,0.41,9.9 +6.2,0.33,0.27,4.9,0.036,30.0,134.0,0.9927,3.2,0.42,10.4 +6.2,0.31,0.26,4.8,0.037,36.0,148.0,0.9928,3.21,0.41,10.4 +6.1,0.36,0.27,2.1,0.035,16.0,100.0,0.9917,3.4,0.71,11.5 +5.0,0.55,0.14,8.3,0.032,35.0,164.0,0.9918,3.53,0.51,12.5 +7.8,0.25,0.41,3.7,0.042,37.0,149.0,0.9954,3.36,0.45,10.0 +5.7,0.36,0.21,6.7,0.038,51.0,166.0,0.9941,3.29,0.63,10.0 +5.8,0.34,0.21,6.6,0.04,50.0,167.0,0.9941,3.29,0.62,10.0 +6.8,0.28,0.6,1.1,0.132,42.0,127.0,0.9934,3.09,0.44,9.1 +6.8,0.25,0.34,4.7,0.031,34.0,134.0,0.9927,3.21,0.38,10.6 +6.6,0.24,0.35,7.7,0.031,36.0,135.0,0.9938,3.19,0.37,10.5 +5.9,0.3,0.47,7.85,0.03,19.0,133.0,0.9933,3.52,0.43,11.5 +6.1,0.125,0.25,3.3,0.04,10.0,69.0,0.9934,3.54,0.59,10.1 +6.0,0.1,0.24,1.1,0.041,15.0,65.0,0.9927,3.61,0.61,10.3 +6.6,0.24,0.35,7.7,0.031,36.0,135.0,0.9938,3.19,0.37,10.5 +6.8,0.25,0.34,4.7,0.031,34.0,134.0,0.9927,3.21,0.38,10.6 +6.8,0.28,0.44,9.3,0.031,35.0,137.0,0.9946,3.16,0.36,10.4 +8.3,0.41,0.51,2.0,0.046,11.0,207.0,0.993,3.02,0.55,11.4 +7.5,0.27,0.31,5.8,0.057,131.0,313.0,0.9946,3.18,0.59,10.5 +7.9,0.26,0.41,15.15,0.04,38.0,216.0,0.9976,2.96,0.6,10.0 +6.4,0.34,0.23,6.3,0.039,37.0,143.0,0.9944,3.19,0.65,10.0 +6.5,0.28,0.35,15.4,0.042,55.0,195.0,0.9978,3.23,0.5,9.6 +7.2,0.21,0.41,1.3,0.036,33.0,85.0,0.992,3.17,0.51,10.4 +6.4,0.32,0.35,4.8,0.03,34.0,101.0,0.9912,3.36,0.6,12.5 +6.8,0.24,0.34,4.6,0.032,37.0,135.0,0.9927,3.2,0.39,10.6 +6.3,0.23,0.3,1.8,0.033,16.0,91.0,0.9906,3.28,0.4,11.8 +6.5,0.28,0.34,9.9,0.038,30.0,133.0,0.9954,3.11,0.44,9.8 +5.6,0.26,0.26,5.7,0.031,12.0,80.0,0.9923,3.25,0.38,10.8 +6.3,0.23,0.3,1.8,0.033,16.0,91.0,0.9906,3.28,0.4,11.8 +6.3,0.23,0.33,1.5,0.036,15.0,105.0,0.991,3.32,0.42,11.2 +5.8,0.27,0.27,12.3,0.045,55.0,170.0,0.9972,3.28,0.42,9.3 +5.9,0.26,0.4,1.3,0.047,12.0,139.0,0.9945,3.45,0.53,10.4 +6.6,0.18,0.35,1.5,0.049,49.0,141.0,0.9934,3.43,0.85,10.2 +7.4,0.2,0.43,7.8,0.045,27.0,153.0,0.9964,3.19,0.55,9.0 +8.0,0.24,0.36,1.5,0.047,17.0,129.0,0.9948,3.2,0.54,10.0 +6.4,0.26,0.42,9.7,0.044,30.0,140.0,0.9962,3.18,0.47,9.1 +5.4,0.31,0.47,3.0,0.053,46.0,144.0,0.9931,3.29,0.76,10.0 +5.4,0.29,0.47,3.0,0.052,47.0,145.0,0.993,3.29,0.75,10.0 +7.1,0.145,0.33,4.6,0.05,33.0,131.0,0.9942,3.28,0.4,9.6 
+5.6,0.34,0.1,1.3,0.031,20.0,68.0,0.9906,3.36,0.51,11.2 +6.7,0.19,0.41,15.6,0.056,75.0,155.0,0.9995,3.2,0.44,8.8 +7.8,0.18,0.46,13.6,0.052,38.0,118.0,0.998,3.15,0.5,10.0 +7.6,0.17,0.45,11.2,0.054,56.0,137.0,0.997,3.15,0.47,10.0 +6.3,0.12,0.36,2.1,0.044,47.0,146.0,0.9914,3.27,0.74,11.4 +7.3,0.33,0.4,6.85,0.038,32.0,138.0,0.992,3.03,0.3,11.9 +5.5,0.335,0.3,2.5,0.071,27.0,128.0,0.9924,3.14,0.51,9.6 +7.3,0.33,0.4,6.85,0.038,32.0,138.0,0.992,3.03,0.3,11.9 +5.8,0.4,0.42,4.4,0.047,38.5,245.0,0.9937,3.25,0.57,9.6 +7.3,0.22,0.37,14.3,0.063,48.0,191.0,0.9978,2.89,0.38,9.0 +7.3,0.22,0.37,14.3,0.063,48.0,191.0,0.9978,2.89,0.38,9.0 +6.1,0.36,0.33,1.1,0.05,24.0,169.0,0.9927,3.15,0.78,9.5 +10.0,0.2,0.39,1.4,0.05,19.0,152.0,0.994,3.0,0.42,10.4 +6.9,0.24,0.34,4.7,0.04,43.0,161.0,0.9935,3.2,0.59,10.6 +6.4,0.24,0.32,14.9,0.047,54.0,162.0,0.9968,3.28,0.5,10.2 +7.1,0.365,0.14,1.2,0.055,24.0,84.0,0.9941,3.15,0.43,8.9 +6.8,0.15,0.3,5.3,0.05,40.0,127.0,0.9942,3.4,0.39,9.7 +7.3,0.22,0.37,14.3,0.063,48.0,191.0,0.9978,2.89,0.38,9.0 +6.8,0.16,0.4,2.3,0.037,18.0,102.0,0.9923,3.49,0.42,11.4 +6.0,0.26,0.32,3.5,0.028,29.0,113.0,0.9912,3.4,0.71,12.3 +6.0,0.18,0.27,1.5,0.089,40.0,143.0,0.9923,3.49,0.62,10.8 +6.9,0.33,0.21,1.0,0.053,39.0,148.0,0.9927,3.12,0.45,9.4 +7.7,0.29,0.48,2.3,0.049,36.0,178.0,0.9931,3.17,0.64,10.6 +7.1,0.39,0.35,12.5,0.044,26.0,72.0,0.9941,3.17,0.29,11.6 +6.9,0.33,0.21,1.0,0.053,39.0,148.0,0.9927,3.12,0.45,9.4 +7.7,0.29,0.48,2.3,0.049,36.0,178.0,0.9931,3.17,0.64,10.6 +6.6,0.905,0.19,0.8,0.048,17.0,204.0,0.9934,3.34,0.56,10.0 +7.2,0.27,0.27,2.4,0.048,30.0,149.0,0.9936,3.1,0.51,9.2 +5.1,0.33,0.22,1.6,0.027,18.0,89.0,0.9893,3.51,0.38,12.5 +5.1,0.33,0.22,1.6,0.027,18.0,89.0,0.9893,3.51,0.38,12.5 +6.4,0.31,0.28,1.5,0.037,12.0,119.0,0.9919,3.32,0.51,10.4 +7.3,0.2,0.44,1.4,0.045,21.0,98.0,0.9924,3.15,0.46,10.0 +5.7,0.32,0.5,2.6,0.049,17.0,155.0,0.9927,3.22,0.64,10.0 +6.4,0.31,0.28,1.5,0.037,12.0,119.0,0.9919,3.32,0.51,10.4 +7.3,0.2,0.44,1.4,0.045,21.0,98.0,0.9924,3.15,0.46,10.0 +7.2,0.28,0.26,12.5,0.046,48.0,179.0,0.9975,3.1,0.52,9.0 +7.5,0.35,0.28,9.6,0.051,26.0,157.0,0.9969,3.12,0.53,9.2 +7.2,0.27,0.27,2.4,0.048,30.0,149.0,0.9936,3.1,0.51,9.2 +6.0,0.36,0.39,3.2,0.027,20.0,125.0,0.991,3.38,0.39,11.3 +5.1,0.33,0.22,1.6,0.027,18.0,89.0,0.9893,3.51,0.38,12.5 +5.0,0.17,0.56,1.5,0.026,24.0,115.0,0.9906,3.48,0.39,10.8 +6.3,0.39,0.35,5.9,0.04,82.5,260.0,0.9941,3.12,0.66,10.1 +6.7,0.21,0.32,5.4,0.047,29.0,140.0,0.995,3.39,0.46,9.7 +7.0,0.3,0.38,14.9,0.032,60.0,181.0,0.9983,3.18,0.61,9.3 +7.0,0.3,0.38,14.9,0.032,60.0,181.0,0.9983,3.18,0.61,9.3 +6.5,0.36,0.32,1.1,0.031,13.0,66.0,0.9916,3.1,0.46,10.6 +6.1,0.55,0.15,9.8,0.031,19.0,125.0,0.9957,3.36,0.47,10.2 +7.3,0.24,0.43,2.0,0.021,20.0,69.0,0.99,3.08,0.56,12.2 +6.8,0.37,0.51,11.8,0.044,62.0,163.0,0.9976,3.19,0.44,8.8 +6.8,0.27,0.12,1.3,0.04,87.0,168.0,0.992,3.18,0.41,10.0 +8.2,0.28,0.42,1.8,0.031,30.0,93.0,0.9917,3.09,0.39,11.4 +6.3,0.2,0.4,1.5,0.037,35.0,107.0,0.9917,3.46,0.5,11.4 +5.9,0.26,0.27,18.2,0.048,52.0,168.0,0.9993,3.35,0.44,9.4 +6.4,0.19,0.42,2.9,0.032,32.0,83.0,0.9908,3.3,0.41,11.7 +6.3,0.2,0.4,1.5,0.037,35.0,107.0,0.9917,3.46,0.5,11.4 +6.8,0.37,0.51,11.8,0.044,62.0,163.0,0.9976,3.19,0.44,8.8 +6.1,0.35,0.07,1.4,0.069,22.0,108.0,0.9934,3.23,0.52,9.2 +7.1,0.27,0.31,18.2,0.046,55.0,252.0,1.0,3.07,0.56,8.7 +6.8,0.22,0.31,6.3,0.035,33.0,170.0,0.9918,3.24,0.66,12.6 +6.8,0.27,0.12,1.3,0.04,87.0,168.0,0.992,3.18,0.41,10.0 +5.8,0.28,0.34,4.0,0.031,40.0,99.0,0.9896,3.39,0.39,12.8 +6.9,0.49,0.24,1.2,0.049,13.0,125.0,0.9932,3.17,0.51,9.4 
+6.3,0.14,0.39,1.2,0.044,26.0,116.0,0.992,3.26,0.53,10.3 +8.2,0.28,0.42,1.8,0.031,30.0,93.0,0.9917,3.09,0.39,11.4 +7.2,0.25,0.39,18.95,0.038,42.0,155.0,0.9999,2.97,0.47,9.0 +7.3,0.28,0.36,12.7,0.04,38.0,140.0,0.998,3.3,0.79,9.6 +7.2,0.19,0.39,1.2,0.036,32.0,85.0,0.9918,3.16,0.5,10.5 +7.2,0.19,0.39,1.2,0.036,32.0,85.0,0.9918,3.16,0.5,10.5 +7.2,0.25,0.39,18.95,0.038,42.0,155.0,0.9999,2.97,0.47,9.0 +7.3,0.28,0.36,12.7,0.04,38.0,140.0,0.998,3.3,0.79,9.6 +7.4,0.21,0.27,1.2,0.041,27.0,99.0,0.9927,3.19,0.33,9.8 +6.8,0.26,0.22,7.7,0.047,57.0,210.0,0.9959,3.1,0.47,9.0 +7.4,0.21,0.27,1.2,0.041,27.0,99.0,0.9927,3.19,0.33,9.8 +7.4,0.31,0.28,1.6,0.05,33.0,137.0,0.9929,3.31,0.56,10.5 +7.0,0.22,0.31,2.7,0.03,41.0,136.0,0.9898,3.16,0.37,12.7 +7.0,0.21,0.28,8.7,0.045,37.0,222.0,0.9954,3.25,0.54,10.4 +7.0,0.21,0.28,8.6,0.045,37.0,221.0,0.9954,3.25,0.54,10.4 +7.0,0.21,0.28,8.6,0.045,37.0,221.0,0.9954,3.25,0.54,10.4 +6.9,0.23,0.38,8.3,0.047,47.0,162.0,0.9954,3.34,0.52,10.5 +7.0,0.21,0.28,8.7,0.045,37.0,222.0,0.9954,3.25,0.54,10.4 +7.0,0.21,0.28,8.6,0.045,37.0,221.0,0.9954,3.25,0.54,10.4 +6.8,0.29,0.5,13.3,0.053,48.0,194.0,0.9974,3.09,0.45,9.4 +7.8,0.21,0.27,1.2,0.051,20.0,89.0,0.9936,3.06,0.46,9.1 +7.1,0.31,0.47,13.6,0.056,54.0,197.0,0.9978,3.1,0.49,9.3 +6.8,0.29,0.5,13.3,0.053,48.0,194.0,0.9974,3.09,0.45,9.4 +6.4,0.34,0.1,1.1,0.048,19.0,84.0,0.9927,3.21,0.38,9.8 +7.4,0.155,0.34,2.3,0.045,73.5,214.0,0.9934,3.18,0.61,9.9 +7.2,0.55,0.09,1.5,0.108,16.0,151.0,0.9938,3.07,0.57,9.2 +7.0,0.23,0.36,7.1,0.028,31.0,104.0,0.9922,3.35,0.47,12.1 +6.9,0.2,0.37,6.2,0.027,24.0,97.0,0.992,3.38,0.49,12.2 +6.1,0.28,0.32,2.5,0.042,23.0,218.5,0.9935,3.27,0.6,9.8 +6.6,0.16,0.32,1.4,0.035,49.0,186.0,0.9906,3.35,0.64,12.4 +7.4,0.155,0.34,2.3,0.045,73.5,214.0,0.9934,3.18,0.61,9.9 +6.2,0.35,0.04,1.2,0.06,23.0,108.0,0.9934,3.26,0.54,9.2 +6.7,0.22,0.37,1.6,0.028,24.0,102.0,0.9913,3.29,0.59,11.6 +6.1,0.38,0.2,6.6,0.033,25.0,137.0,0.9938,3.3,0.69,10.4 +6.0,0.25,0.28,2.2,0.026,54.0,126.0,0.9898,3.43,0.65,12.9 +6.6,0.52,0.44,12.2,0.048,54.0,245.0,0.9975,3.26,0.54,9.3 +6.9,0.24,0.36,20.8,0.031,40.0,139.0,0.9975,3.2,0.33,11.0 +7.1,0.32,0.32,11.0,0.038,16.0,66.0,0.9937,3.24,0.4,11.5 +5.8,0.28,0.27,2.6,0.054,30.0,156.0,0.9914,3.53,0.42,12.4 +6.5,0.41,0.24,14.0,0.048,24.0,113.0,0.9982,3.44,0.53,9.8 +6.5,0.41,0.24,14.0,0.048,24.0,113.0,0.9982,3.44,0.53,9.8 +6.4,0.28,0.29,1.6,0.052,34.0,127.0,0.9929,3.48,0.56,10.5 +7.2,0.6,0.2,9.9,0.07,21.0,174.0,0.9971,3.03,0.54,9.1 +6.1,0.2,0.25,1.2,0.038,34.0,128.0,0.9921,3.24,0.44,10.1 +5.9,0.46,0.14,2.7,0.042,27.0,160.0,0.9931,3.46,0.51,10.6 +6.0,0.27,0.27,1.6,0.046,32.0,113.0,0.9924,3.41,0.51,10.5 +6.4,0.28,0.29,1.6,0.052,34.0,127.0,0.9929,3.48,0.56,10.5 +6.4,0.41,0.24,14.0,0.048,24.0,113.0,0.9982,3.44,0.53,9.8 +6.3,0.23,0.31,1.5,0.022,11.0,82.0,0.9892,3.3,0.4,12.9 +7.1,0.21,0.27,8.6,0.056,26.0,111.0,0.9956,2.95,0.52,9.5 +6.0,0.37,0.32,1.0,0.053,31.0,218.5,0.9924,3.29,0.72,9.8 +6.1,0.43,0.35,9.1,0.059,83.0,249.0,0.9971,3.37,0.5,8.5 +7.1,0.21,0.27,8.6,0.056,26.0,111.0,0.9956,2.95,0.52,9.5 +7.0,0.25,0.29,15.2,0.047,40.0,171.0,0.9982,3.22,0.45,9.3 +5.9,0.25,0.19,12.4,0.047,50.0,162.0,0.9973,3.35,0.38,9.5 +6.8,0.32,0.21,2.2,0.044,15.0,68.0,0.9932,3.17,0.39,9.4 +7.2,0.39,0.62,11.0,0.047,66.0,178.0,0.9976,3.16,0.5,8.7 +6.3,0.21,0.58,10.0,0.081,34.0,126.0,0.9962,2.95,0.46,8.9 +7.0,0.14,0.32,9.0,0.039,54.0,141.0,0.9956,3.22,0.43,9.4 +6.8,0.32,0.21,2.2,0.044,15.0,68.0,0.9932,3.17,0.39,9.4 +7.2,0.39,0.62,11.0,0.047,66.0,178.0,0.9976,3.16,0.5,8.7 +7.2,0.29,0.53,18.15,0.047,59.0,182.0,0.9992,3.09,0.52,9.6 
+8.6,0.37,0.7,12.15,0.039,21.0,158.0,0.9983,3.0,0.73,9.3 +6.5,0.38,0.34,3.4,0.036,34.0,200.0,0.9937,3.14,0.76,10.0 +6.6,0.24,0.29,2.0,0.023,19.0,86.0,0.99,3.25,0.45,12.5 +7.0,0.17,0.31,4.8,0.034,34.0,132.0,0.9944,3.36,0.48,9.6 +5.5,0.16,0.22,4.5,0.03,30.0,102.0,0.9938,3.24,0.36,9.4 +7.0,0.24,0.51,11.0,0.029,55.0,227.0,0.9965,3.03,0.61,9.5 +7.4,0.28,0.36,1.1,0.028,42.0,105.0,0.9893,2.99,0.39,12.4 +7.0,0.22,0.28,1.5,0.037,29.0,115.0,0.9927,3.11,0.55,10.5 +7.1,0.55,0.13,1.7,0.073,21.0,165.0,0.994,2.97,0.58,9.2 +6.3,0.22,0.33,1.7,0.041,67.0,164.0,0.9928,3.32,0.56,10.4 +6.7,0.47,0.34,8.9,0.043,31.0,172.0,0.9964,3.22,0.6,9.2 +5.9,0.36,0.41,1.3,0.047,45.0,104.0,0.9917,3.33,0.51,10.6 +5.8,0.25,0.24,13.3,0.044,41.0,137.0,0.9972,3.34,0.42,9.5 +6.7,0.47,0.34,8.9,0.043,31.0,172.0,0.9964,3.22,0.6,9.2 +6.2,0.37,0.3,6.6,0.346,79.0,200.0,0.9954,3.29,0.58,9.6 +6.2,0.18,0.38,1.5,0.028,36.0,117.0,0.993,3.47,0.54,9.7 +6.0,0.16,0.37,1.5,0.025,43.0,117.0,0.9928,3.46,0.51,9.7 +6.6,0.34,0.28,1.3,0.035,32.0,90.0,0.9916,3.1,0.42,10.7 +7.4,0.29,0.29,1.6,0.045,53.0,180.0,0.9936,3.34,0.68,10.5 +7.4,0.26,0.31,7.6,0.047,52.0,177.0,0.9962,3.13,0.45,8.9 +7.0,0.28,0.36,1.0,0.035,8.0,70.0,0.9899,3.09,0.46,12.1 +7.1,0.23,0.39,1.6,0.032,12.0,65.0,0.9898,3.25,0.4,12.7 +7.8,0.19,0.26,8.9,0.039,42.0,182.0,0.996,3.18,0.46,9.9 +6.3,0.19,0.28,1.8,0.022,28.0,158.0,0.9907,3.2,0.64,11.4 +6.8,0.2,0.38,4.7,0.04,27.0,103.0,0.994,3.37,0.58,10.7 +5.7,0.44,0.13,7.0,0.025,28.0,173.0,0.9913,3.33,0.48,12.5 +7.2,0.4,0.62,10.8,0.041,70.0,189.0,0.9976,3.08,0.49,8.6 +6.8,0.23,0.32,1.6,0.026,43.0,147.0,0.9904,3.29,0.54,12.5 +5.7,0.335,0.34,1.0,0.04,13.0,174.0,0.992,3.27,0.66,10.0 +7.2,0.4,0.62,10.8,0.041,70.0,189.0,0.9976,3.08,0.49,8.6 +7.2,0.28,0.54,16.7,0.045,54.0,200.0,0.999,3.08,0.49,9.5 +6.8,0.19,0.58,14.2,0.038,51.0,164.0,0.9975,3.12,0.48,9.6 +6.4,0.3,0.3,2.25,0.038,8.0,210.0,0.9937,3.2,0.62,9.9 +6.5,0.3,0.29,2.25,0.037,8.0,210.0,0.9937,3.19,0.62,9.9 +7.8,0.18,0.31,12.2,0.053,46.0,140.0,0.998,3.06,0.53,8.9 +7.8,0.18,0.31,12.2,0.053,46.0,140.0,0.998,3.06,0.53,8.9 +7.3,0.51,0.26,3.3,0.09,7.0,135.0,0.9944,3.01,0.52,8.8 +6.0,0.24,0.27,1.9,0.048,40.0,170.0,0.9938,3.64,0.54,10.0 +5.9,0.62,0.28,3.5,0.039,55.0,152.0,0.9907,3.44,0.44,12.0 +6.0,0.24,0.27,1.9,0.048,40.0,170.0,0.9938,3.64,0.54,10.0 +6.7,0.27,0.12,1.3,0.041,62.0,138.0,0.9921,3.21,0.42,10.0 +7.8,0.34,0.35,1.8,0.042,8.0,167.0,0.9908,3.11,0.41,12.1 +7.3,0.26,0.36,5.2,0.04,31.0,141.0,0.9931,3.16,0.59,11.0 +7.4,0.36,0.33,1.4,0.025,27.0,55.0,0.9915,3.21,0.33,11.2 +7.8,0.28,0.32,9.0,0.036,34.0,115.0,0.9952,3.17,0.39,10.3 +6.1,0.31,0.26,2.2,0.051,28.0,167.0,0.9926,3.37,0.47,10.4 +6.8,0.18,0.37,1.6,0.055,47.0,154.0,0.9934,3.08,0.45,9.1 +7.4,0.15,0.42,1.7,0.045,49.0,154.0,0.992,3.0,0.6,10.4 +5.9,0.13,0.28,1.9,0.05,20.0,78.0,0.9918,3.43,0.64,10.8 +7.2,0.34,0.34,12.6,0.048,7.0,41.0,0.9942,3.19,0.4,11.7 +7.9,0.19,0.26,2.1,0.039,8.0,143.0,0.9942,3.05,0.74,9.8 +7.9,0.19,0.26,2.1,0.039,8.0,143.0,0.9942,3.05,0.74,9.8 +6.9,0.25,0.4,1.3,0.038,22.0,101.0,0.9901,3.03,0.39,11.4 +5.8,0.36,0.32,1.7,0.033,22.0,96.0,0.9898,3.03,0.38,11.2 +5.6,0.35,0.37,1.0,0.038,6.0,72.0,0.9902,3.37,0.34,11.4 +5.9,0.32,0.39,3.3,0.114,24.0,140.0,0.9934,3.09,0.45,9.2 +7.2,0.31,0.46,5.0,0.04,3.0,29.0,0.9906,3.04,0.53,12.5 +6.1,0.28,0.22,1.8,0.034,32.0,116.0,0.9898,3.36,0.44,12.6 +5.2,0.36,0.02,1.6,0.031,24.0,104.0,0.9896,3.44,0.35,12.2 +5.6,0.19,0.47,4.5,0.03,19.0,112.0,0.9922,3.56,0.45,11.2 +6.4,0.1,0.35,4.9,0.048,31.0,103.0,0.9947,3.43,0.79,9.7 +6.4,0.18,0.48,4.0,0.186,64.0,150.0,0.9945,3.06,0.4,9.3 
+7.4,0.25,0.36,13.2,0.067,53.0,178.0,0.9976,3.01,0.48,9.0 +7.4,0.25,0.36,13.2,0.067,53.0,178.0,0.9976,3.01,0.48,9.0 +7.4,0.25,0.36,13.2,0.067,53.0,178.0,0.9976,3.01,0.48,9.0 +7.9,0.345,0.51,15.3,0.047,54.0,171.0,0.9987,3.09,0.51,9.1 +7.9,0.345,0.51,15.3,0.047,54.0,171.0,0.9987,3.09,0.51,9.1 +7.4,0.25,0.36,13.2,0.067,53.0,178.0,0.9976,3.01,0.48,9.0 +6.1,0.24,0.3,1.5,0.045,22.0,61.0,0.992,3.31,0.54,10.4 +6.8,0.25,0.24,4.55,0.053,41.0,211.0,0.9955,3.37,0.67,9.5 +6.7,0.31,0.31,9.9,0.04,10.0,175.0,0.9953,3.46,0.55,11.4 +7.2,0.46,0.65,10.4,0.05,76.0,192.0,0.9976,3.16,0.42,8.7 +5.5,0.35,0.35,1.1,0.045,14.0,167.0,0.992,3.34,0.68,9.9 +6.7,0.24,0.41,8.7,0.036,29.0,148.0,0.9952,3.22,0.62,9.9 +6.8,0.28,0.17,13.9,0.047,49.0,162.0,0.9983,3.21,0.51,9.0 +6.4,0.16,0.22,1.4,0.04,41.0,149.0,0.9933,3.49,0.58,10.0 +6.3,0.26,0.24,7.2,0.039,38.0,172.0,0.9958,3.49,0.64,9.7 +7.7,0.22,0.42,1.9,0.052,10.0,87.0,0.9922,3.3,0.49,11.8 +6.5,0.18,0.31,1.7,0.044,30.0,127.0,0.9928,3.49,0.5,10.2 +7.2,0.46,0.65,10.4,0.05,76.0,192.0,0.9976,3.16,0.42,8.7 +7.0,0.3,0.51,13.6,0.05,40.0,168.0,0.9976,3.07,0.52,9.6 +9.2,0.25,0.34,1.2,0.026,31.0,93.0,0.9916,2.93,0.37,11.3 +7.8,0.28,0.34,1.6,0.028,32.0,118.0,0.9901,3.0,0.38,12.1 +7.0,0.3,0.51,13.6,0.05,40.0,168.0,0.9976,3.07,0.52,9.6 +7.8,0.28,0.34,1.6,0.028,32.0,118.0,0.9901,3.0,0.38,12.1 +9.2,0.25,0.34,1.2,0.026,31.0,93.0,0.9916,2.93,0.37,11.3 +8.4,0.35,0.71,12.2,0.046,22.0,160.0,0.9982,2.98,0.65,9.4 +6.1,0.41,0.24,1.6,0.049,16.0,137.0,0.993,3.32,0.5,10.4 +5.9,0.21,0.24,12.1,0.044,53.0,165.0,0.9969,3.25,0.39,9.5 +7.2,0.34,0.44,4.2,0.047,51.0,144.0,0.991,3.01,0.76,12.3 +6.7,0.21,0.42,9.1,0.049,31.0,150.0,0.9953,3.12,0.74,9.9 +5.9,0.37,0.1,1.6,0.057,39.0,128.0,0.9924,3.24,0.48,10.1 +7.7,0.34,0.27,8.8,0.063,39.0,184.0,0.9969,3.09,0.63,9.2 +7.4,0.3,0.22,1.4,0.046,16.0,135.0,0.9928,3.08,0.77,10.4 +6.8,0.51,0.3,4.2,0.066,38.0,165.0,0.9945,3.2,0.42,9.1 +7.8,0.22,0.38,10.3,0.059,28.0,99.0,0.9967,3.12,0.47,10.0 +7.2,0.35,0.34,12.4,0.051,6.0,37.0,0.9944,3.13,0.39,11.5 +6.0,0.26,0.5,2.2,0.048,59.0,153.0,0.9928,3.08,0.61,9.8 +6.1,0.26,0.51,2.2,0.05,61.0,154.0,0.9929,3.08,0.6,9.8 +6.5,0.28,0.27,5.2,0.04,44.0,179.0,0.9948,3.19,0.69,9.4 +7.4,0.41,0.66,10.8,0.051,77.0,194.0,0.9976,3.05,0.46,8.7 +6.5,0.28,0.29,2.7,0.038,26.0,107.0,0.9912,3.32,0.41,11.6 +6.7,0.34,0.54,16.3,0.047,44.0,181.0,0.9987,3.04,0.56,8.8 +7.2,0.2,0.34,2.7,0.032,49.0,151.0,0.99,3.16,0.39,12.7 +7.4,0.2,0.33,1.9,0.035,39.0,138.0,0.991,3.17,0.44,11.7 +8.2,0.22,0.3,1.8,0.047,47.0,185.0,0.9933,3.13,0.5,10.2 +8.2,0.23,0.29,1.8,0.047,47.0,187.0,0.9933,3.13,0.5,10.2 +7.1,0.22,0.33,2.8,0.033,48.0,153.0,0.9899,3.15,0.38,12.7 +6.5,0.28,0.29,2.7,0.038,26.0,107.0,0.9912,3.32,0.41,11.6 +6.0,0.38,0.26,6.0,0.034,42.0,134.0,0.9912,3.38,0.38,12.3 +7.4,0.41,0.66,10.8,0.051,77.0,194.0,0.9976,3.05,0.46,8.7 +5.7,0.18,0.22,4.2,0.042,25.0,111.0,0.994,3.35,0.39,9.4 +7.3,0.3,0.22,6.4,0.056,44.0,168.0,0.9947,3.13,0.35,10.1 +7.4,0.24,0.22,10.7,0.042,26.0,81.0,0.9954,2.86,0.36,9.7 +6.6,0.25,0.3,1.6,0.046,32.0,134.0,0.993,3.42,0.51,10.1 +7.4,0.24,0.22,10.7,0.042,26.0,81.0,0.9954,2.86,0.36,9.7 +7.4,0.26,0.3,7.9,0.049,38.0,157.0,0.9963,3.13,0.48,8.9 +6.1,0.32,0.25,1.7,0.034,37.0,136.0,0.992,3.47,0.5,10.8 +6.9,0.28,0.27,2.1,0.036,42.0,121.0,0.9926,3.42,0.49,10.8 +7.0,0.23,0.33,5.8,0.04,25.0,136.0,0.995,3.19,0.58,9.5 +7.1,0.31,0.5,14.5,0.059,6.0,148.0,0.9983,2.94,0.44,9.1 +7.3,0.2,0.37,1.2,0.037,48.0,119.0,0.992,3.32,0.49,10.9 +6.9,0.41,0.33,10.1,0.043,28.0,152.0,0.9968,3.2,0.52,9.4 +6.4,0.45,0.07,1.1,0.03,10.0,131.0,0.9905,2.97,0.28,10.8 
+6.4,0.475,0.06,1.0,0.03,9.0,131.0,0.9904,2.97,0.29,10.8 +6.3,0.27,0.38,0.9,0.051,7.0,140.0,0.9926,3.45,0.5,10.5 +6.9,0.41,0.33,10.1,0.043,28.0,152.0,0.9968,3.2,0.52,9.4 +7.0,0.29,0.37,4.9,0.034,26.0,127.0,0.9928,3.17,0.44,10.8 +5.9,0.27,0.29,11.4,0.036,31.0,115.0,0.9949,3.35,0.48,10.5 +6.9,0.19,0.4,1.4,0.036,14.0,55.0,0.9909,3.08,0.68,11.5 +6.7,0.3,0.35,1.4,0.18,36.0,160.0,0.9937,3.11,0.54,9.4 +7.2,0.24,0.4,1.4,0.045,31.0,106.0,0.9914,2.88,0.38,10.8 +6.4,0.45,0.07,1.1,0.03,10.0,131.0,0.9905,2.97,0.28,10.8 +6.4,0.475,0.06,1.0,0.03,9.0,131.0,0.9904,2.97,0.29,10.8 +6.3,0.26,0.49,1.5,0.052,34.0,134.0,0.9924,2.99,0.61,9.8 +6.3,0.26,0.49,1.5,0.052,34.0,134.0,0.9924,2.99,0.61,9.8 +7.3,0.25,0.29,7.5,0.049,38.0,158.0,0.9965,3.43,0.38,9.6 +7.3,0.25,0.29,7.5,0.049,38.0,158.0,0.9965,3.43,0.38,9.6 +6.1,0.28,0.25,17.75,0.044,48.0,161.0,0.9993,3.34,0.48,9.5 +7.4,0.37,0.35,5.7,0.061,12.0,94.0,0.9965,3.48,0.69,10.7 +6.5,0.36,0.28,3.2,0.037,29.0,119.0,0.9908,3.25,0.65,12.4 +7.4,0.24,0.4,4.3,0.032,9.0,95.0,0.992,3.09,0.39,11.1 +7.5,0.23,0.68,11.0,0.047,37.0,133.0,0.9978,2.99,0.38,8.8 +7.5,0.21,0.68,10.9,0.045,38.0,133.0,0.9978,3.0,0.36,8.7 +7.5,0.21,0.68,10.9,0.045,38.0,133.0,0.9978,3.0,0.36,8.7 +7.5,0.23,0.68,11.0,0.047,37.0,133.0,0.9978,2.99,0.38,8.8 +7.8,0.32,0.33,2.4,0.037,18.0,101.0,0.9912,3.21,0.65,11.7 +7.8,0.26,0.27,1.9,0.051,52.0,195.0,0.9928,3.23,0.5,10.9 +7.7,0.24,0.27,1.8,0.051,52.0,190.0,0.9928,3.23,0.5,10.8 +7.4,0.19,0.3,1.4,0.057,33.0,135.0,0.993,3.12,0.5,9.6 +6.5,0.46,0.41,16.8,0.084,59.0,222.0,0.9993,3.18,0.58,9.0 +6.5,0.26,0.43,8.9,0.083,50.0,171.0,0.9965,2.85,0.5,9.0 +5.3,0.32,0.12,6.6,0.043,22.0,141.0,0.9937,3.36,0.6,10.4 +7.2,0.24,0.34,1.1,0.045,3.0,64.0,0.9913,3.23,0.51,11.4 +6.0,0.36,0.06,1.4,0.066,27.0,128.0,0.9934,3.26,0.55,9.3 +6.2,0.24,0.29,13.3,0.039,49.0,130.0,0.9952,3.33,0.46,11.0 +7.6,0.56,0.12,10.4,0.096,22.0,177.0,0.9983,3.32,0.45,9.1 +7.0,0.32,0.24,6.2,0.048,31.0,228.0,0.9957,3.23,0.62,9.4 +7.0,0.32,0.24,6.2,0.048,31.0,228.0,0.9957,3.23,0.62,9.4 +5.8,0.31,0.33,1.2,0.036,23.0,99.0,0.9916,3.18,0.6,10.5 +7.0,0.23,0.42,18.05,0.05,35.0,144.0,0.9999,3.22,0.42,8.8 +7.0,0.23,0.42,18.05,0.05,35.0,144.0,0.9999,3.22,0.42,8.8 +6.9,0.24,0.33,4.8,0.04,16.0,131.0,0.9936,3.26,0.64,10.7 +6.0,0.29,0.2,12.6,0.045,45.0,187.0,0.9972,3.33,0.42,9.5 +6.1,0.17,0.28,4.5,0.033,46.0,150.0,0.9933,3.43,0.49,10.9 +5.9,0.14,0.25,4.5,0.027,34.0,140.0,0.9934,3.49,0.51,10.8 +6.2,0.17,0.28,4.7,0.037,39.0,133.0,0.9931,3.41,0.46,10.8 +7.4,0.28,0.25,11.9,0.053,25.0,148.0,0.9976,3.1,0.62,9.2 +5.6,0.35,0.14,5.0,0.046,48.0,198.0,0.9937,3.3,0.71,10.3 +5.8,0.335,0.14,5.8,0.046,49.0,197.0,0.9937,3.3,0.71,10.3 +5.6,0.235,0.29,1.2,0.047,33.0,127.0,0.991,3.34,0.5,11.0 +6.1,0.28,0.25,12.9,0.054,34.0,189.0,0.9979,3.25,0.43,9.0 +6.3,0.21,0.33,13.9,0.046,68.0,179.0,0.9971,3.36,0.5,10.4 +6.4,0.24,0.28,11.5,0.05,34.0,163.0,0.9969,3.31,0.45,9.5 +6.4,0.24,0.29,11.4,0.051,32.0,166.0,0.9968,3.31,0.45,9.5 +6.3,0.26,0.25,7.8,0.058,44.0,166.0,0.9961,3.24,0.41,9.0 +6.5,0.33,0.72,1.1,0.061,7.0,151.0,0.993,3.09,0.57,9.5 +7.4,0.105,0.34,12.2,0.05,57.0,146.0,0.9973,3.16,0.37,9.0 +6.0,0.32,0.12,5.9,0.041,34.0,190.0,0.9944,3.16,0.72,10.0 +7.1,0.26,0.34,14.4,0.067,35.0,189.0,0.9986,3.07,0.53,9.1 +7.1,0.26,0.34,14.4,0.067,35.0,189.0,0.9986,3.07,0.53,9.1 +7.1,0.26,0.34,14.4,0.067,35.0,189.0,0.9986,3.07,0.53,9.1 +7.1,0.26,0.34,14.4,0.067,35.0,189.0,0.9986,3.07,0.53,9.1 +5.9,0.24,0.26,12.3,0.053,34.0,134.0,0.9972,3.34,0.45,9.5 +6.5,0.21,0.37,2.5,0.048,70.0,138.0,0.9917,3.33,0.75,11.4 +7.7,0.27,0.35,5.3,0.03,30.0,117.0,0.992,3.11,0.42,12.2 
+9.0,0.27,0.35,4.9,0.028,27.0,95.0,0.9932,3.04,0.4,11.3 +7.3,0.34,0.21,3.2,0.05,14.0,136.0,0.9936,3.25,0.44,10.2 +6.6,0.27,0.25,3.1,0.052,41.0,188.0,0.9915,3.24,0.4,11.3 +6.8,0.29,0.16,1.4,0.038,122.5,234.5,0.9922,3.15,0.47,10.0 +7.1,0.28,0.26,1.9,0.049,12.0,86.0,0.9934,3.15,0.38,9.4 +6.8,0.25,0.34,14.0,0.032,47.0,133.0,0.9952,3.37,0.5,12.2 +7.0,0.57,0.1,8.3,0.094,23.0,188.0,0.9972,3.4,0.47,9.2 +7.1,0.28,0.26,1.9,0.049,12.0,86.0,0.9934,3.15,0.38,9.4 +7.1,0.17,0.38,7.4,0.052,49.0,182.0,0.9958,3.35,0.52,9.6 +7.8,0.28,0.22,1.4,0.056,24.0,130.0,0.9944,3.28,0.48,9.5 +6.8,0.22,0.37,1.7,0.036,38.0,195.0,0.9908,3.35,0.72,12.5 +7.1,0.17,0.38,7.4,0.052,49.0,182.0,0.9958,3.35,0.52,9.6 +6.1,0.14,0.25,1.3,0.047,37.0,173.0,0.9925,3.35,0.46,10.0 +6.4,0.24,0.5,11.6,0.047,60.0,211.0,0.9966,3.18,0.57,9.3 +7.8,0.42,0.26,9.2,0.058,34.0,199.0,0.9972,3.14,0.55,9.3 +6.6,0.28,0.36,1.7,0.038,22.0,101.0,0.9912,3.29,0.57,11.6 +7.1,0.32,0.34,14.5,0.039,46.0,150.0,0.995,3.38,0.5,12.5 +6.7,0.31,0.3,2.1,0.038,18.0,130.0,0.9928,3.36,0.63,10.6 +6.4,0.32,0.5,10.7,0.047,57.0,206.0,0.9968,3.08,0.6,9.4 +6.1,0.28,0.25,6.9,0.056,44.0,201.0,0.9955,3.19,0.4,9.1 +5.9,0.29,0.25,12.0,0.057,48.0,224.0,0.9981,3.23,0.41,9.0 +5.8,0.32,0.38,4.75,0.033,23.0,94.0,0.991,3.42,0.42,11.8 +5.8,0.32,0.38,4.75,0.033,23.0,94.0,0.991,3.42,0.42,11.8 +5.7,0.32,0.38,4.75,0.033,23.0,94.0,0.991,3.42,0.42,11.8 +6.7,0.28,0.14,1.4,0.043,64.0,159.0,0.992,3.17,0.39,10.0 +6.8,0.34,0.69,1.3,0.058,12.0,171.0,0.9931,3.06,0.47,9.7 +5.9,0.25,0.25,11.3,0.052,30.0,165.0,0.997,3.24,0.44,9.5 +6.4,0.27,0.32,4.5,0.24,61.0,174.0,0.9948,3.12,0.48,9.4 +8.1,0.46,0.31,1.7,0.052,50.0,183.0,0.9923,3.03,0.42,11.2 +6.2,0.36,0.26,13.2,0.051,54.0,201.0,0.9976,3.25,0.46,9.0 +6.8,0.22,0.35,5.5,0.043,21.0,114.0,0.9938,3.3,0.53,10.7 +6.8,0.67,0.3,13.0,0.29,22.0,193.0,0.9984,3.08,0.67,9.0 +7.2,0.28,0.3,10.7,0.044,61.0,222.0,0.9972,3.14,0.5,9.1 +6.7,0.17,0.37,2.0,0.039,34.0,125.0,0.9922,3.26,0.6,10.8 +6.9,0.2,0.34,1.9,0.043,25.0,136.0,0.9935,3.31,0.6,10.1 +6.1,0.36,0.16,6.4,0.037,36.0,198.0,0.9944,3.17,0.62,9.9 +6.0,0.36,0.16,6.3,0.036,36.0,191.0,0.9942,3.17,0.62,9.8 +5.9,0.37,0.14,6.3,0.036,34.0,185.0,0.9944,3.17,0.63,9.8 +7.6,0.29,0.58,17.5,0.041,51.0,225.0,0.9997,3.16,0.66,9.5 +6.3,0.34,0.28,14.7,0.047,49.0,198.0,0.9977,3.23,0.46,9.5 +6.7,0.19,0.34,1.0,0.022,22.0,94.0,0.9912,3.23,0.57,11.1 +7.5,0.31,0.51,14.8,0.039,62.0,204.0,0.9982,3.06,0.6,9.5 +7.5,0.31,0.51,14.8,0.039,62.0,204.0,0.9982,3.06,0.6,9.5 +7.4,0.31,0.48,14.2,0.042,62.0,204.0,0.9983,3.06,0.59,9.4 +8.4,0.4,0.7,13.1,0.042,29.0,197.0,0.998,3.06,0.64,9.7 +5.9,0.34,0.22,2.4,0.03,19.0,135.0,0.9894,3.41,0.78,13.9 +6.6,0.38,0.18,1.2,0.042,20.0,84.0,0.9927,3.22,0.45,10.1 +6.4,0.33,0.28,1.1,0.038,30.0,110.0,0.9917,3.12,0.42,10.5 +5.6,0.25,0.26,3.6,0.037,18.0,115.0,0.9904,3.42,0.5,12.6 +8.6,0.27,0.46,6.1,0.032,13.0,41.0,0.993,2.89,0.34,10.9 +6.2,0.31,0.21,6.3,0.041,50.0,218.0,0.9941,3.15,0.6,10.0 +7.2,0.18,0.45,4.4,0.046,57.0,166.0,0.9943,3.13,0.62,11.2 +7.7,0.2,0.44,13.9,0.05,44.0,130.0,0.99855,3.11,0.48,10.0 +6.2,0.47,0.21,1.0,0.044,13.0,98.0,0.99345,3.14,0.46,9.2 +6.1,0.25,0.24,12.1,0.046,51.0,172.0,0.998,3.35,0.45,9.5 +8.2,0.27,0.43,1.6,0.035,31.0,128.0,0.9916,3.1,0.5,12.3 +8.2,0.27,0.43,1.6,0.035,31.0,128.0,0.9916,3.1,0.5,12.3 +6.4,0.31,0.39,7.5,0.04,57.0,213.0,0.99475,3.32,0.43,10.0 +6.0,0.39,0.26,2.7,0.038,39.0,187.0,0.99325,3.41,0.5,10.8 +6.2,0.21,0.27,1.7,0.038,41.0,150.0,0.9933,3.49,0.71,10.5 +7.7,0.42,0.31,9.2,0.048,22.0,221.0,0.9969,3.06,0.61,9.2 +7.0,0.27,0.41,18.75,0.042,34.0,157.0,1.0002,2.96,0.5,9.1 
+6.2,0.21,0.27,1.7,0.038,41.0,150.0,0.9933,3.49,0.71,10.5 +7.4,0.29,0.5,1.8,0.042,35.0,127.0,0.9937,3.45,0.5,10.2 +6.6,0.29,0.44,9.0,0.053,62.0,178.0,0.99685,3.02,0.45,8.9 +6.0,0.3,0.44,1.5,0.046,15.0,182.0,0.99455,3.5,0.52,10.4 +6.9,0.31,0.34,1.6,0.032,23.0,128.0,0.9917,3.37,0.47,11.7 +6.6,0.33,0.31,1.3,0.02,29.0,89.0,0.99035,3.26,0.44,12.4 +7.8,0.3,0.4,1.8,0.028,23.0,122.0,0.9914,3.14,0.39,10.9 +6.4,0.39,0.21,1.2,0.041,35.0,136.0,0.99225,3.15,0.46,10.2 +6.4,0.24,0.31,2.8,0.038,41.0,114.0,0.99155,3.37,0.66,11.7 +7.0,0.21,0.34,8.0,0.057,19.0,101.0,0.9954,2.99,0.59,9.4 +6.4,0.16,0.31,5.3,0.043,42.0,157.0,0.99455,3.35,0.47,10.5 +6.0,0.33,0.27,0.8,0.185,12.0,188.0,0.9924,3.12,0.62,9.4 +6.5,0.23,0.33,13.8,0.042,25.0,139.0,0.99695,3.35,0.56,10.4 +6.2,0.25,0.48,10.0,0.044,78.0,240.0,0.99655,3.25,0.47,9.5 +8.8,0.28,0.45,6.0,0.022,14.0,49.0,0.9934,3.01,0.33,11.1 +6.6,0.25,0.3,14.4,0.052,40.0,183.0,0.998,3.02,0.5,9.1 +6.9,0.38,0.25,9.8,0.04,28.0,191.0,0.9971,3.28,0.61,9.2 +6.4,0.25,0.3,5.5,0.038,15.0,129.0,0.9948,3.14,0.49,9.6 +6.6,0.25,0.3,14.4,0.052,40.0,183.0,0.998,3.02,0.5,9.1 +6.9,0.38,0.25,9.8,0.04,28.0,191.0,0.9971,3.28,0.61,9.2 +7.1,0.21,0.31,3.8,0.021,40.0,142.0,0.99215,3.17,0.39,10.8 +6.4,0.25,0.3,5.5,0.038,15.0,129.0,0.9948,3.14,0.49,9.6 +6.9,0.39,0.4,4.6,0.022,5.0,19.0,0.9915,3.31,0.37,12.6 +5.8,0.2,0.3,1.5,0.031,21.0,57.0,0.99115,3.44,0.55,11.0 +7.0,0.2,0.37,2.0,0.03,26.0,136.0,0.9932,3.28,0.61,10.2 +5.9,0.26,0.25,12.5,0.034,38.0,152.0,0.9977,3.33,0.43,9.4 +7.4,0.38,0.27,7.5,0.041,24.0,160.0,0.99535,3.17,0.43,10.0 +7.4,0.2,1.66,2.1,0.022,34.0,113.0,0.99165,3.26,0.55,12.2 +7.0,0.21,0.34,8.5,0.033,31.0,253.0,0.9953,3.22,0.56,10.5 +7.2,0.29,0.4,7.6,0.024,56.0,177.0,0.9928,3.04,0.32,11.5 +6.9,0.18,0.38,8.1,0.049,44.0,176.0,0.9958,3.3,0.54,9.8 +7.3,0.3,0.42,7.35,0.025,51.0,175.0,0.9928,3.04,0.32,11.4 +7.2,0.29,0.4,7.6,0.024,56.0,177.0,0.9928,3.04,0.32,11.5 +6.9,0.2,0.5,10.0,0.036,78.0,167.0,0.9964,3.15,0.55,10.2 +6.7,0.2,0.42,14.0,0.038,83.0,160.0,0.9987,3.16,0.5,9.4 +7.0,0.21,0.34,8.5,0.033,31.0,253.0,0.9953,3.22,0.56,10.5 +5.9,0.35,0.47,2.2,0.11,14.0,138.0,0.9932,3.09,0.5,9.1 +7.1,0.28,0.44,1.8,0.032,32.0,107.0,0.9907,3.25,0.48,12.2 +5.8,0.25,0.28,11.1,0.056,45.0,175.0,0.99755,3.42,0.43,9.5 +6.8,0.22,0.37,15.2,0.051,68.0,178.0,0.99935,3.4,0.85,9.3 +7.1,0.14,0.4,1.2,0.051,55.0,136.0,0.9932,3.3,0.96,9.8 +7.1,0.13,0.4,1.2,0.047,54.0,134.0,0.9932,3.3,0.97,9.8 +6.9,0.18,0.38,8.1,0.049,44.0,176.0,0.9958,3.3,0.54,9.8 +7.0,0.2,0.38,8.1,0.05,42.0,173.0,0.99585,3.3,0.54,9.8 +6.8,0.24,0.49,19.3,0.057,55.0,247.0,1.00055,3.0,0.56,8.7 +5.0,0.44,0.04,18.6,0.039,38.0,128.0,0.9985,3.37,0.57,10.2 +6.3,0.3,0.28,5.0,0.042,36.0,168.0,0.99505,3.22,0.69,9.5 +7.2,0.27,0.42,1.6,0.05,35.0,135.0,0.992,2.94,0.46,11.0 +6.7,0.5,0.63,13.4,0.078,81.0,238.0,0.9988,3.08,0.44,9.2 +6.8,0.2,0.36,1.6,0.028,7.0,46.0,0.99175,3.21,0.6,10.9 +6.7,0.11,0.34,8.8,0.043,41.0,113.0,0.9962,3.42,0.4,9.3 +6.7,0.11,0.34,8.8,0.043,41.0,113.0,0.9962,3.42,0.4,9.3 +6.8,0.12,0.31,5.2,0.045,29.0,120.0,0.9942,3.41,0.46,9.8 +6.6,0.16,0.57,1.1,0.13,58.0,140.0,0.9927,3.12,0.39,9.3 +6.6,0.21,0.6,1.1,0.135,61.0,144.0,0.9927,3.12,0.39,9.3 +6.1,0.27,0.3,16.7,0.039,49.0,172.0,0.99985,3.4,0.45,9.4 +9.1,0.27,0.45,10.6,0.035,28.0,124.0,0.997,3.2,0.46,10.4 +6.4,0.225,0.48,2.2,0.115,29.0,104.0,0.9918,3.24,0.58,12.1 +8.3,0.14,0.45,1.5,0.039,18.0,98.0,0.99215,3.02,0.56,11.0 +7.2,0.23,0.19,13.7,0.052,47.0,197.0,0.99865,3.12,0.53,9.0 +6.9,0.22,0.37,15.0,0.053,59.0,178.0,0.9992,3.37,0.82,9.5 +8.1,0.17,0.44,14.1,0.053,43.0,145.0,1.0006,3.28,0.75,8.8 
+6.0,0.395,0.0,1.4,0.042,7.0,55.0,0.99135,3.37,0.38,11.2 +7.8,0.29,0.22,9.5,0.056,44.0,213.0,0.99715,3.08,0.61,9.3 +6.9,0.22,0.37,15.0,0.053,59.0,178.0,0.9992,3.37,0.82,9.5 +8.1,0.17,0.44,14.1,0.053,43.0,145.0,1.0006,3.28,0.75,8.8 +7.2,0.23,0.19,13.7,0.052,47.0,197.0,0.99865,3.12,0.53,9.0 +7.6,0.3,0.27,10.6,0.039,31.0,119.0,0.99815,3.27,0.3,9.3 +7.7,0.34,0.28,11.0,0.04,31.0,117.0,0.99815,3.27,0.29,9.2 +7.7,0.34,0.28,11.0,0.04,31.0,117.0,0.99815,3.27,0.29,9.2 +5.8,0.34,0.16,7.0,0.037,26.0,116.0,0.9949,3.46,0.45,10.0 +7.6,0.3,0.27,10.6,0.039,31.0,119.0,0.99815,3.27,0.3,9.3 +7.7,0.34,0.28,11.0,0.04,31.0,117.0,0.99815,3.27,0.29,9.2 +5.9,0.24,0.3,2.0,0.033,28.0,92.0,0.99225,3.39,0.69,10.9 +6.4,0.46,0.08,4.9,0.046,34.0,144.0,0.99445,3.1,0.56,10.0 +5.9,0.24,0.3,2.0,0.033,28.0,92.0,0.99225,3.39,0.69,10.9 +7.4,0.32,0.27,1.4,0.049,38.0,173.0,0.99335,3.03,0.52,9.3 +7.2,0.31,0.26,7.3,0.05,37.0,157.0,0.99625,3.09,0.43,9.0 +7.8,0.42,0.23,8.8,0.054,42.0,215.0,0.9971,3.02,0.58,9.2 +6.9,0.24,0.33,12.5,0.046,47.0,153.0,0.9983,3.28,0.77,9.6 +5.4,0.18,0.24,4.8,0.041,30.0,113.0,0.99445,3.42,0.4,9.4 +6.0,0.18,0.31,1.4,0.036,14.0,75.0,0.99085,3.34,0.58,11.1 +7.8,0.27,0.58,11.2,0.036,44.0,161.0,0.9977,3.06,0.41,8.9 +6.0,0.28,0.49,6.8,0.048,61.0,222.0,0.9953,3.19,0.47,9.3 +6.8,0.39,0.35,11.6,0.044,57.0,220.0,0.99775,3.07,0.53,9.3 +6.6,0.21,0.31,11.4,0.039,46.0,165.0,0.99795,3.41,0.44,9.8 +7.3,0.32,0.34,6.6,0.032,24.0,112.0,0.99505,3.22,0.46,9.8 +7.8,0.27,0.58,11.2,0.036,44.0,161.0,0.9977,3.06,0.41,8.9 +6.4,0.31,0.26,13.2,0.046,57.0,205.0,0.9975,3.17,0.41,9.6 +6.2,0.29,0.26,13.1,0.046,55.0,204.0,0.99745,3.16,0.41,9.6 +6.0,0.39,0.17,12.0,0.046,65.0,246.0,0.9976,3.15,0.38,9.0 +6.2,0.3,0.26,13.4,0.046,57.0,206.0,0.99775,3.17,0.43,9.5 +6.0,0.28,0.49,6.8,0.048,61.0,222.0,0.9953,3.19,0.47,9.3 +6.0,0.41,0.05,1.5,0.063,17.0,120.0,0.9932,3.21,0.56,9.2 +6.4,0.35,0.28,1.1,0.055,9.0,160.0,0.99405,3.42,0.5,9.1 +6.5,0.26,0.32,16.5,0.045,44.0,166.0,1.0,3.38,0.46,9.5 +7.9,0.35,0.24,15.6,0.072,44.0,229.0,0.99785,3.03,0.59,10.5 +6.2,0.3,0.17,2.8,0.04,24.0,125.0,0.9939,3.01,0.46,9.0 +8.4,0.18,0.42,5.1,0.036,7.0,77.0,0.9939,3.16,0.52,11.7 +6.6,0.56,0.22,8.9,0.034,27.0,133.0,0.99675,3.2,0.51,9.1 +6.2,0.3,0.17,2.8,0.04,24.0,125.0,0.9939,3.01,0.46,9.0 +6.6,0.56,0.22,8.9,0.034,27.0,133.0,0.99675,3.2,0.51,9.1 +6.6,0.36,0.29,1.6,0.021,24.0,85.0,0.98965,3.41,0.61,12.4 +7.3,0.655,0.2,10.2,0.071,28.0,212.0,0.9971,2.96,0.58,9.2 +6.8,0.18,0.21,5.4,0.053,34.0,104.0,0.99445,3.3,0.43,9.4 +6.7,0.19,0.23,6.2,0.047,36.0,117.0,0.9945,3.34,0.43,9.6 +8.4,0.18,0.42,5.1,0.036,7.0,77.0,0.9939,3.16,0.52,11.7 +7.0,0.21,0.37,7.2,0.042,36.0,167.0,0.9958,3.26,0.56,9.8 +6.8,0.25,0.38,8.1,0.046,24.0,155.0,0.9956,3.33,0.59,10.2 +7.4,0.24,0.36,2.0,0.031,27.0,139.0,0.99055,3.28,0.48,12.5 +7.1,0.16,0.36,10.7,0.044,20.0,90.0,0.9959,3.16,0.44,10.9 +7.1,0.16,0.36,1.2,0.043,21.0,90.0,0.9925,3.16,0.42,11.0 +7.3,0.205,0.31,1.7,0.06,34.0,110.0,0.9963,3.72,0.69,10.5 +7.4,0.17,0.4,5.5,0.037,34.0,161.0,0.9935,3.05,0.62,11.5 +7.3,0.3,0.34,2.7,0.044,34.0,108.0,0.99105,3.36,0.53,12.8 +6.9,0.25,0.34,1.3,0.035,27.0,82.0,0.99045,3.18,0.44,12.2 +7.3,0.205,0.31,1.7,0.06,34.0,110.0,0.9963,3.72,0.69,10.5 +7.5,0.42,0.34,4.3,0.04,34.0,108.0,0.99155,3.14,0.45,12.8 +7.3,0.25,0.36,2.1,0.034,30.0,177.0,0.99085,3.25,0.4,11.9 +7.3,0.25,0.36,2.1,0.034,30.0,177.0,0.99085,3.25,0.4,11.9 +7.3,0.25,0.36,2.1,0.034,30.0,177.0,0.99085,3.25,0.4,11.9 +7.5,0.34,0.35,6.0,0.034,12.0,126.0,0.9924,3.16,0.39,12.0 +7.6,0.33,0.35,6.3,0.036,12.0,126.0,0.9924,3.16,0.39,12.0 
+8.7,0.23,0.32,13.4,0.044,35.0,169.0,0.99975,3.12,0.47,8.8 +8.7,0.23,0.32,13.4,0.044,35.0,169.0,0.99975,3.12,0.47,8.8 +6.9,0.19,0.35,1.7,0.036,33.0,101.0,0.99315,3.21,0.54,10.8 +7.3,0.21,0.29,1.6,0.034,29.0,118.0,0.9917,3.3,0.5,11.0 +7.3,0.21,0.29,1.6,0.034,29.0,118.0,0.9917,3.3,0.5,11.0 +6.6,0.22,0.37,15.4,0.035,62.0,153.0,0.99845,3.02,0.4,9.3 +9.2,0.34,0.27,1.2,0.026,17.0,73.0,0.9921,3.08,0.39,10.8 +8.7,0.23,0.32,13.4,0.044,35.0,169.0,0.99975,3.12,0.47,8.8 +6.0,0.2,0.24,1.8,0.03,30.0,105.0,0.9909,3.31,0.47,11.5 +6.9,0.19,0.35,1.7,0.036,33.0,101.0,0.99315,3.21,0.54,10.8 +8.2,0.38,0.49,13.6,0.042,58.0,166.0,0.99855,3.1,0.54,9.4 +6.9,0.18,0.36,1.3,0.036,40.0,117.0,0.9934,3.27,0.95,9.5 +7.7,0.34,0.58,11.1,0.039,41.0,151.0,0.9978,3.06,0.49,8.6 +6.9,0.18,0.36,1.3,0.036,40.0,117.0,0.9934,3.27,0.95,9.5 +7.4,0.2,0.35,2.1,0.038,30.0,116.0,0.9949,3.49,0.77,10.3 +8.2,0.38,0.49,13.6,0.042,58.0,166.0,0.99855,3.1,0.54,9.4 +8.2,0.4,0.48,13.7,0.042,59.0,169.0,0.9986,3.1,0.52,9.4 +6.7,0.22,0.39,10.2,0.038,60.0,149.0,0.99725,3.17,0.54,10.0 +6.6,0.3,0.3,4.8,0.17,60.0,166.0,0.9946,3.18,0.47,9.4 +8.1,0.27,0.35,1.7,0.03,38.0,103.0,0.99255,3.22,0.63,10.4 +7.3,0.25,0.42,14.2,0.041,57.0,182.0,0.9996,3.29,0.75,9.1 +4.8,0.34,0.0,6.5,0.028,33.0,163.0,0.9939,3.36,0.61,9.9 +6.2,0.28,0.33,1.7,0.029,24.0,111.0,0.99,3.24,0.5,12.1 +4.8,0.33,0.0,6.5,0.028,34.0,163.0,0.9937,3.35,0.61,9.9 +6.1,0.27,0.33,2.2,0.021,26.0,117.0,0.9886,3.12,0.3,12.5 +6.9,0.18,0.36,1.3,0.036,40.0,117.0,0.9934,3.27,0.95,9.5 +7.8,0.18,0.46,12.6,0.042,41.0,143.0,1.0,3.24,0.76,8.5 +7.3,0.28,0.42,14.4,0.04,49.0,173.0,0.9994,3.28,0.82,9.0 +7.3,0.24,0.29,1.2,0.037,37.0,97.0,0.9926,3.19,0.7,10.1 +6.0,0.45,0.65,9.7,0.08,11.0,159.0,0.9956,3.04,0.48,9.4 +7.7,0.34,0.58,11.1,0.039,41.0,151.0,0.9978,3.06,0.49,8.6 +6.3,0.26,0.21,4.0,0.03,24.0,125.0,0.9915,3.06,0.34,10.7 +10.3,0.17,0.47,1.4,0.037,5.0,33.0,0.9939,2.89,0.28,9.6 +7.7,0.15,0.29,1.3,0.029,10.0,64.0,0.9932,3.35,0.39,10.1 +7.1,0.21,0.32,2.2,0.037,28.0,141.0,0.993,3.2,0.57,10.0 +6.9,0.36,0.34,4.2,0.018,57.0,119.0,0.9898,3.28,0.36,12.7 +6.0,0.28,0.34,1.6,0.119,33.0,104.0,0.9921,3.19,0.38,10.2 +6.2,0.16,0.54,1.4,0.126,37.0,110.0,0.9932,3.23,0.37,8.9 +6.9,0.12,0.36,2.2,0.037,18.0,111.0,0.9919,3.41,0.82,11.9 +7.1,0.21,0.32,2.2,0.037,28.0,141.0,0.993,3.2,0.57,10.0 +8.8,0.36,0.44,1.9,0.04,9.0,121.0,0.9953,3.19,0.48,9.9 +7.4,0.26,0.43,6.0,0.022,22.0,125.0,0.9928,3.13,0.55,11.5 +7.4,0.26,0.43,6.0,0.022,22.0,125.0,0.9928,3.13,0.55,11.5 +6.8,0.23,0.29,12.2,0.035,38.0,236.0,0.9976,3.35,0.52,9.8 +6.1,0.34,0.27,2.6,0.024,20.0,105.0,0.9906,3.4,0.67,12.2 +7.3,0.26,0.31,1.6,0.04,39.0,173.0,0.9918,3.19,0.51,11.4 +6.5,0.3,0.32,2.0,0.044,34.0,90.0,0.99185,3.37,0.68,11.0 +7.3,0.26,0.31,1.6,0.04,39.0,173.0,0.9918,3.19,0.51,11.4 +6.5,0.3,0.32,2.0,0.044,34.0,90.0,0.99185,3.37,0.68,11.0 +5.0,0.31,0.0,6.4,0.046,43.0,166.0,0.994,3.3,0.63,9.9 +5.8,0.26,0.18,1.2,0.031,40.0,114.0,0.9908,3.42,0.4,11.0 +5.9,0.26,0.3,1.0,0.036,38.0,114.0,0.9928,3.58,0.48,9.4 +7.0,0.31,0.29,1.4,0.037,33.0,128.0,0.9896,3.12,0.36,12.2 +5.8,0.26,0.18,1.2,0.031,40.0,114.0,0.9908,3.42,0.4,11.0 +5.6,0.19,0.39,1.1,0.043,17.0,67.0,0.9918,3.23,0.53,10.3 +6.8,0.18,0.28,8.7,0.047,52.0,242.0,0.9952,3.22,0.53,10.5 +7.0,0.29,0.26,1.6,0.044,12.0,87.0,0.9923,3.08,0.46,10.5 +6.6,0.26,0.29,1.4,0.039,13.0,67.0,0.9915,3.05,0.49,10.9 +6.8,0.18,0.28,8.5,0.047,52.0,242.0,0.9952,3.22,0.53,10.5 +6.6,0.2,0.38,7.9,0.052,30.0,145.0,0.9947,3.32,0.56,11.0 +8.0,0.29,0.29,13.2,0.046,26.0,113.0,0.9983,3.25,0.37,9.7 +6.1,0.28,0.35,12.8,0.048,63.0,229.0,0.9975,3.08,0.4,8.9 
+5.9,0.31,0.3,7.7,0.047,60.0,206.0,0.995,3.2,0.39,9.6 +6.9,0.21,0.28,2.4,0.056,49.0,159.0,0.9944,3.02,0.47,8.8 +8.4,0.19,0.42,1.6,0.047,9.0,101.0,0.994,3.06,0.65,11.1 +8.3,0.27,0.45,1.3,0.048,8.0,72.0,0.9944,3.08,0.61,10.3 +7.1,0.25,0.39,2.1,0.036,30.0,124.0,0.9908,3.28,0.43,12.2 +8.0,0.23,0.37,9.6,0.054,23.0,159.0,0.99795,3.32,0.47,9.8 +7.5,0.24,0.31,13.0,0.049,46.0,217.0,0.9985,3.08,0.53,8.8 +6.3,0.33,0.2,5.8,0.04,24.0,144.0,0.99425,3.15,0.63,9.9 +6.2,0.33,0.19,5.6,0.042,22.0,143.0,0.99425,3.15,0.63,9.9 +6.3,0.34,0.19,5.8,0.041,22.0,145.0,0.9943,3.15,0.63,9.9 +5.8,0.29,0.05,0.8,0.038,11.0,30.0,0.9924,3.36,0.35,9.2 +8.0,0.32,0.26,1.2,0.05,11.5,88.0,0.9946,3.24,0.37,9.5 +5.6,0.29,0.05,0.8,0.038,11.0,30.0,0.9924,3.36,0.35,9.2 +7.4,0.13,0.39,4.7,0.042,36.0,137.0,0.995,3.36,0.56,10.3 +7.7,0.3,0.32,1.6,0.037,23.0,124.0,0.9919,2.93,0.33,11.0 +7.0,0.24,0.34,1.4,0.031,27.0,107.0,0.99,3.06,0.39,11.9 +8.6,0.18,0.4,1.1,0.04,20.0,107.0,0.9923,2.94,0.32,10.2 +7.0,0.11,0.32,4.6,0.057,59.0,144.0,0.9956,3.55,0.44,9.4 +7.7,0.32,0.62,10.6,0.036,56.0,153.0,0.9978,3.13,0.44,8.9 +7.7,0.32,0.62,10.6,0.036,56.0,153.0,0.9978,3.13,0.44,8.9 +6.5,0.26,0.27,12.9,0.044,69.0,215.0,0.9967,3.17,0.43,10.0 +7.9,0.28,0.41,2.0,0.044,50.0,152.0,0.9934,3.45,0.49,10.7 +6.3,0.27,0.23,2.9,0.047,13.0,100.0,0.9936,3.28,0.43,9.8 +5.4,0.595,0.1,2.8,0.042,26.0,80.0,0.9932,3.36,0.38,9.3 +6.7,0.25,0.33,2.9,0.057,52.0,173.0,0.9934,3.02,0.48,9.5 +6.5,0.25,0.35,12.0,0.055,47.0,179.0,0.998,3.58,0.47,10.0 +6.1,0.36,0.58,15.0,0.044,42.0,115.0,0.9978,3.15,0.51,9.0 +7.7,0.17,0.52,5.9,0.017,21.0,84.0,0.9929,3.14,0.4,11.9 +6.4,0.26,0.43,12.6,0.033,64.0,230.0,0.9974,3.08,0.38,8.9 +6.5,0.26,0.28,12.5,0.046,80.0,225.0,0.99685,3.18,0.41,10.0 +5.9,0.29,0.33,7.4,0.037,58.0,205.0,0.99495,3.26,0.41,9.6 +6.2,0.28,0.43,13.0,0.039,64.0,233.0,0.99745,3.08,0.38,8.9 +6.1,0.27,0.44,6.7,0.041,61.0,230.0,0.99505,3.12,0.4,8.9 +6.4,0.43,0.32,1.4,0.048,10.0,67.0,0.992,3.08,0.41,11.4 +6.1,0.36,0.58,15.0,0.044,42.0,115.0,0.9978,3.15,0.51,9.0 +6.2,0.35,0.29,7.3,0.044,56.0,244.0,0.9956,3.36,0.55,10.0 +7.7,0.24,0.29,15.3,0.044,39.0,194.0,0.9982,3.06,0.47,9.6 +6.2,0.34,0.28,7.5,0.034,40.0,197.0,0.99485,3.14,0.6,9.7 +6.3,0.27,0.46,11.75,0.037,61.0,212.0,0.9971,3.25,0.53,9.5 +5.4,0.415,0.19,1.6,0.039,27.0,88.0,0.99265,3.54,0.41,10.0 +6.9,0.48,0.36,3.5,0.03,31.0,135.0,0.9904,3.14,0.38,12.2 +6.5,0.18,0.33,8.0,0.051,16.0,131.0,0.9965,3.28,0.44,8.7 +6.7,0.15,0.29,5.0,0.058,28.0,105.0,0.9946,3.52,0.44,10.2 +8.2,0.345,1.0,18.2,0.047,55.0,205.0,0.99965,2.96,0.43,9.6 +8.5,0.16,0.35,1.6,0.039,24.0,147.0,0.9935,2.96,0.36,10.0 +6.8,0.705,0.25,3.2,0.048,10.0,57.0,0.996,3.36,0.52,9.5 +7.3,0.25,0.39,6.4,0.034,8.0,84.0,0.9942,3.18,0.46,11.5 +7.6,0.345,0.26,1.9,0.043,15.0,134.0,0.9936,3.08,0.38,9.5 +7.6,0.22,0.34,9.7,0.035,26.0,143.0,0.9965,3.08,0.49,9.8 +6.5,0.17,0.33,1.4,0.028,14.0,99.0,0.9928,3.23,0.55,10.1 +8.2,0.23,0.37,1.3,0.042,39.0,117.0,0.9928,2.99,0.36,10.0 +7.6,0.22,0.34,9.7,0.035,26.0,143.0,0.9965,3.08,0.49,9.8 +7.6,0.345,0.26,1.9,0.043,15.0,134.0,0.9936,3.08,0.38,9.5 +7.5,0.32,0.26,1.8,0.042,13.0,133.0,0.9938,3.07,0.38,9.5 +6.6,0.23,0.32,0.9,0.041,25.0,79.0,0.9926,3.39,0.54,10.2 +6.6,0.2,0.32,1.1,0.039,25.0,78.0,0.9926,3.39,0.54,10.2 +7.3,0.24,0.34,15.4,0.05,38.0,174.0,0.9983,3.03,0.42,9.0 +7.3,0.24,0.34,15.4,0.05,38.0,174.0,0.9983,3.03,0.42,9.0 +8.0,0.42,0.36,5.0,0.037,34.0,101.0,0.992,3.13,0.57,12.3 +7.3,0.24,0.34,15.4,0.05,38.0,174.0,0.9983,3.03,0.42,9.0 +6.1,0.19,0.25,4.0,0.023,23.0,112.0,0.9923,3.37,0.51,11.6 +5.9,0.26,0.21,12.5,0.034,36.0,152.0,0.9972,3.28,0.43,9.5 
+8.3,0.23,0.43,3.2,0.035,14.0,101.0,0.9928,3.15,0.36,11.5 +6.5,0.34,0.28,1.8,0.041,43.0,188.0,0.9928,3.13,0.37,9.6 +6.8,0.22,0.35,17.5,0.039,38.0,153.0,0.9994,3.24,0.42,9.0 +6.5,0.08,0.33,1.9,0.028,23.0,93.0,0.991,3.34,0.7,12.0 +5.5,0.42,0.09,1.6,0.019,18.0,68.0,0.9906,3.33,0.51,11.4 +5.1,0.42,0.01,1.5,0.017,25.0,102.0,0.9894,3.38,0.36,12.3 +6.0,0.27,0.19,1.7,0.02,24.0,110.0,0.9898,3.32,0.47,12.6 +6.8,0.22,0.35,17.5,0.039,38.0,153.0,0.9994,3.24,0.42,9.0 +6.5,0.08,0.33,1.9,0.028,23.0,93.0,0.991,3.34,0.7,12.0 +7.1,0.13,0.38,1.8,0.046,14.0,114.0,0.9925,3.32,0.9,11.7 +7.6,0.3,0.25,4.3,0.054,22.0,111.0,0.9956,3.12,0.49,9.2 +6.6,0.13,0.3,4.9,0.058,47.0,131.0,0.9946,3.51,0.45,10.3 +6.5,0.14,0.33,7.6,0.05,53.0,189.0,0.9966,3.25,0.49,8.6 +7.7,0.28,0.33,6.7,0.037,32.0,155.0,0.9951,3.39,0.62,10.7 +6.0,0.2,0.71,1.6,0.15,10.0,54.0,0.9927,3.12,0.47,9.8 +6.0,0.19,0.71,1.5,0.152,9.0,55.0,0.9927,3.12,0.46,9.8 +7.7,0.28,0.33,6.7,0.037,32.0,155.0,0.9951,3.39,0.62,10.7 +5.1,0.39,0.21,1.7,0.027,15.0,72.0,0.9894,3.5,0.45,12.5 +5.7,0.36,0.34,4.2,0.026,21.0,77.0,0.9907,3.41,0.45,11.9 +6.9,0.19,0.33,1.6,0.043,63.0,149.0,0.9925,3.44,0.52,10.8 +6.0,0.41,0.21,1.9,0.05,29.0,122.0,0.9928,3.42,0.52,10.5 +7.4,0.28,0.3,5.3,0.054,44.0,161.0,0.9941,3.12,0.48,10.3 +7.4,0.3,0.3,5.2,0.053,45.0,163.0,0.9941,3.12,0.45,10.3 +6.9,0.19,0.33,1.6,0.043,63.0,149.0,0.9925,3.44,0.52,10.8 +7.7,0.28,0.39,8.9,0.036,8.0,117.0,0.9935,3.06,0.38,12.0 +8.6,0.16,0.38,3.4,0.04,41.0,143.0,0.9932,2.95,0.39,10.2 +8.2,0.26,0.44,1.3,0.046,7.0,69.0,0.9944,3.14,0.62,10.2 +6.5,0.25,0.27,15.2,0.049,75.0,217.0,0.9972,3.19,0.39,9.9 +7.0,0.24,0.18,1.3,0.046,9.0,62.0,0.994,3.38,0.47,10.1 +8.6,0.18,0.36,1.8,0.04,24.0,187.0,0.9956,3.25,0.55,9.5 +7.8,0.27,0.34,1.6,0.046,27.0,154.0,0.9927,3.05,0.45,10.5 +6.0,0.26,0.34,1.3,0.046,6.0,29.0,0.9924,3.29,0.63,10.4 +6.1,0.24,0.27,9.8,0.062,33.0,152.0,0.9966,3.31,0.47,9.5 +8.0,0.24,0.3,17.45,0.056,43.0,184.0,0.9997,3.05,0.5,9.2 +7.6,0.21,0.6,2.1,0.046,47.0,165.0,0.9936,3.05,0.54,10.1 +8.0,0.19,0.36,1.8,0.05,16.0,84.0,0.9936,3.15,0.45,9.8 +6.4,0.28,0.41,6.8,0.045,61.0,216.0,0.9952,3.09,0.46,9.4 +6.4,0.28,0.43,7.1,0.045,60.0,221.0,0.9952,3.09,0.45,9.4 +6.9,0.24,0.39,1.3,0.063,18.0,136.0,0.9928,3.31,0.48,10.4 +5.8,0.36,0.26,3.3,0.038,40.0,153.0,0.9911,3.34,0.55,11.3 +6.6,0.18,0.28,3.3,0.044,18.0,91.0,0.993,3.42,0.64,10.8 +5.8,0.36,0.26,3.3,0.038,40.0,153.0,0.9911,3.34,0.55,11.3 +5.1,0.52,0.06,2.7,0.052,30.0,79.0,0.9932,3.32,0.43,9.3 +6.6,0.22,0.37,1.2,0.059,45.0,199.0,0.993,3.37,0.55,10.3 +8.3,0.15,0.39,1.3,0.055,32.0,146.0,0.993,3.08,0.39,10.5 +7.6,0.16,0.44,1.4,0.043,25.0,109.0,0.9932,3.11,0.75,10.3 +7.7,0.16,0.41,1.7,0.048,60.0,173.0,0.9932,3.24,0.66,11.2 +8.3,0.16,0.48,1.7,0.057,31.0,98.0,0.9943,3.15,0.41,10.3 +6.2,0.25,0.47,11.6,0.048,62.0,210.0,0.9968,3.19,0.5,9.5 +6.1,0.16,0.27,12.6,0.064,63.0,162.0,0.9994,3.66,0.43,8.9 +7.6,0.39,0.22,2.8,0.036,19.0,113.0,0.9926,3.03,0.29,10.2 +6.8,0.37,0.47,11.2,0.071,44.0,136.0,0.9968,2.98,0.88,9.2 +7.6,0.16,0.44,1.4,0.043,25.0,109.0,0.9932,3.11,0.75,10.3 +7.1,0.18,0.42,1.4,0.045,47.0,157.0,0.9916,2.95,0.31,10.5 +8.3,0.14,0.26,1.5,0.049,56.0,189.0,0.9946,3.21,0.62,9.5 +8.6,0.2,0.42,1.5,0.041,35.0,125.0,0.9925,3.11,0.49,11.4 +8.6,0.2,0.42,1.5,0.041,35.0,125.0,0.9925,3.11,0.49,11.4 +6.8,0.19,0.32,7.05,0.019,54.0,188.0,0.9935,3.25,0.37,11.1 +7.6,0.19,0.38,10.6,0.06,48.0,174.0,0.9962,3.13,0.38,10.5 +6.8,0.34,0.74,2.8,0.088,23.0,185.0,0.9928,3.51,0.7,12.0 +6.2,0.15,0.46,1.6,0.039,38.0,123.0,0.993,3.38,0.51,9.7 +6.6,0.14,0.44,1.6,0.042,47.0,140.0,0.993,3.32,0.51,10.2 
+8.0,0.55,0.17,8.2,0.04,13.0,60.0,0.9956,3.09,0.3,9.5 +7.0,0.24,0.35,1.5,0.052,51.0,128.0,0.9941,3.41,0.59,10.4 +6.3,0.6,0.44,11.0,0.05,50.0,245.0,0.9972,3.19,0.57,9.3 +7.1,0.2,0.41,2.1,0.054,24.0,166.0,0.9948,3.48,0.62,10.5 +6.2,0.34,0.29,7.6,0.047,45.0,232.0,0.9955,3.35,0.62,10.0 +7.1,0.3,0.36,6.8,0.055,44.5,234.0,0.9972,3.49,0.64,10.2 +7.1,0.3,0.36,6.8,0.055,44.5,234.0,0.9972,3.49,0.64,10.2 +7.9,0.64,0.46,10.6,0.244,33.0,227.0,0.9983,2.87,0.74,9.1 +8.8,0.17,0.38,1.8,0.04,39.0,148.0,0.9942,3.16,0.67,10.2 +7.5,0.17,0.37,1.5,0.06,18.0,75.0,0.9936,3.54,0.88,10.7 +7.1,0.47,0.24,6.0,0.044,11.0,77.0,0.9956,3.21,0.56,9.7 +7.1,0.15,0.34,5.3,0.034,33.0,104.0,0.9953,3.37,0.52,9.3 +7.5,0.17,0.34,1.4,0.035,13.0,102.0,0.9918,3.05,0.74,11.0 +8.2,0.68,0.3,2.1,0.047,17.0,138.0,0.995,3.22,0.71,10.8 +7.7,0.275,0.3,1.0,0.039,19.0,75.0,0.992,3.01,0.56,10.7 +7.3,0.49,0.32,5.2,0.043,18.0,104.0,0.9952,3.24,0.45,10.7 +7.5,0.33,0.48,19.45,0.048,55.0,243.0,1.001,2.95,0.4,8.8 +7.2,0.21,0.37,1.6,0.049,23.0,94.0,0.9924,3.16,0.48,10.9 +7.3,0.15,0.4,2.0,0.05,24.0,92.0,0.9932,3.14,0.45,10.5 +6.5,0.19,0.1,1.3,0.046,23.0,107.0,0.9937,3.29,0.45,10.0 +7.0,0.31,0.52,1.7,0.029,5.0,61.0,0.9918,3.07,0.43,10.4 +8.3,0.4,0.38,1.1,0.038,15.0,75.0,0.9934,3.03,0.43,9.2 +6.1,0.37,0.36,4.7,0.035,36.0,116.0,0.991,3.31,0.62,12.6 +7.3,0.24,0.34,7.5,0.048,29.0,152.0,0.9962,3.1,0.54,9.0 +6.9,0.21,0.81,1.1,0.137,52.0,123.0,0.9932,3.03,0.39,9.2 +7.6,0.29,0.42,1.3,0.035,18.0,86.0,0.9908,2.99,0.39,11.3 +9.4,0.29,0.55,2.2,0.05,17.0,119.0,0.9962,3.12,0.69,10.3 +7.0,0.31,0.52,1.7,0.029,5.0,61.0,0.9918,3.07,0.43,10.4 +8.6,0.26,0.41,2.2,0.049,29.0,111.0,0.9941,2.96,0.44,10.0 +7.5,0.21,0.34,1.2,0.06,26.0,111.0,0.9931,3.51,0.47,10.7 +7.2,0.51,0.24,10.0,0.093,35.0,197.0,0.9981,3.41,0.47,9.0 +7.5,0.21,0.34,1.2,0.06,26.0,111.0,0.9931,3.51,0.47,10.7 +5.3,0.3,0.2,1.1,0.077,48.0,166.0,0.9944,3.3,0.54,8.7 +8.0,0.26,0.36,2.0,0.054,30.0,121.0,0.992,3.09,0.72,11.6 +7.0,0.21,0.28,7.5,0.07,45.0,185.0,0.9966,3.34,0.55,9.4 +6.7,0.26,0.26,4.0,0.079,35.5,216.0,0.9956,3.31,0.68,9.5 +6.7,0.26,0.26,4.1,0.073,36.0,202.0,0.9956,3.3,0.67,9.5 +8.1,0.26,0.37,1.9,0.072,48.0,159.0,0.9949,3.37,0.7,10.9 +8.3,0.22,0.38,14.8,0.054,32.0,126.0,1.0002,3.22,0.5,9.7 +6.4,0.3,0.51,5.5,0.048,62.0,172.0,0.9942,3.08,0.45,9.1 +7.5,0.19,0.34,2.6,0.037,33.0,125.0,0.9923,3.1,0.49,11.1 +8.8,0.33,0.44,6.35,0.024,9.0,87.0,0.9917,2.96,0.4,12.6 +6.9,0.2,0.36,1.5,0.031,38.0,147.0,0.9931,3.35,0.56,11.0 +8.0,0.37,0.32,1.6,0.04,32.0,166.0,0.992,3.0,0.55,11.3 +8.3,0.22,0.38,14.8,0.054,32.0,126.0,1.0002,3.22,0.5,9.7 +8.2,0.29,0.33,9.1,0.036,28.0,118.0,0.9953,2.96,0.4,10.9 +7.7,0.34,0.3,8.0,0.048,25.0,192.0,0.9951,2.97,0.47,10.9 +6.2,0.55,0.45,12.0,0.049,27.0,186.0,0.9974,3.17,0.5,9.3 +6.4,0.4,0.19,3.2,0.033,28.0,124.0,0.9904,3.22,0.54,12.7 +7.5,0.28,0.33,7.7,0.048,42.0,180.0,0.9974,3.37,0.59,10.1 +7.8,0.26,0.44,1.3,0.037,43.0,132.0,0.9944,3.18,0.65,10.0 +6.5,0.26,0.34,16.3,0.051,56.0,197.0,1.0004,3.49,0.42,9.8 +6.3,0.34,0.29,6.2,0.046,29.0,227.0,0.9952,3.29,0.53,10.1 +6.8,0.15,0.33,4.7,0.059,31.0,118.0,0.9956,3.43,0.39,9.0 +6.3,0.27,0.25,5.8,0.038,52.0,155.0,0.995,3.28,0.38,9.4 +6.3,0.27,0.25,5.8,0.038,52.0,155.0,0.995,3.28,0.38,9.4 +7.4,0.2,0.37,16.95,0.048,43.0,190.0,0.9995,3.03,0.42,9.2 +6.3,0.23,0.21,5.1,0.035,29.0,142.0,0.9942,3.36,0.33,10.1 +7.3,0.31,0.69,10.2,0.041,58.0,160.0,0.9977,3.06,0.45,8.6 +5.2,0.24,0.45,3.8,0.027,21.0,128.0,0.992,3.55,0.49,11.2 +7.0,0.24,0.32,1.3,0.037,39.0,123.0,0.992,3.17,0.42,11.2 +7.4,0.2,0.37,16.95,0.048,43.0,190.0,0.9995,3.03,0.42,9.2 
+7.0,0.17,0.33,4.0,0.034,17.0,127.0,0.9934,3.19,0.39,10.6 +8.3,0.21,0.58,17.1,0.049,62.0,213.0,1.0006,3.01,0.51,9.3 +7.2,0.21,0.35,14.5,0.048,35.0,178.0,0.9982,3.05,0.47,8.9 +7.1,0.21,0.4,1.2,0.069,24.0,156.0,0.9928,3.42,0.43,10.6 +8.4,0.17,0.31,6.7,0.038,29.0,132.0,0.9945,3.1,0.32,10.6 +7.4,0.24,0.31,8.4,0.045,52.0,183.0,0.9963,3.09,0.32,8.8 +5.3,0.24,0.33,1.3,0.033,25.0,97.0,0.9906,3.59,0.38,11.0 +6.5,0.28,0.26,8.8,0.04,44.0,139.0,0.9956,3.32,0.37,10.2 +6.3,0.23,0.21,5.1,0.035,29.0,142.0,0.9942,3.36,0.33,10.1 +6.5,0.29,0.25,10.6,0.039,32.0,120.0,0.9962,3.31,0.34,10.1 +5.8,0.29,0.21,2.6,0.025,12.0,120.0,0.9894,3.39,0.79,14.0 +6.3,0.27,0.25,5.8,0.038,52.0,155.0,0.995,3.28,0.38,9.4 +6.3,0.17,0.42,2.8,0.028,45.0,107.0,0.9908,3.27,0.43,11.8 +6.3,0.16,0.4,1.6,0.033,59.0,148.0,0.9914,3.44,0.53,11.4 +7.9,0.29,0.39,6.7,0.036,6.0,117.0,0.9938,3.12,0.42,10.7 +7.3,0.31,0.69,10.2,0.041,58.0,160.0,0.9977,3.06,0.45,8.6 +5.5,0.32,0.45,4.9,0.028,25.0,191.0,0.9922,3.51,0.49,11.5 +5.2,0.24,0.45,3.8,0.027,21.0,128.0,0.992,3.55,0.49,11.2 +7.2,0.37,0.15,2.0,0.029,27.0,87.0,0.9903,3.3,0.59,12.6 +6.1,0.29,0.27,1.7,0.024,13.0,76.0,0.9893,3.21,0.51,12.6 +9.2,0.22,0.4,2.4,0.054,18.0,151.0,0.9952,3.04,0.46,9.3 +7.2,0.37,0.15,2.0,0.029,27.0,87.0,0.9903,3.3,0.59,12.6 +8.0,0.18,0.37,1.3,0.04,15.0,96.0,0.9912,3.06,0.61,12.1 +6.5,0.22,0.34,12.0,0.053,55.0,177.0,0.9983,3.52,0.44,9.9 +7.4,0.18,0.4,1.6,0.047,22.0,102.0,0.9937,3.28,0.44,10.7 +6.5,0.52,0.17,1.4,0.047,5.0,26.0,0.9932,3.26,0.32,10.0 +7.0,0.15,0.38,2.2,0.047,33.0,96.0,0.9928,3.13,0.39,10.4 +5.9,0.415,0.13,1.4,0.04,11.0,64.0,0.9922,3.29,0.52,10.5 +8.1,0.45,0.34,8.3,0.037,33.0,216.0,0.9976,3.31,0.64,9.7 +5.8,0.415,0.13,1.4,0.04,11.0,64.0,0.9922,3.29,0.52,10.5 +6.4,0.5,0.16,12.9,0.042,26.0,138.0,0.9974,3.28,0.33,9.0 +6.7,0.105,0.32,12.4,0.051,34.0,106.0,0.998,3.54,0.45,9.2 +6.0,0.4,0.3,1.6,0.047,30.0,117.0,0.9931,3.17,0.48,10.1 +6.6,0.25,0.39,1.45,0.04,40.0,89.0,0.9911,3.35,0.4,11.4 +9.8,0.36,0.45,1.6,0.042,11.0,124.0,0.9944,2.93,0.46,10.8 +9.6,0.23,0.4,1.5,0.044,19.0,135.0,0.9937,2.96,0.49,10.9 +6.3,0.55,0.45,13.0,0.047,33.0,182.0,0.9974,3.2,0.46,9.2 +6.5,0.115,0.29,1.95,0.038,73.0,166.0,0.989,3.12,0.25,12.9 +6.4,0.125,0.29,5.85,0.042,24.0,99.0,0.992,3.23,0.32,12.0 +5.7,0.1,0.27,1.3,0.047,21.0,100.0,0.9928,3.27,0.46,9.5 +7.9,0.25,0.29,5.3,0.031,33.0,117.0,0.9918,3.06,0.32,11.8 +6.9,0.2,0.28,1.2,0.048,36.0,159.0,0.9936,3.19,0.43,9.1 +6.9,0.23,0.34,4.0,0.047,24.0,128.0,0.9944,3.2,0.52,9.7 +6.8,0.39,0.31,14.35,0.043,28.0,162.0,0.9988,3.17,0.54,9.1 +8.7,0.22,0.42,2.3,0.053,27.0,114.0,0.994,2.99,0.43,10.0 +7.4,0.41,0.34,4.7,0.042,19.0,127.0,0.9953,3.25,0.42,10.4 +6.7,0.25,0.34,12.85,0.048,30.0,161.0,0.9986,3.44,0.47,9.5 +6.0,0.26,0.42,5.2,0.027,70.0,178.0,0.9914,3.4,0.4,12.3 +6.1,0.31,0.37,8.4,0.031,70.0,170.0,0.9934,3.42,0.4,11.7 +9.2,0.28,0.46,3.2,0.058,39.0,133.0,0.996,3.14,0.58,9.5 +9.0,0.31,0.49,6.9,0.034,26.0,91.0,0.9937,2.99,0.34,11.5 +8.5,0.16,0.33,1.0,0.076,17.0,57.0,0.9921,3.14,0.46,10.6 +9.3,0.34,0.49,7.3,0.052,30.0,146.0,0.998,3.17,0.61,10.2 +9.2,0.28,0.46,3.2,0.058,39.0,133.0,0.996,3.14,0.58,9.5 +7.2,0.24,0.3,1.6,0.048,27.0,131.0,0.9933,3.25,0.45,10.5 +7.2,0.25,0.32,1.5,0.047,27.0,132.0,0.9933,3.26,0.44,10.5 +6.8,0.32,0.18,7.5,0.041,71.0,223.0,0.9959,3.14,0.41,8.9 +9.1,0.27,0.32,1.1,0.031,15.0,151.0,0.9936,3.03,0.41,10.6 +8.9,0.34,0.32,1.3,0.041,12.0,188.0,0.9953,3.17,0.49,9.5 +7.0,0.17,0.37,5.7,0.025,29.0,111.0,0.9938,3.2,0.49,10.8 +6.7,0.25,0.23,7.2,0.038,61.0,220.0,0.9952,3.14,0.35,9.5 +6.9,0.32,0.17,7.6,0.042,69.0,219.0,0.9959,3.13,0.4,8.9 
+6.8,0.32,0.18,7.5,0.041,71.0,223.0,0.9959,3.14,0.41,8.9 +6.1,0.6,0.0,1.3,0.042,24.0,79.0,0.9937,3.31,0.38,9.4 +5.3,0.395,0.07,1.3,0.035,26.0,102.0,0.992,3.5,0.35,10.6 +7.9,0.16,0.3,4.8,0.037,37.0,171.0,0.9967,3.47,0.44,9.0 +7.6,0.33,0.36,2.1,0.034,26.0,172.0,0.9944,3.42,0.48,10.5 +7.8,0.3,0.29,16.85,0.054,23.0,135.0,0.9998,3.16,0.38,9.0 +7.8,0.3,0.29,16.85,0.054,23.0,135.0,0.9998,3.16,0.38,9.0 +5.7,0.26,0.27,4.1,0.201,73.5,189.5,0.9942,3.27,0.38,9.4 +7.8,0.3,0.29,16.85,0.054,23.0,135.0,0.9998,3.16,0.38,9.0 +7.5,0.14,0.34,1.3,0.055,50.0,153.0,0.9945,3.29,0.8,9.6 +7.8,0.3,0.29,16.85,0.054,23.0,135.0,0.9998,3.16,0.38,9.0 +6.6,0.25,0.41,7.4,0.043,29.0,151.0,0.9946,3.15,0.6,10.2 +5.7,0.26,0.27,4.1,0.201,73.5,189.5,0.9942,3.27,0.38,9.4 +8.2,0.23,0.49,0.9,0.057,15.0,73.0,0.9928,3.07,0.38,10.4 +6.0,0.24,0.32,6.3,0.03,34.0,129.0,0.9946,3.52,0.41,10.4 +6.1,0.45,0.27,0.8,0.039,13.0,82.0,0.9927,3.23,0.32,9.5 +7.4,0.23,0.43,1.4,0.044,22.0,113.0,0.9938,3.22,0.62,10.6 +7.2,0.2,0.38,1.0,0.037,21.0,74.0,0.9918,3.21,0.37,11.0 +7.5,0.14,0.34,1.3,0.055,50.0,153.0,0.9945,3.29,0.8,9.6 +7.7,0.25,0.43,4.5,0.062,20.0,115.0,0.9966,3.38,0.5,9.9 +8.2,0.61,0.45,5.4,0.03,15.0,118.0,0.9954,3.14,0.34,9.6 +7.6,0.21,0.44,1.9,0.036,10.0,119.0,0.9913,3.01,0.7,12.8 +7.4,0.22,0.33,2.0,0.045,31.0,101.0,0.9931,3.42,0.55,11.4 +7.2,0.26,0.26,12.7,0.036,49.0,214.0,0.9986,3.41,0.5,10.0 +6.4,0.25,0.41,8.6,0.042,57.0,173.0,0.9965,3.0,0.44,9.1 +6.3,0.32,0.35,11.1,0.039,29.0,198.0,0.9984,3.36,0.5,9.4 +6.8,0.25,0.29,2.0,0.042,19.0,189.0,0.9952,3.46,0.54,10.2 +9.8,0.44,0.4,2.8,0.036,35.0,167.0,0.9956,2.97,0.39,9.2 +7.2,0.2,0.25,4.5,0.044,31.0,109.0,0.9949,3.23,0.36,9.4 +8.2,0.61,0.45,5.4,0.03,15.0,118.0,0.9954,3.14,0.34,9.6 +7.5,0.42,0.45,9.1,0.029,20.0,125.0,0.996,3.12,0.36,10.1 +7.4,0.22,0.33,2.0,0.045,31.0,101.0,0.9931,3.42,0.55,11.4 +6.4,0.26,0.3,2.2,0.025,33.0,134.0,0.992,3.21,0.47,10.6 +7.9,0.46,0.32,4.1,0.033,40.0,138.0,0.9912,3.18,0.44,12.8 +6.5,0.41,0.64,11.8,0.065,65.0,225.0,0.9978,3.12,0.51,8.9 +7.5,0.32,0.37,1.2,0.048,22.0,184.0,0.9938,3.09,0.43,9.3 +6.6,0.21,0.38,2.2,0.026,40.0,104.0,0.9914,3.25,0.4,11.1 +7.1,0.21,0.3,1.4,0.037,45.0,143.0,0.9932,3.13,0.33,9.9 +7.6,0.26,0.47,1.6,0.068,5.0,55.0,0.9944,3.1,0.45,9.6 +7.6,0.21,0.44,1.9,0.036,10.0,119.0,0.9913,3.01,0.7,12.8 +6.9,0.25,0.26,5.2,0.024,36.0,135.0,0.9948,3.16,0.72,10.7 +7.1,0.26,0.32,14.45,0.074,29.0,107.0,0.998,2.96,0.42,9.2 +7.3,0.22,0.4,14.75,0.042,44.5,129.5,0.9998,3.36,0.41,9.1 +6.2,0.37,0.22,8.3,0.025,36.0,216.0,0.9964,3.33,0.6,9.6 +7.9,0.22,0.45,14.2,0.038,53.0,141.0,0.9992,3.03,0.46,9.2 +6.9,0.25,0.26,5.2,0.024,36.0,135.0,0.9948,3.16,0.72,10.7 +7.3,0.22,0.4,14.75,0.042,44.5,129.5,0.9998,3.36,0.41,9.1 +7.1,0.26,0.32,14.45,0.074,29.0,107.0,0.998,2.96,0.42,9.2 +7.4,0.25,0.37,6.9,0.02,14.0,93.0,0.9939,3.0,0.48,10.7 +6.8,0.18,0.37,1.5,0.027,37.0,93.0,0.992,3.3,0.45,10.8 +7.0,0.17,0.37,1.5,0.028,26.0,75.0,0.9922,3.3,0.46,10.8 +6.4,0.3,0.38,7.8,0.046,35.0,192.0,0.9955,3.1,0.37,9.0 +5.0,0.33,0.16,1.5,0.049,10.0,97.0,0.9917,3.48,0.44,10.7 +5.0,0.33,0.16,1.5,0.049,10.0,97.0,0.9917,3.48,0.44,10.7 +8.9,0.33,0.32,1.5,0.047,11.0,200.0,0.9954,3.19,0.46,9.4 +7.0,0.26,0.46,15.55,0.037,61.0,171.0,0.9986,2.94,0.35,8.8 +6.4,0.3,0.38,7.8,0.046,35.0,192.0,0.9955,3.1,0.37,9.0 +6.3,0.21,0.4,1.7,0.031,48.0,134.0,0.9917,3.42,0.49,11.5 +8.0,0.23,0.46,1.5,0.03,30.0,125.0,0.9907,3.23,0.47,12.5 +9.2,0.28,0.41,1.0,0.042,14.0,59.0,0.9922,2.96,0.25,10.5 +7.3,0.27,0.39,6.7,0.064,28.0,188.0,0.9958,3.29,0.3,9.7 +7.6,0.32,0.36,1.6,0.04,32.0,155.0,0.993,3.23,0.52,11.3 
+5.0,0.33,0.16,1.5,0.049,10.0,97.0,0.9917,3.48,0.44,10.7 +9.7,0.24,0.45,1.2,0.033,11.0,59.0,0.9926,2.74,0.47,10.8 +8.0,0.28,0.42,7.1,0.045,41.0,169.0,0.9959,3.17,0.43,10.6 +8.2,0.37,0.36,1.0,0.034,17.0,93.0,0.9906,3.04,0.32,11.7 +8.0,0.61,0.38,12.1,0.301,24.0,220.0,0.9993,2.94,0.48,9.2 +7.2,0.26,0.44,7.1,0.027,25.0,126.0,0.993,3.02,0.34,11.1 +8.2,0.37,0.36,1.0,0.034,17.0,93.0,0.9906,3.04,0.32,11.7 +6.4,0.23,0.33,1.15,0.044,15.5,217.5,0.992,3.33,0.44,11.0 +5.9,0.4,0.32,6.0,0.034,50.0,127.0,0.992,3.51,0.58,12.5 +7.6,0.28,0.39,1.2,0.038,21.0,115.0,0.994,3.16,0.67,10.0 +8.0,0.28,0.42,7.1,0.045,41.0,169.0,0.9959,3.17,0.43,10.6 +7.2,0.23,0.39,2.3,0.033,29.0,102.0,0.9908,3.26,0.54,12.3 +6.8,0.32,0.37,3.4,0.023,19.0,87.0,0.9902,3.14,0.53,12.7 +7.2,0.23,0.39,2.3,0.033,29.0,102.0,0.9908,3.26,0.54,12.3 +6.9,0.18,0.38,6.5,0.039,20.0,110.0,0.9943,3.1,0.42,10.5 +9.4,0.26,0.53,1.2,0.047,25.0,109.0,0.9921,3.23,0.28,12.5 +8.3,0.33,0.42,1.15,0.033,18.0,96.0,0.9911,3.2,0.32,12.4 +7.3,0.29,0.3,13.0,0.043,46.0,238.0,0.9986,3.06,0.41,8.7 +7.9,0.41,0.37,4.5,0.03,40.0,114.0,0.992,3.17,0.54,12.4 +7.9,0.44,0.37,5.85,0.033,27.0,93.0,0.992,3.16,0.54,12.6 +7.7,0.39,0.3,5.2,0.037,29.0,131.0,0.9943,3.38,0.44,11.0 +7.7,0.26,0.31,1.3,0.043,47.0,155.0,0.9937,3.42,0.5,10.1 +7.8,0.32,0.31,1.7,0.036,46.0,195.0,0.993,3.03,0.48,10.5 +6.8,0.32,0.37,3.4,0.023,19.0,87.0,0.9902,3.14,0.53,12.7 +7.3,0.24,0.39,3.6,0.024,35.0,116.0,0.9928,3.17,0.51,10.9 +7.1,0.44,0.37,2.7,0.041,35.0,128.0,0.9896,3.07,0.43,13.5 +10.3,0.25,0.48,2.2,0.042,28.0,164.0,0.998,3.19,0.59,9.7 +7.9,0.14,0.28,1.8,0.041,44.0,178.0,0.9954,3.45,0.43,9.2 +7.4,0.18,0.42,2.1,0.036,33.0,187.0,0.9938,3.4,0.41,10.6 +8.1,0.43,0.42,6.6,0.033,36.0,141.0,0.9918,2.98,0.39,13.3 +7.1,0.44,0.37,2.7,0.041,35.0,128.0,0.9896,3.07,0.43,13.5 +6.4,0.26,0.22,5.1,0.037,23.0,131.0,0.9944,3.29,0.32,10.1 +8.0,0.66,0.72,17.55,0.042,62.0,233.0,0.9999,2.92,0.68,9.4 +8.0,0.2,0.4,5.2,0.055,41.0,167.0,0.9953,3.18,0.4,10.6 +7.2,0.21,0.34,1.1,0.046,25.0,80.0,0.992,3.25,0.4,11.3 +7.2,0.18,0.31,1.1,0.045,20.0,73.0,0.9925,3.32,0.4,10.8 +8.4,0.57,0.44,10.7,0.051,46.0,195.0,0.9981,3.15,0.51,10.4 +5.3,0.26,0.23,5.15,0.034,48.0,160.0,0.9952,3.82,0.51,10.5 +5.7,0.245,0.33,1.1,0.049,28.0,150.0,0.9927,3.13,0.42,9.3 +5.6,0.245,0.32,1.1,0.047,24.0,152.0,0.9927,3.12,0.42,9.3 +7.3,0.25,0.41,1.8,0.037,52.0,165.0,0.9911,3.29,0.39,12.2 +7.0,0.16,0.73,1.0,0.138,58.0,150.0,0.9936,3.08,0.3,9.2 +6.4,0.22,0.34,1.8,0.057,29.0,104.0,0.9959,3.81,0.57,10.3 +7.3,0.18,0.65,1.4,0.046,28.0,157.0,0.9946,3.33,0.62,9.4 +6.4,0.17,0.27,6.7,0.036,88.0,223.0,0.9948,3.28,0.35,10.2 +6.9,0.29,0.16,6.8,0.034,65.0,212.0,0.9955,3.08,0.39,9.0 +6.2,0.21,0.38,6.8,0.036,64.0,245.0,0.9951,3.06,0.36,9.3 +6.4,0.23,0.3,7.1,0.037,63.0,236.0,0.9952,3.06,0.34,9.2 +7.3,0.19,0.68,1.5,0.05,31.0,156.0,0.9946,3.32,0.64,9.4 +7.3,0.18,0.65,1.4,0.046,28.0,157.0,0.9946,3.33,0.62,9.4 +9.6,0.29,0.46,1.45,0.039,77.5,223.0,0.9944,2.92,0.46,9.5 +7.2,0.14,0.35,1.2,0.036,15.0,73.0,0.9938,3.46,0.39,9.9 +6.9,0.31,0.34,7.4,0.059,36.0,174.0,0.9963,3.46,0.62,11.1 +7.5,0.28,0.34,4.2,0.028,36.0,116.0,0.991,2.99,0.41,12.3 +8.0,0.22,0.42,14.6,0.044,45.0,163.0,1.0003,3.21,0.69,8.6 +7.6,0.31,0.29,10.5,0.04,21.0,145.0,0.9966,3.04,0.35,9.4 +8.4,0.35,0.56,13.8,0.048,55.0,190.0,0.9993,3.07,0.58,9.4 +8.0,0.22,0.42,14.6,0.044,45.0,163.0,1.0003,3.21,0.69,8.6 +8.1,0.5,0.47,1.1,0.037,23.0,126.0,0.9938,3.21,0.42,10.9 +7.0,0.39,0.31,5.3,0.169,32.0,162.0,0.9965,3.2,0.48,9.4 +8.1,0.5,0.47,1.1,0.037,23.0,126.0,0.9938,3.21,0.42,10.9 +8.4,0.35,0.56,13.8,0.048,55.0,190.0,0.9993,3.07,0.58,9.4 
+6.2,0.22,0.27,1.5,0.064,20.0,132.0,0.9938,3.22,0.46,9.2 +8.0,0.22,0.42,14.6,0.044,45.0,163.0,1.0003,3.21,0.69,8.6 +7.6,0.31,0.29,10.5,0.04,21.0,145.0,0.9966,3.04,0.35,9.4 +7.0,0.24,0.36,4.9,0.083,10.0,133.0,0.9942,3.33,0.37,10.8 +6.6,0.27,0.3,1.9,0.025,14.0,153.0,0.9928,3.29,0.62,10.5 +7.8,0.16,0.41,1.7,0.026,29.0,140.0,0.991,3.02,0.78,12.5 +7.7,0.27,0.34,1.8,0.028,26.0,168.0,0.9911,2.99,0.48,12.1 +7.4,0.31,0.74,10.7,0.039,51.0,147.0,0.9977,3.02,0.43,8.7 +8.0,0.45,0.36,8.8,0.026,50.0,151.0,0.9927,3.07,0.25,12.7 +7.7,0.27,0.34,1.8,0.028,26.0,168.0,0.9911,2.99,0.48,12.1 +7.8,0.16,0.41,1.7,0.026,29.0,140.0,0.991,3.02,0.78,12.5 +6.6,0.16,0.29,1.8,0.05,40.0,147.0,0.9912,3.06,0.44,11.4 +8.3,0.21,0.4,1.6,0.032,35.0,110.0,0.9907,3.02,0.6,12.9 +7.2,0.32,0.33,1.4,0.029,29.0,109.0,0.9902,3.15,0.51,12.8 +6.6,0.16,0.3,1.6,0.034,15.0,78.0,0.992,3.38,0.44,11.2 +8.4,0.16,0.33,1.5,0.033,16.0,98.0,0.994,3.14,0.42,9.7 +7.5,0.23,0.32,9.2,0.038,54.0,191.0,0.9966,3.04,0.56,9.7 +6.2,0.17,0.3,1.1,0.037,14.0,79.0,0.993,3.5,0.54,10.3 +6.9,0.39,0.22,4.3,0.03,10.0,102.0,0.993,3.0,0.87,11.6 +6.9,0.41,0.22,4.2,0.031,10.0,102.0,0.993,3.0,0.86,11.6 +7.5,0.23,0.32,9.2,0.038,54.0,191.0,0.9966,3.04,0.56,9.7 +7.5,0.38,0.33,5.0,0.045,30.0,131.0,0.9942,3.32,0.44,10.9 +7.3,0.42,0.38,6.8,0.045,29.0,122.0,0.9925,3.19,0.37,12.6 +7.3,0.34,0.39,5.2,0.04,45.0,163.0,0.9925,3.3,0.47,12.4 +7.8,0.23,0.28,4.75,0.042,45.0,166.0,0.9928,2.96,0.4,11.5 +9.0,0.245,0.38,5.9,0.045,52.0,159.0,0.995,2.93,0.35,10.2 +6.9,0.2,0.4,7.7,0.032,51.0,176.0,0.9939,3.22,0.27,11.4 +7.4,0.19,0.42,6.4,0.067,39.0,212.0,0.9958,3.3,0.33,9.6 +8.2,0.2,0.36,8.1,0.035,60.0,163.0,0.9952,3.05,0.3,10.3 +8.0,0.59,0.71,17.35,0.038,61.0,228.0,1.0,2.95,0.75,9.3 +7.9,0.14,0.45,1.8,0.05,17.0,114.0,0.9948,3.33,0.49,10.7 +6.8,0.24,0.4,1.8,0.047,34.0,105.0,0.99,3.13,0.49,12.8 +9.7,0.14,0.59,1.5,0.049,23.0,142.0,0.9958,2.98,0.62,9.5 +9.2,0.15,0.68,1.6,0.046,22.0,130.0,0.9948,3.02,0.45,10.4 +9.4,0.17,0.55,1.6,0.049,14.0,94.0,0.9949,3.02,0.61,10.3 +5.2,0.365,0.08,13.5,0.041,37.0,142.0,0.997,3.46,0.39,9.9 +6.3,0.23,0.22,3.75,0.039,37.0,116.0,0.9927,3.23,0.5,10.7 +9.6,0.25,0.54,1.3,0.04,16.0,160.0,0.9938,2.94,0.43,10.5 +9.2,0.32,0.42,1.3,0.046,14.0,186.0,0.9949,3.08,0.48,9.6 +6.4,0.31,0.4,6.2,0.04,46.0,169.0,0.9953,3.15,0.46,9.3 +8.1,0.2,0.36,9.7,0.044,63.0,162.0,0.997,3.1,0.46,10.0 +7.9,0.255,0.26,2.0,0.026,40.0,190.0,0.9932,3.04,0.39,11.2 +7.0,0.15,0.34,1.4,0.039,21.0,177.0,0.9927,3.32,0.62,10.8 +6.4,0.15,0.31,1.1,0.044,25.0,96.0,0.9932,3.54,0.51,10.3 +6.4,0.25,0.53,6.6,0.038,59.0,234.0,0.9955,3.03,0.42,8.8 +7.6,0.19,0.42,1.5,0.044,6.0,114.0,0.9914,3.04,0.74,12.8 +7.3,0.43,0.37,4.6,0.028,17.0,114.0,0.991,3.23,0.43,13.2 +5.1,0.31,0.3,0.9,0.037,28.0,152.0,0.992,3.54,0.56,10.1 +6.2,0.2,0.26,1.7,0.093,40.0,161.0,0.9924,3.44,0.66,11.0 +6.9,0.16,0.35,1.3,0.043,21.0,182.0,0.9927,3.25,0.62,10.8 +7.7,0.32,0.48,2.3,0.04,28.0,114.0,0.9911,3.2,0.52,12.8 +6.5,0.22,0.72,6.8,0.042,33.0,168.0,0.9958,3.12,0.36,9.2 +6.8,0.26,0.33,1.5,0.047,44.0,167.0,0.9928,3.12,0.44,10.5 +5.2,0.37,0.33,1.2,0.028,13.0,81.0,0.9902,3.37,0.38,11.7 +8.4,0.19,0.43,2.1,0.052,20.0,104.0,0.994,2.85,0.46,9.5 +8.3,0.21,0.41,2.2,0.05,24.0,108.0,0.994,2.85,0.45,9.5 +6.8,0.15,0.32,8.8,0.058,24.0,110.0,0.9972,3.4,0.4,8.8 +7.9,0.16,0.64,17.0,0.05,69.0,210.0,1.0004,3.15,0.51,9.3 +7.8,0.21,0.39,1.8,0.034,62.0,180.0,0.991,3.09,0.75,12.6 +9.0,0.24,0.5,1.2,0.048,26.0,107.0,0.9918,3.21,0.34,12.4 +5.7,0.21,0.24,2.3,0.047,60.0,189.0,0.995,3.65,0.72,10.1 +7.8,0.29,0.36,7.0,0.042,38.0,161.0,0.9941,3.26,0.37,11.2 
+6.7,0.18,0.3,6.4,0.048,40.0,251.0,0.9956,3.29,0.52,10.0 +6.7,0.18,0.3,6.4,0.048,40.0,251.0,0.9956,3.29,0.52,10.0 +8.4,0.58,0.27,12.15,0.033,37.0,116.0,0.9959,2.99,0.39,10.8 +7.2,0.16,0.32,0.8,0.04,50.0,121.0,0.9922,3.27,0.33,10.0 +7.6,0.54,0.23,2.0,0.029,13.0,151.0,0.9931,3.04,0.33,10.4 +8.4,0.58,0.27,12.15,0.033,37.0,116.0,0.9959,2.99,0.39,10.8 +6.6,0.25,0.31,12.4,0.059,52.0,181.0,0.9984,3.51,0.47,9.8 +7.3,0.23,0.37,1.9,0.041,51.0,165.0,0.9908,3.26,0.4,12.2 +7.3,0.39,0.37,1.1,0.043,36.0,113.0,0.991,3.39,0.48,12.7 +7.0,0.46,0.39,6.2,0.039,46.0,163.0,0.9928,3.21,0.35,12.2 +8.2,0.35,0.4,6.3,0.039,35.0,162.0,0.9936,3.15,0.34,11.9 +7.8,0.29,0.36,7.0,0.042,38.0,161.0,0.9941,3.26,0.37,11.2 +9.2,0.35,0.39,0.9,0.042,15.0,61.0,0.9924,2.96,0.28,10.4 +8.0,0.57,0.39,3.9,0.034,22.0,122.0,0.9917,3.29,0.67,12.8 +6.5,0.37,0.33,3.9,0.027,40.0,130.0,0.9906,3.28,0.39,12.7 +5.7,0.21,0.24,2.3,0.047,60.0,189.0,0.995,3.65,0.72,10.1 +6.7,0.18,0.3,6.4,0.048,40.0,251.0,0.9956,3.29,0.52,10.0 +7.8,0.13,0.3,1.8,0.04,43.0,179.0,0.9955,3.43,0.41,9.0 +7.6,0.19,0.41,1.1,0.04,38.0,143.0,0.9907,2.92,0.42,11.4 +7.3,0.22,0.41,15.4,0.05,55.0,191.0,1.0,3.32,0.59,8.9 +6.3,0.29,0.4,6.5,0.039,43.0,167.0,0.9953,3.15,0.44,9.3 +6.8,0.35,0.32,2.4,0.048,35.0,103.0,0.9911,3.28,0.46,12.0 +6.5,0.19,0.32,1.4,0.04,31.0,132.0,0.9922,3.36,0.54,10.8 +6.2,0.12,0.26,5.7,0.044,56.0,158.0,0.9951,3.52,0.37,10.5 +6.0,0.13,0.28,5.7,0.038,56.0,189.5,0.9948,3.59,0.43,10.6 +6.4,0.25,0.33,1.4,0.04,42.0,115.0,0.9906,3.19,0.48,11.3 +6.9,0.32,0.16,1.4,0.051,15.0,96.0,0.994,3.22,0.38,9.5 +7.6,0.19,0.41,1.1,0.04,38.0,143.0,0.9907,2.92,0.42,11.4 +6.7,0.13,0.28,1.2,0.046,35.0,140.0,0.9927,3.33,0.33,10.1 +7.0,0.14,0.41,0.9,0.037,22.0,95.0,0.9914,3.25,0.43,10.9 +7.6,0.27,0.24,3.8,0.058,19.0,115.0,0.9958,3.15,0.45,8.9 +7.3,0.22,0.41,15.4,0.05,55.0,191.0,1.0,3.32,0.59,8.9 +7.4,0.64,0.47,14.15,0.168,42.0,185.0,0.9984,2.9,0.49,9.3 +7.6,0.28,0.39,1.9,0.052,23.0,116.0,0.9941,3.25,0.4,10.4 +8.3,0.26,0.41,9.2,0.042,41.0,162.0,0.9944,3.1,0.38,12.0 +10.7,0.22,0.56,8.2,0.044,37.0,181.0,0.998,2.87,0.68,9.5 +10.7,0.22,0.56,8.2,0.044,37.0,181.0,0.998,2.87,0.68,9.5 +6.9,0.23,0.34,2.7,0.032,24.0,121.0,0.9902,3.14,0.38,12.4 +6.2,0.3,0.32,1.7,0.032,30.0,130.0,0.9911,3.28,0.41,11.2 +6.9,0.27,0.41,1.7,0.047,6.0,134.0,0.9929,3.15,0.69,11.4 +6.9,0.28,0.41,1.7,0.05,10.0,136.0,0.993,3.16,0.71,11.4 +6.9,0.28,0.3,1.6,0.047,46.0,132.0,0.9918,3.35,0.38,11.1 +6.9,0.46,0.2,0.9,0.054,5.0,126.0,0.992,3.1,0.42,10.4 +6.9,0.38,0.32,8.5,0.044,36.0,152.0,0.9932,3.38,0.35,12.0 +5.7,0.43,0.3,5.7,0.039,24.0,98.0,0.992,3.54,0.61,12.3 +6.6,0.56,0.16,3.1,0.045,28.0,92.0,0.994,3.12,0.35,9.1 +7.1,0.36,0.56,1.3,0.046,25.0,102.0,0.9923,3.24,0.33,10.5 +6.8,0.23,0.4,1.6,0.047,5.0,133.0,0.993,3.23,0.7,11.4 +6.2,0.33,0.29,1.3,0.042,26.0,138.0,0.9956,3.77,0.64,9.5 +5.6,0.49,0.13,4.5,0.039,17.0,116.0,0.9907,3.42,0.9,13.7 +6.6,0.42,0.33,2.8,0.034,15.0,85.0,0.99,3.28,0.51,13.4 +7.3,0.18,0.29,1.2,0.044,12.0,143.0,0.9918,3.2,0.48,11.3 +8.1,0.19,0.4,0.9,0.037,73.0,180.0,0.9926,3.06,0.34,10.0 +5.9,0.19,0.26,7.4,0.034,33.0,123.0,0.995,3.49,0.42,10.1 +6.2,0.16,0.47,1.4,0.029,23.0,81.0,0.99,3.26,0.42,12.2 +6.6,0.42,0.33,2.8,0.034,15.0,85.0,0.99,3.28,0.51,13.4 +5.7,0.135,0.3,4.6,0.042,19.0,101.0,0.9946,3.31,0.42,9.3 +5.6,0.49,0.13,4.5,0.039,17.0,116.0,0.9907,3.42,0.9,13.7 +6.9,0.19,0.33,1.6,0.039,27.0,98.0,0.9898,3.09,0.46,12.3 +7.3,0.18,0.29,1.2,0.044,12.0,143.0,0.9918,3.2,0.48,11.3 +7.3,0.25,0.36,13.1,0.05,35.0,200.0,0.9986,3.04,0.46,8.9 +7.3,0.25,0.36,13.1,0.05,35.0,200.0,0.9986,3.04,0.46,8.9 
+7.0,0.2,0.34,5.7,0.035,32.0,83.0,0.9928,3.19,0.46,11.5 +7.3,0.25,0.36,13.1,0.05,35.0,200.0,0.9986,3.04,0.46,8.9 +6.3,0.67,0.48,12.6,0.052,57.0,222.0,0.9979,3.17,0.52,9.3 +7.4,0.4,0.29,5.4,0.044,31.0,122.0,0.994,3.3,0.5,11.1 +7.1,0.26,0.31,2.2,0.044,29.0,128.0,0.9937,3.34,0.64,10.9 +9.0,0.31,0.48,6.6,0.043,11.0,73.0,0.9938,2.9,0.38,11.6 +6.3,0.39,0.24,6.9,0.069,9.0,117.0,0.9942,3.15,0.35,10.2 +8.2,0.22,0.36,6.8,0.034,12.0,90.0,0.9944,3.01,0.38,10.5 +7.1,0.19,0.28,3.6,0.033,16.0,78.0,0.993,2.91,0.78,11.4 +7.3,0.25,0.36,13.1,0.05,35.0,200.0,0.9986,3.04,0.46,8.9 +7.9,0.2,0.34,1.2,0.04,29.0,118.0,0.9932,3.14,0.41,10.6 +7.1,0.26,0.32,5.9,0.037,39.0,97.0,0.9934,3.31,0.4,11.6 +7.0,0.2,0.34,5.7,0.035,32.0,83.0,0.9928,3.19,0.46,11.5 +6.9,0.3,0.33,4.1,0.035,26.0,155.0,0.9925,3.25,0.79,12.3 +8.1,0.29,0.49,7.1,0.042,22.0,124.0,0.9944,3.14,0.41,10.8 +5.8,0.17,0.3,1.4,0.037,55.0,130.0,0.9909,3.29,0.38,11.3 +5.9,0.415,0.02,0.8,0.038,22.0,63.0,0.9932,3.36,0.36,9.3 +6.6,0.23,0.26,1.3,0.045,16.0,128.0,0.9934,3.36,0.6,10.0 +8.6,0.55,0.35,15.55,0.057,35.5,366.5,1.0001,3.04,0.63,11.0 +6.9,0.35,0.74,1.0,0.044,18.0,132.0,0.992,3.13,0.55,10.2 +7.6,0.14,0.74,1.6,0.04,27.0,103.0,0.9916,3.07,0.4,10.8 +9.2,0.28,0.49,11.8,0.042,29.0,137.0,0.998,3.1,0.34,10.1 +6.2,0.18,0.49,4.5,0.047,17.0,90.0,0.9919,3.27,0.37,11.6 +5.3,0.165,0.24,1.1,0.051,25.0,105.0,0.9925,3.32,0.47,9.1 +9.8,0.25,0.74,10.0,0.056,36.0,225.0,0.9977,3.06,0.43,10.0 +8.1,0.29,0.49,7.1,0.042,22.0,124.0,0.9944,3.14,0.41,10.8 +6.8,0.22,0.49,0.9,0.052,26.0,128.0,0.991,3.25,0.35,11.4 +7.2,0.22,0.49,1.0,0.045,34.0,140.0,0.99,3.05,0.34,12.7 +7.4,0.25,0.49,1.1,0.042,35.0,156.0,0.9917,3.13,0.55,11.3 +8.2,0.18,0.49,1.1,0.033,28.0,81.0,0.9923,3.0,0.68,10.4 +6.1,0.22,0.49,1.5,0.051,18.0,87.0,0.9928,3.3,0.46,9.6 +7.0,0.39,0.24,1.0,0.048,8.0,119.0,0.9923,3.0,0.31,10.1 +6.1,0.22,0.49,1.5,0.051,18.0,87.0,0.9928,3.3,0.46,9.6 +6.5,0.36,0.49,2.9,0.03,16.0,94.0,0.9902,3.1,0.49,12.1 +7.1,0.29,0.49,1.2,0.031,32.0,99.0,0.9893,3.07,0.33,12.2 +7.4,0.25,0.49,1.1,0.042,35.0,156.0,0.9917,3.13,0.55,11.3 +6.9,0.23,0.24,14.2,0.053,19.0,94.0,0.9982,3.17,0.5,9.6 +8.5,0.56,0.74,17.85,0.051,51.0,243.0,1.0005,2.99,0.7,9.2 +8.2,0.18,0.49,1.1,0.033,28.0,81.0,0.9923,3.0,0.68,10.4 +6.3,0.23,0.49,7.1,0.05,67.0,210.0,0.9951,3.23,0.34,9.5 +6.1,0.25,0.49,7.6,0.052,67.0,226.0,0.9956,3.16,0.47,8.9 +7.2,0.26,0.74,13.6,0.05,56.0,162.0,0.998,3.03,0.44,8.8 +7.2,0.31,0.24,1.4,0.057,17.0,117.0,0.9928,3.16,0.35,10.5 +8.0,0.25,0.49,1.2,0.061,27.0,117.0,0.9938,3.08,0.34,9.4 +7.0,0.18,0.49,5.3,0.04,34.0,125.0,0.9914,3.24,0.4,12.2 +7.8,0.43,0.49,13.0,0.033,37.0,158.0,0.9955,3.14,0.35,11.3 +8.3,0.2,0.74,4.45,0.044,33.0,130.0,0.9924,3.25,0.42,12.2 +6.3,0.27,0.49,1.2,0.063,35.0,92.0,0.9911,3.38,0.42,12.2 +7.4,0.16,0.49,1.2,0.055,18.0,150.0,0.9917,3.23,0.47,11.2 +7.4,0.16,0.49,1.2,0.055,18.0,150.0,0.9917,3.23,0.47,11.2 +6.9,0.19,0.49,6.6,0.036,49.0,172.0,0.9932,3.2,0.27,11.5 +7.8,0.43,0.49,13.0,0.033,37.0,158.0,0.9955,3.14,0.35,11.3 +7.2,0.4,0.49,1.1,0.048,11.0,138.0,0.9929,3.01,0.42,9.3 +7.8,0.43,0.49,13.0,0.033,37.0,158.0,0.9955,3.14,0.35,11.3 +7.6,0.52,0.49,14.0,0.034,37.0,156.0,0.9958,3.14,0.38,11.8 +8.3,0.21,0.49,19.8,0.054,50.0,231.0,1.0012,2.99,0.54,9.2 +6.9,0.34,0.74,11.2,0.069,44.0,150.0,0.9968,3.0,0.81,9.2 +6.3,0.27,0.49,1.2,0.063,35.0,92.0,0.9911,3.38,0.42,12.2 +8.3,0.2,0.74,4.45,0.044,33.0,130.0,0.9924,3.25,0.42,12.2 +7.1,0.22,0.74,2.7,0.044,42.0,144.0,0.991,3.31,0.41,12.2 +7.9,0.11,0.49,4.5,0.048,27.0,133.0,0.9946,3.24,0.42,10.6 +8.5,0.17,0.74,3.6,0.05,29.0,128.0,0.9928,3.28,0.4,12.4 
+6.4,0.145,0.49,5.4,0.048,54.0,164.0,0.9946,3.56,0.44,10.8 +7.4,0.16,0.49,1.2,0.055,18.0,150.0,0.9917,3.23,0.47,11.2 +8.3,0.19,0.49,1.2,0.051,11.0,137.0,0.9918,3.06,0.46,11.0 +8.0,0.44,0.49,9.1,0.031,46.0,151.0,0.9926,3.16,0.27,12.7 +7.0,0.2,0.74,0.8,0.044,19.0,163.0,0.9931,3.46,0.53,10.2 +6.9,0.19,0.49,6.6,0.036,49.0,172.0,0.9932,3.2,0.27,11.5 +7.1,0.25,0.49,3.0,0.03,30.0,96.0,0.9903,3.13,0.39,12.3 +6.5,0.24,0.24,1.6,0.046,15.0,60.0,0.9928,3.19,0.39,9.8 +7.2,0.4,0.49,1.1,0.048,11.0,138.0,0.9929,3.01,0.42,9.3 +7.6,0.52,0.49,14.0,0.034,37.0,156.0,0.9958,3.14,0.38,11.8 +7.8,0.43,0.49,13.0,0.033,37.0,158.0,0.9955,3.14,0.35,11.3 +7.8,0.21,0.49,1.35,0.052,6.0,48.0,0.9911,3.15,0.28,11.4 +7.0,0.2,0.49,5.9,0.038,39.0,128.0,0.9938,3.21,0.48,10.8 +6.9,0.25,0.24,3.6,0.057,13.0,85.0,0.9942,2.99,0.48,9.5 +7.2,0.08,0.49,1.3,0.05,18.0,148.0,0.9945,3.46,0.44,10.2 +7.1,0.85,0.49,8.7,0.028,40.0,184.0,0.9962,3.22,0.36,10.7 +7.6,0.51,0.24,1.2,0.04,10.0,104.0,0.992,3.05,0.29,10.8 +7.9,0.22,0.24,4.6,0.044,39.0,159.0,0.9927,2.99,0.28,11.5 +7.7,0.16,0.49,2.0,0.056,20.0,124.0,0.9948,3.32,0.49,10.7 +7.2,0.08,0.49,1.3,0.05,18.0,148.0,0.9945,3.46,0.44,10.2 +6.6,0.25,0.24,1.7,0.048,26.0,124.0,0.9942,3.37,0.6,10.1 +6.7,0.16,0.49,2.4,0.046,57.0,187.0,0.9952,3.62,0.81,10.4 +6.9,0.25,0.24,3.6,0.057,13.0,85.0,0.9942,2.99,0.48,9.5 +7.5,0.32,0.24,4.6,0.053,8.0,134.0,0.9958,3.14,0.5,9.1 +7.4,0.28,0.49,1.5,0.034,20.0,126.0,0.9918,2.98,0.39,10.6 +6.2,0.15,0.49,0.9,0.033,17.0,51.0,0.9932,3.3,0.7,9.4 +6.7,0.25,0.74,19.4,0.054,44.0,169.0,1.0004,3.51,0.45,9.8 +6.5,0.26,0.74,13.3,0.044,68.0,224.0,0.9972,3.18,0.54,9.5 +7.9,0.16,0.74,17.85,0.037,52.0,187.0,0.9998,2.99,0.41,9.3 +5.6,0.185,0.49,1.1,0.03,28.0,117.0,0.9918,3.55,0.45,10.3 +7.5,0.2,0.49,1.3,0.031,8.0,97.0,0.9918,3.06,0.62,11.1 +8.0,0.3,0.49,9.4,0.046,47.0,188.0,0.9964,3.14,0.48,10.0 +8.0,0.34,0.49,9.0,0.033,39.0,180.0,0.9936,3.13,0.38,12.3 +7.7,0.35,0.49,8.65,0.033,42.0,186.0,0.9931,3.14,0.38,12.4 +7.6,0.29,0.49,9.6,0.03,45.0,197.0,0.9938,3.13,0.38,12.3 +6.7,0.62,0.24,1.1,0.039,6.0,62.0,0.9934,3.41,0.32,10.4 +6.8,0.27,0.49,1.2,0.044,35.0,126.0,0.99,3.13,0.48,12.1 +7.7,0.27,0.49,1.8,0.041,23.0,86.0,0.9914,3.16,0.42,12.5 +6.7,0.51,0.24,2.1,0.043,14.0,155.0,0.9904,3.22,0.6,13.0 +7.4,0.19,0.49,9.3,0.03,26.0,132.0,0.994,2.99,0.32,11.0 +8.3,0.2,0.49,1.7,0.04,34.0,169.0,0.9938,3.05,0.37,10.1 +6.6,0.3,0.24,1.2,0.034,17.0,121.0,0.9933,3.13,0.36,9.2 +6.8,0.36,0.24,4.6,0.039,24.0,124.0,0.9909,3.27,0.34,12.6 +7.0,0.17,0.74,12.8,0.045,24.0,126.0,0.9942,3.26,0.38,12.2 +9.2,0.18,0.49,1.5,0.041,39.0,130.0,0.9945,3.04,0.49,9.8 +8.1,0.2,0.49,8.1,0.051,51.0,205.0,0.9954,3.1,0.52,11.0 +7.8,0.26,0.74,7.5,0.044,59.0,160.0,0.996,3.22,0.64,10.0 +6.8,0.21,0.49,14.5,0.06,50.0,170.0,0.9991,3.55,0.44,9.8 +7.9,0.2,0.49,1.6,0.053,15.0,144.0,0.993,3.16,0.47,10.5 +8.0,0.18,0.49,1.8,0.061,10.0,145.0,0.9942,3.23,0.48,10.0 +8.8,0.23,0.74,3.2,0.042,15.0,126.0,0.9934,3.02,0.51,11.2 +7.3,0.22,0.49,9.4,0.034,29.0,134.0,0.9939,2.99,0.32,11.0 +7.3,0.22,0.49,9.9,0.031,48.0,161.0,0.9937,3.01,0.28,11.2 +7.4,0.19,0.49,9.3,0.03,26.0,132.0,0.994,2.99,0.32,11.0 +7.3,0.155,0.49,1.3,0.039,34.0,136.0,0.9926,3.14,0.77,10.5 +8.2,0.22,0.49,9.6,0.037,53.0,154.0,0.9951,3.02,0.33,10.6 +8.2,0.24,0.49,9.3,0.038,52.0,163.0,0.9952,3.02,0.33,10.6 +8.4,0.23,0.49,7.8,0.035,22.0,95.0,0.9935,3.04,0.34,12.0 +8.3,0.2,0.49,1.7,0.04,34.0,169.0,0.9938,3.05,0.37,10.1 +8.3,0.2,0.49,1.7,0.038,38.0,167.0,0.9939,3.05,0.37,10.1 +6.6,0.3,0.24,1.2,0.034,17.0,121.0,0.9933,3.13,0.36,9.2 +6.9,0.21,0.49,1.4,0.041,15.0,164.0,0.9927,3.25,0.63,11.0 
+8.0,0.25,0.49,9.0,0.044,31.0,185.0,0.998,3.34,0.49,10.0 +6.6,0.21,0.49,18.15,0.042,41.0,158.0,0.9997,3.28,0.39,8.7 +7.2,0.27,0.74,12.5,0.037,47.0,156.0,0.9981,3.04,0.44,8.7 +14.2,0.27,0.49,1.1,0.037,33.0,156.0,0.992,3.15,0.54,11.1 +7.9,0.28,0.49,7.7,0.045,48.0,195.0,0.9954,3.04,0.55,11.0 +7.4,0.27,0.49,1.1,0.037,33.0,156.0,0.992,3.15,0.54,11.1 +6.6,0.21,0.49,18.15,0.042,41.0,158.0,0.9997,3.28,0.39,8.7 +7.2,0.27,0.74,12.5,0.037,47.0,156.0,0.9981,3.04,0.44,8.7 +8.1,0.3,0.49,8.1,0.037,26.0,174.0,0.9943,3.1,0.3,11.2 +7.5,0.23,0.49,7.7,0.049,61.0,209.0,0.9941,3.14,0.3,11.1 +7.3,0.26,0.49,5.0,0.028,32.0,107.0,0.9936,3.24,0.54,10.8 +7.1,0.18,0.74,15.6,0.044,44.0,176.0,0.9996,3.38,0.67,9.0 +8.5,0.15,0.49,1.5,0.031,17.0,122.0,0.9932,3.03,0.4,10.3 +8.9,0.13,0.49,1.0,0.028,6.0,24.0,0.9926,2.91,0.32,9.9 +8.1,0.28,0.49,1.0,0.04,32.0,148.0,0.9936,3.13,0.41,10.0 +6.0,0.17,0.49,1.0,0.034,26.0,106.0,0.992,3.21,0.42,9.8 +7.3,0.26,0.49,5.0,0.028,32.0,107.0,0.9936,3.24,0.54,10.8 +7.1,0.18,0.74,15.6,0.044,44.0,176.0,0.9996,3.38,0.67,9.0 +7.1,0.53,0.24,0.8,0.029,29.0,86.0,0.993,3.16,0.32,9.1 +7.2,0.16,0.49,1.3,0.037,27.0,104.0,0.9924,3.23,0.57,10.6 +7.3,0.14,0.49,1.1,0.038,28.0,99.0,0.9928,3.2,0.72,10.6 +8.9,0.13,0.49,1.0,0.028,6.0,24.0,0.9926,2.91,0.32,9.9 +7.9,0.12,0.49,5.2,0.049,33.0,152.0,0.9952,3.18,0.47,10.6 +6.7,0.29,0.49,4.7,0.034,35.0,156.0,0.9945,3.13,0.45,9.9 +6.7,0.3,0.49,4.8,0.034,36.0,158.0,0.9945,3.12,0.45,9.9 +7.1,0.36,0.24,1.8,0.025,32.0,102.0,0.9903,3.34,0.59,12.8 +8.5,0.15,0.49,1.5,0.031,17.0,122.0,0.9932,3.03,0.4,10.3 +7.9,0.18,0.49,5.2,0.051,36.0,157.0,0.9953,3.18,0.48,10.6 +6.6,0.19,0.99,1.2,0.122,45.0,129.0,0.9936,3.09,0.31,8.7 +7.3,0.21,0.49,1.8,0.038,44.0,152.0,0.9912,3.32,0.44,12.6 +6.9,0.3,0.49,7.6,0.057,25.0,156.0,0.9962,3.43,0.63,11.0 +7.9,0.42,0.49,8.2,0.056,32.0,164.0,0.9965,3.29,0.6,11.2 +6.9,0.24,0.49,1.3,0.032,35.0,148.0,0.9932,3.45,0.57,10.7 +7.6,0.23,0.49,10.0,0.036,45.0,182.0,0.9967,3.08,0.58,9.6 +7.9,0.18,0.49,5.2,0.051,36.0,157.0,0.9953,3.18,0.48,10.6 +6.2,0.43,0.49,6.4,0.045,12.0,115.0,0.9963,3.27,0.57,9.0 +8.8,0.35,0.49,1.0,0.036,14.0,56.0,0.992,2.96,0.33,10.5 +7.8,0.3,0.74,1.8,0.033,33.0,156.0,0.991,3.29,0.52,12.8 +9.1,0.28,0.49,2.0,0.059,10.0,112.0,0.9958,3.15,0.46,10.1 +7.1,0.34,0.49,1.5,0.027,26.0,126.0,0.99,3.3,0.33,12.2 +7.8,0.3,0.74,1.8,0.033,33.0,156.0,0.991,3.29,0.52,12.8 +9.1,0.28,0.49,2.0,0.059,10.0,112.0,0.9958,3.15,0.46,10.1 +8.5,0.19,0.49,3.5,0.044,29.0,117.0,0.9938,3.14,0.51,10.1 +7.6,0.18,0.49,18.05,0.046,36.0,158.0,0.9996,3.06,0.41,9.2 +7.5,0.19,0.49,1.8,0.055,19.0,110.0,0.9946,3.33,0.44,9.9 +7.4,0.3,0.49,8.2,0.055,49.0,188.0,0.9974,3.52,0.58,9.7 +6.7,0.3,0.74,5.0,0.038,35.0,157.0,0.9945,3.21,0.46,9.9 +6.6,0.3,0.74,4.6,0.041,36.0,159.0,0.9946,3.21,0.45,9.9 +7.4,0.3,0.49,8.2,0.055,49.0,188.0,0.9974,3.52,0.58,9.7 +6.9,0.22,0.49,7.0,0.063,50.0,168.0,0.9957,3.54,0.5,10.3 +7.8,0.26,0.49,3.1,0.045,21.0,116.0,0.9931,3.16,0.35,10.3 +8.5,0.17,0.49,8.8,0.048,23.0,108.0,0.9947,2.88,0.34,10.5 +6.8,0.17,0.74,2.4,0.053,61.0,182.0,0.9953,3.63,0.76,10.5 +6.2,0.27,0.49,1.4,0.05,20.0,74.0,0.9931,3.32,0.44,9.8 +7.1,0.64,0.49,1.8,0.05,17.0,128.0,0.9946,3.31,0.58,10.6 +6.4,0.18,0.74,11.9,0.046,54.0,168.0,0.9978,3.58,0.68,10.1 +7.6,0.31,0.49,13.4,0.062,50.0,191.0,0.9989,3.22,0.53,9.0 +9.8,0.31,0.49,15.4,0.046,13.0,119.0,1.0004,3.18,0.45,9.5 +9.0,0.3,0.49,7.2,0.039,32.0,84.0,0.9938,2.94,0.32,11.5 +8.4,0.24,0.49,7.4,0.039,46.0,108.0,0.9934,3.03,0.33,11.9 +6.4,0.18,0.74,11.9,0.046,54.0,168.0,0.9978,3.58,0.68,10.1 +6.4,0.25,0.74,7.8,0.045,52.0,209.0,0.9956,3.21,0.42,9.2 
+7.3,0.3,0.74,13.5,0.039,46.0,165.0,0.9982,3.02,0.4,8.7 +9.3,0.31,0.49,1.3,0.042,34.0,147.0,0.9948,3.11,0.46,9.8 +6.4,0.25,0.74,7.8,0.045,52.0,209.0,0.9956,3.21,0.42,9.2 +7.3,0.3,0.74,13.5,0.039,46.0,165.0,0.9982,3.02,0.4,8.7 +7.0,0.27,0.74,1.5,0.036,27.0,122.0,0.9926,3.35,0.48,11.2 +7.9,0.14,0.74,1.2,0.028,30.0,165.0,0.991,3.08,0.82,12.3 +6.4,0.12,0.49,6.4,0.042,49.0,161.0,0.9945,3.34,0.44,10.4 +6.8,0.21,0.74,1.2,0.047,25.0,111.0,0.9916,3.13,0.41,10.7 +8.6,0.16,0.49,7.3,0.043,9.0,63.0,0.9953,3.13,0.59,10.5 +7.0,0.29,0.49,3.8,0.047,37.0,136.0,0.9938,2.95,0.4,9.4 +6.4,0.27,0.49,7.3,0.046,53.0,206.0,0.9956,3.24,0.43,9.2 +6.6,0.55,0.01,2.7,0.034,56.0,122.0,0.9906,3.15,0.3,11.9 +6.4,0.27,0.49,7.3,0.046,53.0,206.0,0.9956,3.24,0.43,9.2 +6.3,0.24,0.74,1.4,0.172,24.0,108.0,0.9932,3.27,0.39,9.9 +6.7,0.33,0.49,1.6,0.167,20.0,94.0,0.9914,3.11,0.5,11.4 +7.0,0.29,0.49,3.8,0.047,37.0,136.0,0.9938,2.95,0.4,9.4 +8.2,0.34,0.49,8.0,0.046,55.0,223.0,0.996,3.08,0.52,10.7 +5.6,0.39,0.24,4.7,0.034,27.0,77.0,0.9906,3.28,0.36,12.7 +5.6,0.41,0.24,1.9,0.034,10.0,53.0,0.98815,3.32,0.5,13.5 +6.7,0.41,0.01,2.8,0.048,39.0,137.0,0.9942,3.24,0.35,9.5 +7.1,0.26,0.49,2.2,0.032,31.0,113.0,0.9903,3.37,0.42,12.9 +7.5,0.32,0.49,1.7,0.031,44.0,109.0,0.9906,3.07,0.46,12.5 +5.8,0.19,0.49,4.9,0.04,44.0,118.0,0.9935,3.34,0.38,9.5 +6.9,0.27,0.49,23.5,0.057,59.0,235.0,1.0024,2.98,0.47,8.6 +8.1,0.2,0.49,11.8,0.048,46.0,212.0,0.9968,3.09,0.46,10.0 +7.5,0.32,0.49,1.7,0.031,44.0,109.0,0.9906,3.07,0.46,12.5 +8.2,0.26,0.49,5.2,0.04,19.0,100.0,0.9941,3.12,0.34,10.1 +7.8,0.26,0.49,3.2,0.027,28.0,87.0,0.9919,3.03,0.32,11.3 +8.0,0.14,0.49,1.5,0.035,42.0,120.0,0.9928,3.26,0.4,10.6 +8.0,0.29,0.49,11.7,0.035,40.0,131.0,0.9958,3.14,0.34,10.8 +7.5,0.19,0.49,1.6,0.047,42.0,140.0,0.9932,3.4,0.47,10.7 +6.9,0.34,0.49,7.3,0.045,61.0,206.0,0.9957,3.09,0.4,9.0 +6.2,0.2,0.49,1.6,0.065,17.0,143.0,0.9937,3.22,0.52,9.2 +6.4,0.37,0.49,13.3,0.045,53.0,243.0,0.9982,3.14,0.48,8.5 +6.2,0.22,0.49,6.0,0.029,31.0,128.0,0.9928,3.41,0.36,11.3 +7.8,0.26,0.49,3.2,0.027,28.0,87.0,0.9919,3.03,0.32,11.3 +8.9,0.32,0.49,1.6,0.05,17.0,131.0,0.9956,3.13,0.34,9.4 +6.5,0.44,0.49,7.7,0.045,16.0,169.0,0.9957,3.11,0.37,8.7 +7.0,0.14,0.49,5.9,0.053,22.0,118.0,0.9954,3.36,0.36,9.4 +9.0,0.17,0.49,1.0,0.039,46.0,131.0,0.993,3.09,0.51,10.5 +6.4,0.26,0.49,6.4,0.037,37.0,161.0,0.9954,3.38,0.53,9.7 +9.0,0.22,0.49,10.4,0.048,52.0,195.0,0.9987,3.31,0.44,10.2 +8.9,0.32,0.49,1.6,0.05,17.0,131.0,0.9956,3.13,0.34,9.4 +8.2,0.2,0.49,3.5,0.057,14.0,108.0,0.9928,3.19,0.35,11.5 +7.8,0.15,0.24,7.7,0.047,21.0,98.0,0.9951,2.94,0.31,9.6 +6.9,0.25,0.24,1.8,0.053,6.0,121.0,0.993,3.23,0.7,11.4 +8.2,0.2,0.49,3.5,0.057,14.0,108.0,0.9928,3.19,0.35,11.5 +7.1,0.28,0.49,6.5,0.041,28.0,111.0,0.9926,3.41,0.58,12.2 +7.4,0.19,0.49,6.7,0.037,15.0,110.0,0.9938,3.2,0.38,11.0 +8.3,0.25,0.49,16.8,0.048,50.0,228.0,1.0001,3.03,0.52,9.2 +7.5,0.14,0.74,1.6,0.035,21.0,126.0,0.9933,3.26,0.45,10.2 +7.8,0.49,0.49,7.0,0.043,29.0,149.0,0.9952,3.21,0.33,10.0 +8.1,0.12,0.49,1.2,0.042,43.0,160.0,0.9934,3.13,0.48,9.7 +7.6,0.47,0.49,13.0,0.239,42.0,220.0,0.9988,2.96,0.51,9.2 +7.9,0.22,0.49,3.8,0.042,26.0,105.0,0.993,3.1,0.39,10.5 +7.8,0.49,0.49,7.0,0.043,29.0,149.0,0.9952,3.21,0.33,10.0 +6.4,0.22,0.49,7.5,0.054,42.0,151.0,0.9948,3.27,0.52,10.1 +7.3,0.19,0.49,15.55,0.058,50.0,134.0,0.9998,3.42,0.36,9.1 +8.1,0.3,0.49,12.3,0.049,50.0,144.0,0.9971,3.09,0.57,10.2 +7.3,0.19,0.49,15.55,0.058,50.0,134.0,0.9998,3.42,0.36,9.1 +7.5,0.24,0.49,9.4,0.048,50.0,149.0,0.9962,3.17,0.59,10.5 +6.4,0.22,0.49,7.5,0.054,42.0,151.0,0.9948,3.27,0.52,10.1 
+7.8,0.21,0.49,1.2,0.036,20.0,99.0,0.99,3.05,0.28,12.1 +7.1,0.3,0.49,1.6,0.045,31.0,100.0,0.9942,3.4,0.59,10.2 +6.9,0.26,0.49,1.6,0.058,39.0,166.0,0.9965,3.65,0.52,9.4 +7.6,0.31,0.49,3.95,0.044,27.0,131.0,0.9912,3.08,0.67,12.8 +6.4,0.42,0.74,12.8,0.076,48.0,209.0,0.9978,3.12,0.58,9.0 +8.2,0.29,0.49,1.0,0.044,29.0,118.0,0.9928,3.24,0.36,10.9 +7.9,0.33,0.28,31.6,0.053,35.0,176.0,1.0103,3.15,0.38,8.8 +6.6,0.46,0.49,7.4,0.052,19.0,184.0,0.9956,3.11,0.38,9.0 +7.8,0.28,0.49,1.3,0.046,27.0,142.0,0.9936,3.09,0.59,10.2 +5.8,0.15,0.49,1.1,0.048,21.0,98.0,0.9929,3.19,0.48,9.2 +7.8,0.4,0.49,7.8,0.06,34.0,162.0,0.9966,3.26,0.58,11.3 +6.6,0.31,0.49,7.7,0.05,52.0,220.0,0.9964,3.12,0.45,8.8 +6.6,0.325,0.49,7.7,0.049,53.0,217.0,0.996,3.16,0.4,9.3 +6.6,0.27,0.49,7.8,0.049,62.0,217.0,0.9959,3.17,0.45,9.4 +6.7,0.26,0.49,8.3,0.047,54.0,191.0,0.9954,3.23,0.4,10.3 +6.7,0.21,0.49,1.4,0.047,30.0,114.0,0.9914,2.92,0.42,10.8 +7.9,0.33,0.28,31.6,0.053,35.0,176.0,1.0103,3.15,0.38,8.8 +8.1,0.28,0.46,15.4,0.059,32.0,177.0,1.0004,3.27,0.58,9.0 +6.5,0.13,0.37,1.0,0.036,48.0,114.0,0.9911,3.41,0.51,11.5 +7.8,0.445,0.56,1.0,0.04,8.0,84.0,0.9938,3.25,0.43,10.8 +8.8,0.39,0.34,5.9,0.055,33.0,128.0,0.9927,2.95,0.51,11.8 +7.9,0.18,0.33,1.2,0.033,20.0,72.0,0.9922,3.12,0.38,10.5 +7.1,0.31,0.38,1.2,0.036,10.0,124.0,0.9924,3.14,0.44,9.9 +7.8,0.24,0.18,6.7,0.046,33.0,160.0,0.9963,3.2,0.56,9.8 +7.0,0.35,0.3,6.5,0.028,27.0,87.0,0.9936,3.4,0.42,11.4 +6.6,0.26,0.31,4.8,0.138,41.0,168.0,0.9951,3.2,0.38,9.3 +6.6,0.27,0.31,5.3,0.137,35.0,163.0,0.9951,3.2,0.38,9.3 +6.8,0.22,0.29,8.9,0.046,82.0,188.0,0.9955,3.3,0.44,10.3 +6.2,0.27,0.32,8.8,0.047,65.0,224.0,0.9961,3.17,0.47,8.9 +7.0,0.35,0.3,6.5,0.028,27.0,87.0,0.9936,3.4,0.42,11.4 +7.3,0.23,0.37,1.8,0.032,60.0,156.0,0.992,3.11,0.35,11.1 +6.2,0.3,0.2,6.6,0.045,42.0,170.0,0.9944,3.36,0.45,10.4 +6.4,0.35,0.2,5.7,0.034,18.0,117.0,0.9944,3.33,0.43,10.1 +7.6,0.32,0.34,18.35,0.054,44.0,197.0,1.0008,3.22,0.55,9.0 +6.3,0.31,0.3,10.0,0.046,49.0,212.0,0.9962,3.74,0.55,11.9 +7.2,0.25,0.28,14.4,0.055,55.0,205.0,0.9986,3.12,0.38,9.0 +7.2,0.25,0.28,14.4,0.055,55.0,205.0,0.9986,3.12,0.38,9.0 +7.3,0.26,0.33,17.85,0.049,41.5,195.0,1.0,3.06,0.44,9.1 +7.2,0.25,0.28,14.4,0.055,55.0,205.0,0.9986,3.12,0.38,9.0 +7.4,0.26,0.37,9.4,0.047,42.0,147.0,0.9982,3.46,0.72,10.0 +7.3,0.26,0.33,17.85,0.049,41.5,195.0,1.0,3.06,0.44,9.1 +6.7,0.25,0.26,1.55,0.041,118.5,216.0,0.9949,3.55,0.63,9.4 +7.1,0.16,0.25,1.3,0.034,28.0,123.0,0.9915,3.27,0.55,11.4 +9.0,0.43,0.3,1.5,0.05,7.0,175.0,0.9951,3.11,0.45,9.7 +7.2,0.25,0.28,14.4,0.055,55.0,205.0,0.9986,3.12,0.38,9.0 +7.0,0.24,0.3,4.2,0.04,41.0,213.0,0.9927,3.28,0.49,11.8 +6.7,0.265,0.22,8.6,0.048,54.0,198.0,0.9955,3.25,0.41,10.2 +7.7,0.12,0.32,1.4,0.06,47.0,150.0,0.9952,3.37,0.42,9.2 +7.2,0.21,0.33,3.0,0.036,35.0,132.0,0.9928,3.25,0.4,11.0 +8.5,0.32,0.36,14.9,0.041,47.0,190.0,0.9982,3.08,0.31,10.0 +6.9,0.18,0.3,2.0,0.038,39.0,190.0,0.9914,3.32,0.37,12.2 +7.0,0.24,0.3,4.2,0.04,41.0,213.0,0.9927,3.28,0.49,11.8 +6.3,0.26,0.29,2.2,0.043,35.0,175.0,0.9918,3.38,0.43,11.6 +6.7,0.26,0.3,1.8,0.043,25.0,121.0,0.9944,3.44,0.61,10.2 +7.9,0.29,0.36,11.1,0.033,43.0,208.0,0.9969,3.14,0.46,10.3 +6.5,0.27,0.19,4.2,0.046,6.0,114.0,0.9955,3.25,0.35,8.6 +6.7,0.33,0.42,6.4,0.058,27.0,151.0,0.9954,3.16,0.44,9.6 +6.7,0.31,0.42,6.4,0.057,25.0,148.0,0.9955,3.16,0.45,9.6 +6.6,0.25,0.31,1.5,0.035,32.0,127.0,0.9921,3.41,0.47,11.3 +6.4,0.24,0.22,1.5,0.038,38.0,157.0,0.9934,3.41,0.55,9.9 +6.8,0.26,0.29,16.95,0.056,48.0,179.0,0.9998,3.45,0.4,9.6 +7.0,0.61,0.26,1.7,0.051,25.0,161.0,0.9946,3.36,0.6,10.6 
+6.8,0.22,0.3,13.6,0.055,50.0,180.0,0.9984,3.44,0.39,9.8 +8.1,0.31,0.24,1.6,0.032,10.0,67.0,0.9924,3.08,0.47,10.5 +7.0,0.2,0.3,6.1,0.037,31.0,120.0,0.9939,3.24,0.51,10.8 +7.9,0.18,0.37,3.0,0.061,25.0,178.0,0.995,3.22,0.51,10.0 +6.6,0.34,0.27,6.2,0.059,23.0,136.0,0.9957,3.3,0.49,10.1 +6.8,0.3,0.24,6.6,0.123,35.0,116.0,0.9953,3.07,0.48,9.4 +6.5,0.18,0.34,1.6,0.04,43.0,148.0,0.9912,3.32,0.59,11.5 +7.0,0.21,0.31,6.0,0.046,29.0,108.0,0.9939,3.26,0.5,10.8 +6.8,0.27,0.32,1.5,0.044,19.0,142.0,0.9921,3.1,0.43,9.9 +9.3,0.2,0.33,1.7,0.05,28.0,178.0,0.9954,3.16,0.43,9.0 +5.8,0.23,0.27,1.8,0.043,24.0,69.0,0.9933,3.38,0.31,9.4 +7.6,0.2,0.39,2.6,0.044,30.0,180.0,0.9941,3.46,0.44,10.8 +8.2,0.15,0.48,2.7,0.052,24.0,190.0,0.995,3.5,0.45,10.9 +7.5,0.4,1.0,19.5,0.041,33.0,148.0,0.9977,3.24,0.38,12.0 +6.5,0.18,0.34,1.6,0.04,43.0,148.0,0.9912,3.32,0.59,11.5 +7.0,0.13,0.3,5.0,0.056,31.0,122.0,0.9945,3.47,0.42,10.5 +6.9,0.17,0.22,4.6,0.064,55.0,152.0,0.9952,3.29,0.37,9.3 +7.0,0.3,0.32,6.4,0.034,28.0,97.0,0.9924,3.23,0.44,11.8 +7.6,0.445,0.44,14.5,0.045,68.0,212.0,0.9986,3.48,0.36,10.0 +6.8,0.3,0.24,6.6,0.123,35.0,116.0,0.9953,3.07,0.48,9.4 +7.5,0.22,0.33,6.7,0.036,45.0,138.0,0.9939,3.2,0.68,11.4 +9.2,0.23,0.3,1.1,0.031,40.0,99.0,0.9929,2.94,0.3,10.4 +8.7,0.34,0.46,13.8,0.055,68.0,198.0,0.9988,3.36,0.37,9.5 +6.6,0.545,0.04,2.5,0.031,48.0,111.0,0.9906,3.14,0.32,11.9 +8.1,0.3,0.31,1.1,0.041,49.0,123.0,0.9914,2.99,0.45,11.1 +6.9,0.16,0.3,9.6,0.057,50.0,185.0,0.9978,3.39,0.38,9.6 +8.0,0.32,0.36,4.6,0.042,56.0,178.0,0.9928,3.29,0.47,12.0 +6.1,0.22,0.23,3.1,0.052,15.0,104.0,0.9948,3.14,0.42,8.7 +6.9,0.16,0.3,9.6,0.057,50.0,185.0,0.9978,3.39,0.38,9.6 +7.5,0.15,0.38,1.8,0.054,19.0,101.0,0.9946,3.24,0.44,10.0 +8.4,0.29,0.29,1.05,0.032,4.0,55.0,0.9908,2.91,0.32,11.4 +6.6,0.37,0.47,6.5,0.061,23.0,150.0,0.9954,3.14,0.45,9.6 +7.7,0.38,0.4,2.0,0.038,28.0,152.0,0.9906,3.18,0.32,12.9 +6.3,0.25,0.23,14.9,0.039,47.0,142.0,0.99705,3.14,0.35,9.7 +8.3,0.3,0.36,10.0,0.042,33.0,169.0,0.9982,3.23,0.51,9.3 +6.6,0.22,0.58,1.1,0.133,52.0,136.0,0.9932,3.1,0.3,9.1 +6.1,0.34,0.31,12.0,0.053,46.0,238.0,0.9977,3.16,0.48,8.6 +7.5,0.22,0.29,4.8,0.05,33.0,87.0,0.994,3.14,0.42,9.9 +8.3,0.3,0.36,10.0,0.042,33.0,169.0,0.9982,3.23,0.51,9.3 +8.0,0.27,0.24,1.2,0.044,20.0,102.0,0.9929,3.28,0.42,10.9 +6.1,0.17,0.27,1.5,0.056,45.0,135.0,0.9924,3.2,0.43,10.2 +7.4,0.18,0.3,10.4,0.045,44.0,174.0,0.9966,3.11,0.57,9.7 +6.7,0.16,0.28,2.5,0.046,40.0,153.0,0.9921,3.38,0.51,11.4 +6.1,0.255,0.44,12.3,0.045,53.0,197.0,0.9967,3.24,0.54,9.5 +7.4,0.23,0.25,1.4,0.049,43.0,141.0,0.9934,3.42,0.54,10.2 +6.4,0.16,0.28,2.2,0.042,33.0,93.0,0.9914,3.31,0.43,11.1 +6.3,0.25,0.23,14.9,0.039,47.0,142.0,0.99705,3.14,0.35,9.7 +6.7,0.27,0.25,8.0,0.053,54.0,202.0,0.9961,3.22,0.43,9.3 +6.9,0.29,0.23,8.6,0.056,56.0,215.0,0.9967,3.17,0.44,8.8 +9.6,0.21,0.28,1.2,0.038,12.0,53.0,0.9926,2.8,0.46,10.6 +6.6,0.62,0.2,8.7,0.046,81.0,224.0,0.99605,3.17,0.44,9.3 +6.4,0.28,0.19,5.4,0.042,67.0,181.0,0.99435,3.31,0.35,10.2 +8.0,0.3,0.28,5.7,0.044,31.0,124.0,0.9948,3.16,0.51,10.2 +6.4,0.17,0.27,1.5,0.037,20.0,98.0,0.9916,3.46,0.42,11.0 +7.3,0.21,0.3,10.9,0.037,18.0,112.0,0.997,3.4,0.5,9.6 +6.7,0.27,0.25,8.0,0.053,54.0,202.0,0.9961,3.22,0.43,9.3 +6.9,0.29,0.23,8.6,0.056,56.0,215.0,0.9967,3.17,0.44,8.8 +6.6,0.32,0.26,7.7,0.054,56.0,209.0,0.9961,3.17,0.45,8.8 +7.4,0.32,0.22,1.7,0.051,50.0,179.0,0.9955,3.28,0.69,8.9 +6.6,0.37,0.07,1.4,0.048,58.0,144.0,0.9922,3.17,0.38,10.0 +7.7,0.43,0.28,4.5,0.046,33.0,102.0,0.9918,3.16,0.56,12.2 +7.8,0.39,0.26,9.9,0.059,33.0,181.0,0.9955,3.04,0.42,10.9 
+6.5,0.18,0.26,1.4,0.041,40.0,141.0,0.9941,3.34,0.72,9.5 +7.8,0.4,0.26,9.5,0.059,32.0,178.0,0.9955,3.04,0.43,10.9 +7.8,0.39,0.26,9.9,0.059,33.0,181.0,0.9955,3.04,0.42,10.9 +6.9,0.19,0.28,3.0,0.054,33.0,99.0,0.9924,3.16,0.4,10.8 +7.7,0.49,1.0,19.6,0.03,28.0,135.0,0.9973,3.24,0.4,12.0 +6.6,0.25,0.35,14.0,0.069,42.0,163.0,0.999,3.56,0.47,9.8 +6.5,0.18,0.26,1.4,0.041,40.0,141.0,0.9941,3.34,0.72,9.5 +6.4,0.15,0.36,1.8,0.034,43.0,150.0,0.9922,3.42,0.69,11.0 +6.4,0.15,0.36,1.8,0.034,43.0,150.0,0.9922,3.42,0.69,11.0 +8.4,0.17,0.31,5.4,0.052,47.0,150.0,0.9953,3.24,0.38,9.8 +6.1,0.32,0.37,1.8,0.051,13.0,200.0,0.9945,3.49,0.44,10.5 +8.5,0.21,0.26,9.25,0.034,73.0,142.0,0.9945,3.05,0.37,11.4 +8.7,0.45,0.4,1.5,0.067,17.0,100.0,0.9957,3.27,0.57,10.1 +6.7,0.24,0.29,6.8,0.038,54.0,127.0,0.9932,3.33,0.46,11.6 +8.5,0.21,0.26,9.25,0.034,73.0,142.0,0.9945,3.05,0.37,11.4 +7.4,0.33,0.26,2.6,0.04,29.0,115.0,0.9913,3.07,0.52,11.8 +7.2,0.26,0.3,2.1,0.033,50.0,158.0,0.9909,3.33,0.43,12.1 +8.2,0.36,0.29,7.6,0.035,37.0,122.0,0.9939,3.16,0.34,12.0 +7.8,0.2,0.24,1.6,0.026,26.0,189.0,0.991,3.08,0.74,12.1 +9.4,0.16,0.3,1.4,0.042,26.0,176.0,0.9954,3.15,0.46,9.1 +6.4,0.33,0.24,1.6,0.054,25.0,117.0,0.9943,3.36,0.5,9.3 +7.8,0.22,0.36,1.4,0.056,21.0,153.0,0.993,3.2,0.53,10.4 +7.4,0.35,0.31,17.95,0.062,42.0,187.0,1.0002,3.27,0.64,9.1 +6.6,0.37,0.24,2.0,0.064,23.0,120.0,0.9946,3.32,0.54,9.4 +6.7,0.37,0.41,6.3,0.061,22.0,149.0,0.9953,3.16,0.47,9.6 +7.1,0.37,0.32,1.4,0.037,27.0,126.0,0.9918,3.19,0.62,12.0 +6.9,0.25,0.27,9.05,0.039,37.0,128.0,0.9936,3.27,0.34,11.3 +6.8,0.23,0.29,15.4,0.073,56.0,173.0,0.9984,3.06,0.41,8.7 +6.4,0.26,0.21,7.1,0.04,35.0,162.0,0.9956,3.39,0.58,9.9 +7.6,0.3,0.22,10.2,0.049,57.0,191.0,0.9966,3.08,0.4,9.3 +9.4,0.16,0.23,1.6,0.042,14.0,67.0,0.9942,3.07,0.32,9.5 +6.8,0.23,0.29,15.4,0.073,56.0,173.0,0.9984,3.06,0.41,8.7 +6.4,0.26,0.21,7.1,0.04,35.0,162.0,0.9956,3.39,0.58,9.9 +7.6,0.3,0.22,10.2,0.049,57.0,191.0,0.9966,3.08,0.4,9.3 +7.5,0.33,0.39,12.4,0.065,29.0,119.0,0.9974,3.16,0.39,9.4 +7.6,0.38,0.2,3.4,0.046,9.0,116.0,0.9944,3.15,0.41,9.4 +8.8,0.2,0.43,15.0,0.053,60.0,184.0,1.0008,3.28,0.79,8.8 +7.5,0.33,0.39,12.4,0.065,29.0,119.0,0.9974,3.16,0.39,9.4 +8.8,0.2,0.43,15.0,0.053,60.0,184.0,1.0008,3.28,0.79,8.8 +6.6,0.36,0.21,1.5,0.049,39.0,184.0,0.9928,3.18,0.41,9.9 +7.6,0.38,0.2,3.4,0.046,9.0,116.0,0.9944,3.15,0.41,9.4 +5.6,0.46,0.24,4.8,0.042,24.0,72.0,0.9908,3.29,0.37,12.6 +7.2,0.15,0.38,1.2,0.038,18.0,110.0,0.9917,3.19,0.43,11.1 +8.2,0.42,0.29,4.1,0.03,31.0,100.0,0.9911,3.0,0.32,12.8 +6.8,0.3,0.35,2.8,0.038,10.0,164.0,0.9912,3.09,0.53,12.0 +6.7,0.27,0.3,13.9,0.029,34.0,131.0,0.9953,3.36,0.5,12.0 +7.2,0.5,0.0,0.8,0.034,46.0,114.0,0.9932,3.19,0.34,9.2 +6.0,0.26,0.29,1.0,0.032,27.0,96.0,0.9896,3.38,0.44,12.3 +6.8,0.33,0.28,1.2,0.032,38.0,131.0,0.9889,3.19,0.41,13.0 +6.8,0.3,0.35,2.8,0.038,10.0,164.0,0.9912,3.09,0.53,12.0 +7.4,0.29,0.31,1.7,0.035,23.0,110.0,0.9926,3.07,0.38,10.9 +8.2,0.42,0.29,4.1,0.03,31.0,100.0,0.9911,3.0,0.32,12.8 +7.3,0.19,0.24,6.3,0.054,34.0,231.0,0.9964,3.36,0.54,10.0 +6.5,0.32,0.12,11.5,0.033,35.0,165.0,0.9974,3.22,0.32,9.0 +7.1,0.32,0.4,1.5,0.034,13.0,84.0,0.9944,3.42,0.6,10.4 +6.5,0.32,0.12,11.5,0.033,35.0,165.0,0.9974,3.22,0.32,9.0 +7.3,0.19,0.24,6.3,0.054,34.0,231.0,0.9964,3.36,0.54,10.0 +7.3,0.17,0.23,6.3,0.051,35.0,240.0,0.9963,3.36,0.54,10.0 +7.7,0.44,0.24,11.2,0.031,41.0,167.0,0.9948,3.12,0.43,11.3 +7.7,0.44,0.24,11.2,0.031,41.0,167.0,0.9948,3.12,0.43,11.3 +7.4,0.49,0.24,15.1,0.03,34.0,153.0,0.9953,3.13,0.51,12.0 +7.7,0.44,0.24,11.2,0.031,41.0,167.0,0.9948,3.12,0.43,11.3 
+7.4,0.49,0.24,15.1,0.03,34.0,153.0,0.9953,3.13,0.51,12.0 +6.4,0.21,0.3,5.6,0.044,43.0,160.0,0.9949,3.6,0.41,10.6 +8.0,0.55,0.42,12.6,0.211,37.0,213.0,0.9988,2.99,0.56,9.3 +7.0,0.19,0.23,5.7,0.123,27.0,104.0,0.9954,3.04,0.54,9.4 +7.2,0.24,0.29,2.2,0.037,37.0,102.0,0.992,3.27,0.64,11.0 +6.5,0.34,0.36,11.0,0.052,53.0,247.0,0.9984,3.44,0.55,9.3 +7.0,0.19,0.23,5.7,0.123,27.0,104.0,0.9954,3.04,0.54,9.4 +6.9,0.18,0.33,1.0,0.054,24.0,164.0,0.9926,3.42,0.51,10.5 +7.2,0.24,0.29,2.2,0.037,37.0,102.0,0.992,3.27,0.64,11.0 +8.2,0.18,0.31,11.8,0.039,96.0,249.0,0.9976,3.07,0.52,9.5 +8.3,0.28,0.45,7.8,0.059,32.0,139.0,0.9972,3.33,0.77,11.2 +6.1,0.34,0.46,4.7,0.029,21.0,94.0,0.991,3.29,0.62,12.3 +7.4,0.44,0.2,11.5,0.049,44.0,157.0,0.998,3.27,0.44,9.0 +7.6,0.26,0.58,7.9,0.041,62.0,180.0,0.9966,3.07,0.38,9.0 +7.4,0.44,0.2,11.5,0.049,44.0,157.0,0.998,3.27,0.44,9.0 +8.7,0.49,0.57,17.8,0.052,34.0,243.0,1.0007,2.98,0.82,9.0 +7.0,0.24,0.25,1.7,0.042,48.0,189.0,0.992,3.25,0.42,11.4 +7.1,0.25,0.25,1.6,0.046,50.0,181.0,0.9925,3.2,0.42,11.0 +6.1,0.34,0.46,4.7,0.029,21.0,94.0,0.991,3.29,0.62,12.3 +6.4,0.18,0.31,1.6,0.049,36.0,127.0,0.9934,3.6,0.67,10.4 +8.3,0.27,0.39,2.4,0.058,16.0,107.0,0.9955,3.28,0.59,10.3 +6.8,0.24,0.35,6.4,0.048,44.0,172.0,0.9944,3.29,0.55,10.5 +8.0,0.22,0.28,14.0,0.053,83.0,197.0,0.9981,3.14,0.45,9.8 +10.0,0.91,0.42,1.6,0.056,34.0,181.0,0.9968,3.11,0.46,10.0 +8.9,0.34,0.34,1.6,0.056,13.0,176.0,0.9946,3.14,0.47,9.7 +8.9,0.33,0.34,1.4,0.056,14.0,171.0,0.9946,3.13,0.47,9.7 +8.0,0.22,0.28,14.0,0.053,83.0,197.0,0.9981,3.14,0.45,9.8 +6.7,0.18,0.19,4.7,0.046,57.0,161.0,0.9946,3.32,0.66,10.5 +7.8,0.2,0.28,10.2,0.054,78.0,186.0,0.997,3.14,0.46,10.0 +7.3,0.13,0.31,2.3,0.054,22.0,104.0,0.9924,3.24,0.92,11.5 +6.6,0.28,0.3,7.8,0.049,57.0,202.0,0.9958,3.24,0.39,9.5 +7.1,0.25,0.3,2.4,0.042,25.0,122.0,0.994,3.43,0.61,10.5 +7.6,0.36,0.44,8.3,0.255,28.0,142.0,0.9958,3.12,0.43,10.2 +7.6,0.27,0.25,13.9,0.05,45.0,199.0,0.9984,3.34,0.5,9.8 +6.9,0.37,0.28,13.8,0.031,34.0,137.0,0.9948,3.1,0.37,11.6 +7.4,0.21,0.27,7.3,0.031,41.0,144.0,0.9932,3.15,0.38,11.8 +8.2,0.18,0.28,8.5,0.035,41.0,140.0,0.9952,3.04,0.37,10.1 +6.3,0.19,0.21,1.8,0.049,35.0,163.0,0.9924,3.31,0.5,10.3 +7.0,0.21,0.22,5.1,0.048,38.0,168.0,0.9945,3.34,0.49,10.4 +5.8,0.33,0.2,16.05,0.047,26.0,166.0,0.9976,3.09,0.46,8.9 +5.8,0.33,0.2,16.05,0.047,26.0,166.0,0.9976,3.09,0.46,8.9 +7.9,0.29,0.31,7.35,0.034,37.0,154.0,0.9938,3.06,0.31,10.8 +6.6,0.31,0.38,16.05,0.058,16.0,165.0,0.9997,3.38,0.6,9.2 +8.0,0.19,0.3,2.0,0.053,48.0,140.0,0.994,3.18,0.49,9.6 +8.0,0.2,0.36,1.2,0.032,21.0,78.0,0.9921,3.08,0.37,10.4 +8.0,0.25,0.26,14.0,0.043,41.0,248.0,0.9986,3.03,0.57,8.7 +7.2,0.2,0.61,16.2,0.043,14.0,103.0,0.9987,3.06,0.36,9.2 +7.7,0.3,0.42,14.3,0.045,45.0,213.0,0.9991,3.18,0.63,9.2 +7.2,0.2,0.61,16.2,0.043,14.0,103.0,0.9987,3.06,0.36,9.2 +7.7,0.3,0.42,14.3,0.045,45.0,213.0,0.9991,3.18,0.63,9.2 +7.7,0.3,0.42,14.3,0.045,45.0,213.0,0.9991,3.18,0.63,9.2 +6.4,0.22,0.32,7.9,0.029,34.0,124.0,0.9948,3.4,0.39,10.2 +7.2,0.2,0.61,16.2,0.043,14.0,103.0,0.9987,3.06,0.36,9.2 +7.0,0.53,0.02,1.0,0.036,39.0,107.0,0.993,3.2,0.32,9.0 +7.3,0.24,0.41,13.6,0.05,41.0,178.0,0.9988,3.37,0.43,9.7 +7.2,0.24,0.4,17.85,0.049,50.0,185.0,1.0,3.34,0.42,9.6 +7.6,0.15,0.4,1.3,0.036,24.0,112.0,0.9932,3.14,0.76,10.0 +7.7,0.3,0.42,14.3,0.045,45.0,213.0,0.9991,3.18,0.63,9.2 +7.6,0.33,0.41,13.7,0.045,44.0,197.0,0.9989,3.18,0.64,9.1 +6.8,0.24,0.31,18.3,0.046,40.0,142.0,1.0,3.3,0.41,8.7 +6.8,0.24,0.31,18.3,0.046,40.0,142.0,1.0,3.3,0.41,8.7 +6.8,0.35,0.44,6.5,0.056,31.0,161.0,0.9952,3.14,0.44,9.5 
+7.9,0.26,0.33,10.3,0.039,73.0,212.0,0.9969,2.93,0.49,9.5 +7.5,0.29,0.67,8.1,0.037,53.0,166.0,0.9966,2.9,0.41,8.9 +7.5,0.29,0.67,8.1,0.037,53.0,166.0,0.9966,2.9,0.41,8.9 +7.2,0.31,0.41,8.6,0.053,15.0,89.0,0.9976,3.29,0.64,9.9 +6.7,0.44,0.31,1.9,0.03,41.0,104.0,0.99,3.29,0.62,12.6 +10.0,0.23,0.27,14.1,0.033,45.0,166.0,0.9988,2.72,0.43,9.7 +7.4,0.21,0.3,7.9,0.039,14.0,118.0,0.9942,2.96,0.34,10.4 +8.8,0.23,0.35,10.7,0.04,26.0,183.0,0.9984,2.93,0.49,9.1 +7.8,0.34,0.27,1.2,0.04,25.0,106.0,0.9932,3.01,0.55,10.4 +7.9,0.26,0.33,10.3,0.039,73.0,212.0,0.9969,2.93,0.49,9.5 +7.5,0.29,0.67,8.1,0.037,53.0,166.0,0.9966,2.9,0.41,8.9 +6.0,0.28,0.35,1.9,0.037,16.0,120.0,0.9933,3.16,0.69,10.6 +7.9,0.37,0.3,2.7,0.029,64.0,158.0,0.9916,3.12,0.59,12.0 +7.2,0.36,0.36,5.7,0.038,26.0,98.0,0.9914,2.93,0.59,12.5 +7.6,0.13,0.34,9.3,0.062,40.0,126.0,0.9966,3.21,0.39,9.6 +6.6,0.25,0.36,8.1,0.045,54.0,180.0,0.9958,3.08,0.42,9.2 +7.1,0.18,0.26,1.3,0.041,20.0,71.0,0.9926,3.04,0.74,9.9 +7.9,0.3,0.27,8.5,0.036,20.0,112.0,0.9939,2.96,0.46,11.7 +8.3,0.23,0.3,2.1,0.049,21.0,153.0,0.9953,3.09,0.5,9.6 +6.8,0.43,0.3,3.5,0.033,27.0,135.0,0.9906,3.0,0.37,12.0 +7.2,0.36,0.36,5.7,0.038,26.0,98.0,0.9914,2.93,0.59,12.5 +6.6,0.25,0.36,8.1,0.045,54.0,180.0,0.9958,3.08,0.42,9.2 +7.1,0.18,0.26,1.3,0.041,20.0,71.0,0.9926,3.04,0.74,9.9 +6.6,0.35,0.29,14.4,0.044,54.0,177.0,0.9991,3.17,0.58,8.9 +7.3,0.22,0.5,13.7,0.049,56.0,189.0,0.9994,3.24,0.66,9.0 +8.1,0.26,0.33,11.1,0.052,52.5,158.0,0.9976,3.03,0.49,10.2 +7.6,0.13,0.34,9.3,0.062,40.0,126.0,0.9966,3.21,0.39,9.6 +7.0,0.12,0.19,4.9,0.055,27.0,127.0,0.9953,3.29,0.41,9.4 +8.2,0.37,0.27,1.7,0.028,10.0,59.0,0.9923,2.97,0.48,10.4 +7.6,0.26,0.36,1.6,0.032,6.0,106.0,0.993,3.15,0.4,10.4 +6.3,0.2,0.58,1.4,0.204,15.0,97.0,0.9931,3.16,0.43,10.0 +6.3,0.22,0.57,1.4,0.208,14.0,96.0,0.9932,3.16,0.43,10.0 +7.1,0.25,0.28,1.6,0.052,46.0,169.0,0.9926,3.05,0.41,10.5 +7.0,0.27,0.32,6.8,0.047,47.0,193.0,0.9938,3.23,0.39,11.4 +8.8,0.34,0.33,9.7,0.036,46.0,172.0,0.9966,3.08,0.4,10.2 +9.2,0.27,0.34,10.5,0.043,49.0,228.0,0.9974,3.04,0.41,10.4 +7.1,0.49,0.22,2.0,0.047,146.5,307.5,0.9924,3.24,0.37,11.0 +9.2,0.71,0.23,6.2,0.042,15.0,93.0,0.9948,2.89,0.34,10.1 +7.2,0.47,0.65,8.3,0.083,27.0,182.0,0.9964,3.0,0.35,9.2 +6.8,0.28,0.36,1.6,0.04,25.0,87.0,0.9924,3.23,0.66,10.3 +8.8,0.34,0.33,9.7,0.036,46.0,172.0,0.9966,3.08,0.4,10.2 +9.2,0.27,0.34,10.5,0.043,49.0,228.0,0.9974,3.04,0.41,10.4 +7.3,0.13,0.27,4.6,0.08,34.0,172.0,0.9938,3.23,0.39,11.1 +7.2,0.16,0.35,1.2,0.031,27.0,84.0,0.9928,3.33,0.34,9.9 +6.8,0.31,0.32,7.6,0.052,35.0,143.0,0.9959,3.14,0.38,9.0 +8.3,0.36,0.57,15.0,0.052,35.0,256.0,1.0001,2.93,0.64,8.6 +6.8,0.31,0.32,7.6,0.052,35.0,143.0,0.9959,3.14,0.38,9.0 +8.3,0.36,0.57,15.0,0.052,35.0,256.0,1.0001,2.93,0.64,8.6 +6.3,0.25,0.44,11.6,0.041,48.0,195.0,0.9968,3.18,0.52,9.5 +6.0,0.45,0.42,1.1,0.051,61.0,197.0,0.9932,3.02,0.4,9.0 +8.1,0.26,0.3,7.8,0.049,39.0,152.0,0.9954,2.99,0.58,10.0 +6.4,0.22,0.32,12.0,0.066,57.0,158.0,0.9992,3.6,0.43,9.0 +5.7,0.45,0.42,1.1,0.051,61.0,197.0,0.9932,3.02,0.4,9.0 +7.2,0.19,0.31,1.4,0.046,37.0,135.0,0.9939,3.34,0.57,10.2 +6.7,0.31,0.44,6.7,0.054,29.0,160.0,0.9952,3.04,0.44,9.6 +8.0,0.25,0.13,17.2,0.036,49.0,219.0,0.9996,2.96,0.46,9.7 +9.9,1.005,0.46,1.4,0.046,34.0,185.0,0.9966,3.02,0.49,10.2 +8.1,0.31,0.36,8.2,0.028,29.0,142.0,0.9925,3.01,0.34,13.0 +8.1,0.24,0.38,4.3,0.044,49.0,172.0,0.996,3.37,0.74,10.8 +8.0,0.25,0.13,17.2,0.036,49.0,219.0,0.9996,2.96,0.46,9.7 +6.4,0.29,0.28,11.1,0.063,66.0,169.0,0.9973,2.89,0.57,9.0 +7.2,0.15,0.33,1.1,0.027,16.0,63.0,0.9937,3.37,0.4,9.9 
+7.0,0.12,0.32,7.2,0.058,22.0,89.0,0.9966,3.29,0.38,9.2 +7.4,0.32,0.55,16.6,0.056,53.0,238.0,1.0017,2.96,0.58,8.7 +8.5,0.17,0.31,1.0,0.024,13.0,91.0,0.993,2.79,0.37,10.1 +8.5,0.17,0.31,1.0,0.024,13.0,91.0,0.993,2.79,0.37,10.1 +9.5,0.21,0.47,1.3,0.039,21.0,123.0,0.9959,2.9,0.64,9.5 +8.2,0.21,0.48,1.4,0.041,11.0,99.0,0.9958,3.17,0.57,9.9 +7.4,0.32,0.55,16.6,0.056,53.0,238.0,1.0017,2.96,0.58,8.7 +6.8,0.31,0.42,6.9,0.046,50.0,173.0,0.9958,3.19,0.46,9.0 +6.8,0.27,0.28,13.3,0.076,50.0,163.0,0.9979,3.03,0.38,8.6 +7.4,0.21,0.3,8.1,0.047,13.0,114.0,0.9941,3.12,0.35,10.5 +8.0,0.23,0.35,9.2,0.044,53.0,186.0,0.997,3.09,0.56,9.5 +7.6,0.2,0.31,1.4,0.047,41.0,142.0,0.9934,3.43,0.53,10.1 +6.3,0.41,0.3,3.2,0.03,49.0,164.0,0.9927,3.53,0.79,11.7 +8.3,0.49,0.43,2.5,0.036,32.0,116.0,0.9944,3.23,0.47,10.7 +6.3,0.41,0.3,3.2,0.03,49.0,164.0,0.9927,3.53,0.79,11.7 +7.6,0.2,0.26,4.5,0.086,37.0,133.0,0.9963,3.15,0.42,9.2 +7.5,0.26,0.26,18.35,0.084,33.0,139.0,1.0011,3.17,0.39,8.8 +7.5,0.26,0.26,18.35,0.084,33.0,139.0,1.0011,3.17,0.39,8.8 +6.8,0.27,0.35,7.8,0.048,76.0,197.0,0.9959,3.24,0.43,9.5 +6.8,0.28,0.37,7.0,0.057,35.0,208.0,0.9973,3.57,0.55,10.2 +8.4,0.2,0.27,6.3,0.048,30.0,143.0,0.9966,3.25,0.5,9.1 +7.9,0.33,0.26,1.2,0.044,23.0,103.0,0.9932,3.19,0.54,10.5 +7.5,0.38,0.5,12.8,0.042,57.0,184.0,0.9984,3.09,0.46,9.0 +7.6,0.2,0.3,14.2,0.056,53.0,212.5,0.999,3.14,0.46,8.9 +7.6,0.2,0.3,14.2,0.056,53.0,212.5,0.999,3.14,0.46,8.9 +7.6,0.2,0.3,14.2,0.056,53.0,212.5,0.999,3.14,0.46,8.9 +7.6,0.2,0.3,14.2,0.056,53.0,212.5,0.999,3.14,0.46,8.9 +7.6,0.2,0.3,14.2,0.056,53.0,212.5,0.999,3.14,0.46,8.9 +8.1,0.19,0.58,16.65,0.049,48.0,181.0,1.0006,3.2,0.62,9.1 +7.6,0.16,0.41,1.9,0.047,27.0,151.0,0.9937,3.2,0.53,10.1 +8.1,0.22,0.28,7.7,0.043,57.0,176.0,0.9954,3.12,0.55,10.0 +8.0,0.22,0.32,10.4,0.043,63.0,201.0,0.997,3.11,0.53,9.5 +7.1,0.33,0.3,3.3,0.034,30.0,102.0,0.9912,3.08,0.31,12.3 +6.4,0.43,0.27,1.1,0.054,5.0,110.0,0.9939,3.24,0.52,9.1 +7.6,0.2,0.3,14.2,0.056,53.0,212.5,0.999,3.14,0.46,8.9 +7.0,0.12,0.28,6.3,0.057,17.0,103.0,0.9957,3.5,0.44,9.6 +7.4,0.3,0.22,5.25,0.053,33.0,180.0,0.9926,3.13,0.45,11.6 +7.0,0.28,0.33,14.6,0.043,47.0,168.0,0.9994,3.34,0.67,8.8 +8.4,0.2,0.38,11.8,0.055,51.0,170.0,1.0004,3.34,0.82,8.9 +7.0,0.28,0.33,14.6,0.043,47.0,168.0,0.9994,3.34,0.67,8.8 +8.4,0.2,0.38,11.8,0.055,51.0,170.0,1.0004,3.34,0.82,8.9 +8.4,0.2,0.38,11.8,0.055,51.0,170.0,1.0004,3.34,0.82,8.9 +7.3,0.18,0.31,17.3,0.055,32.0,197.0,1.0002,3.13,0.46,9.0 +6.8,0.31,0.09,1.4,0.04,56.0,145.0,0.9922,3.19,0.46,10.0 +6.7,0.31,0.08,1.3,0.038,58.0,147.0,0.9922,3.18,0.46,10.0 +7.6,0.17,0.35,1.6,0.047,43.0,154.0,0.9934,3.36,0.69,11.1 +7.4,0.3,0.22,5.25,0.053,33.0,180.0,0.9926,3.13,0.45,11.6 +7.4,0.26,0.31,2.4,0.043,58.0,178.0,0.9941,3.42,0.68,10.6 +7.0,0.28,0.33,14.6,0.043,47.0,168.0,0.9994,3.34,0.67,8.8 +8.4,0.2,0.38,11.8,0.055,51.0,170.0,1.0004,3.34,0.82,8.9 +5.6,0.18,0.31,1.5,0.038,16.0,84.0,0.9924,3.34,0.58,10.1 +7.2,0.15,0.39,1.8,0.043,21.0,159.0,0.9948,3.52,0.47,10.0 +8.0,0.4,0.33,7.7,0.034,27.0,98.0,0.9935,3.18,0.41,12.2 +7.0,0.25,0.56,2.0,0.035,20.0,95.0,0.9918,3.23,0.53,11.0 +7.2,0.15,0.39,1.8,0.043,21.0,159.0,0.9948,3.52,0.47,10.0 +6.8,0.18,0.46,1.4,0.064,37.0,160.0,0.9924,3.37,0.45,11.1 +6.6,0.32,0.22,16.7,0.046,38.0,133.0,0.9979,3.22,0.67,10.4 +9.0,0.55,0.3,8.1,0.026,14.0,71.0,0.993,2.94,0.36,11.8 +6.9,0.19,0.39,8.0,0.028,22.0,84.0,0.994,3.11,0.66,10.8 +6.3,0.41,0.33,4.7,0.023,28.0,110.0,0.991,3.3,0.38,12.5 +9.0,0.55,0.3,8.1,0.026,14.0,71.0,0.993,2.94,0.36,11.8 +7.0,0.2,0.34,2.1,0.049,12.0,136.0,0.9922,3.25,0.46,11.6 
+6.6,0.32,0.22,16.7,0.046,38.0,133.0,0.9979,3.22,0.67,10.4 +7.7,0.26,0.34,6.4,0.05,36.0,163.0,0.9937,3.19,0.7,11.5 +6.3,0.21,0.28,1.5,0.051,46.0,142.0,0.9928,3.23,0.42,10.1 +7.6,0.34,0.39,7.6,0.04,45.0,215.0,0.9965,3.11,0.53,9.2 +6.3,0.21,0.28,1.5,0.051,46.0,142.0,0.9928,3.23,0.42,10.1 +8.0,0.43,0.4,12.4,0.168,29.0,190.0,0.9991,3.07,0.64,9.2 +7.5,0.3,0.71,1.3,0.16,44.0,149.0,0.9948,3.08,0.42,8.9 +6.4,0.26,0.4,1.7,0.179,5.0,60.0,0.9925,3.09,0.54,10.1 +6.9,0.32,0.15,8.1,0.046,51.0,180.0,0.9958,3.13,0.45,8.9 +8.9,0.21,0.34,7.1,0.037,33.0,150.0,0.9962,3.1,0.45,9.7 +7.6,0.34,0.39,7.6,0.04,45.0,215.0,0.9965,3.11,0.53,9.2 +9.5,0.42,0.41,2.3,0.034,22.0,145.0,0.9951,3.06,0.52,11.0 +7.6,0.29,0.26,6.5,0.042,32.0,160.0,0.9944,3.14,0.47,10.7 +6.5,0.25,0.2,1.4,0.024,29.0,101.0,0.9916,3.24,0.54,10.8 +7.2,0.23,0.33,12.7,0.049,50.0,183.0,0.9987,3.41,0.4,9.8 +7.9,0.35,0.36,1.6,0.038,11.0,124.0,0.9928,3.25,0.48,11.0 +8.8,0.2,0.28,1.1,0.018,18.0,72.0,0.9926,2.97,0.35,10.4 +5.7,0.27,0.32,1.2,0.046,20.0,155.0,0.9934,3.8,0.41,10.2 +7.6,0.29,0.26,6.5,0.042,32.0,160.0,0.9944,3.14,0.47,10.7 +5.5,0.14,0.27,4.6,0.029,22.0,104.0,0.9949,3.34,0.44,9.0 +8.7,0.24,0.35,0.6,0.042,11.0,71.0,0.9926,3.08,0.38,10.6 +6.7,0.3,0.45,10.6,0.032,56.0,212.0,0.997,3.22,0.59,9.5 +5.5,0.14,0.27,4.6,0.029,22.0,104.0,0.9949,3.34,0.44,9.0 +5.6,0.13,0.27,4.8,0.028,22.0,104.0,0.9948,3.34,0.45,9.2 +7.4,0.18,0.34,2.7,0.03,30.0,107.0,0.992,2.97,0.53,11.0 +5.7,0.385,0.04,12.6,0.034,22.0,115.0,0.9964,3.28,0.63,9.9 +8.7,0.24,0.35,0.6,0.042,11.0,71.0,0.9926,3.08,0.38,10.6 +8.3,0.33,0.43,9.2,0.046,22.0,126.0,0.9982,3.38,0.47,9.3 +6.8,0.34,0.44,6.6,0.052,28.0,156.0,0.9955,3.14,0.41,9.6 +6.8,0.33,0.44,7.0,0.05,29.0,155.0,0.9955,3.14,0.42,9.5 +6.3,0.28,0.24,8.45,0.031,32.0,172.0,0.9958,3.39,0.57,9.7 +11.8,0.23,0.38,11.1,0.034,15.0,123.0,0.9997,2.93,0.55,9.7 +6.8,0.21,0.27,18.15,0.042,41.0,146.0,1.0001,3.3,0.36,8.7 +6.8,0.21,0.27,18.15,0.042,41.0,146.0,1.0001,3.3,0.36,8.7 +8.6,0.485,0.29,4.1,0.026,19.0,101.0,0.9918,3.01,0.38,12.4 +8.6,0.485,0.29,4.1,0.026,19.0,101.0,0.9918,3.01,0.38,12.4 +7.3,0.29,0.29,4.6,0.029,27.0,155.0,0.9931,3.07,0.26,10.6 +6.8,0.21,0.27,18.15,0.042,41.0,146.0,1.0001,3.3,0.36,8.7 +6.7,0.31,0.31,4.9,0.031,20.0,151.0,0.9926,3.36,0.82,12.0 +7.3,0.29,0.37,8.3,0.044,45.0,227.0,0.9966,3.12,0.47,9.0 +5.7,0.46,0.46,1.4,0.04,31.0,169.0,0.9932,3.13,0.47,8.8 +6.8,0.28,0.44,11.5,0.04,58.0,223.0,0.9969,3.22,0.56,9.5 +6.7,0.23,0.33,1.8,0.036,23.0,96.0,0.9925,3.32,0.4,10.8 +6.9,0.17,0.25,1.6,0.047,34.0,132.0,0.9914,3.16,0.48,11.4 +7.6,0.18,0.36,2.4,0.049,38.0,123.0,0.996,3.6,0.46,10.3 +6.6,0.22,0.28,4.9,0.042,51.0,180.0,0.9952,3.3,0.75,9.5 +7.8,0.27,0.28,1.8,0.05,21.0,127.0,0.9934,3.15,0.44,9.9 +7.7,0.28,0.29,4.3,0.051,25.0,142.0,0.9939,3.16,0.39,10.2 +7.6,0.29,0.29,4.4,0.051,26.0,146.0,0.9939,3.16,0.39,10.2 +5.7,0.32,0.18,1.4,0.029,26.0,104.0,0.9906,3.44,0.37,11.0 +7.1,0.33,0.25,1.6,0.03,25.0,126.0,0.9901,3.22,0.34,12.1 +7.3,0.34,0.3,1.3,0.057,25.0,173.0,0.9948,3.26,0.51,9.1 +6.5,0.19,0.26,5.2,0.04,31.0,140.0,0.995,3.26,0.68,9.5 +6.6,0.23,0.27,5.6,0.043,43.0,164.0,0.9953,3.27,0.76,9.5 +6.6,0.27,0.29,5.3,0.045,57.0,189.0,0.9953,3.31,0.79,9.8 +6.6,0.22,0.28,4.9,0.042,51.0,180.0,0.9952,3.3,0.75,9.5 +7.6,0.18,0.36,2.4,0.049,38.0,123.0,0.996,3.6,0.46,10.3 +6.8,0.36,0.32,1.6,0.039,10.0,124.0,0.9948,3.3,0.67,9.6 +7.0,0.22,0.39,2.1,0.055,39.0,198.0,0.9951,3.52,0.54,10.2 +5.9,0.17,0.3,1.4,0.042,25.0,119.0,0.9931,3.68,0.72,10.5 +7.4,0.45,0.32,7.1,0.044,17.0,117.0,0.9962,3.32,0.41,10.4 +6.8,0.36,0.32,1.6,0.039,10.0,124.0,0.9948,3.3,0.67,9.6 
+7.5,0.42,0.14,10.7,0.046,18.0,95.0,0.9959,3.22,0.33,10.7 +7.5,0.33,0.32,11.1,0.036,25.0,119.0,0.9962,3.15,0.34,10.5 +9.4,0.3,0.32,10.7,0.029,14.0,111.0,0.9958,2.85,0.42,10.6 +7.9,0.17,0.32,1.6,0.053,47.0,150.0,0.9948,3.29,0.76,9.6 +7.9,0.17,0.32,1.6,0.053,47.0,150.0,0.9948,3.29,0.76,9.6 +8.2,0.17,0.32,1.5,0.05,17.0,101.0,0.994,3.14,0.58,9.5 +8.3,0.17,0.31,1.5,0.049,48.0,153.0,0.9942,3.12,0.58,9.4 +8.7,0.15,0.3,1.6,0.046,29.0,130.0,0.9942,3.22,0.38,9.8 +7.9,0.17,0.32,1.6,0.053,47.0,150.0,0.9948,3.29,0.76,9.6 +7.2,0.25,0.19,8.0,0.044,51.0,172.0,0.9964,3.16,0.44,9.2 +7.2,0.24,0.19,7.7,0.045,53.0,176.0,0.9958,3.17,0.38,9.5 +5.3,0.76,0.03,2.7,0.043,27.0,93.0,0.9932,3.34,0.38,9.2 +6.6,0.22,0.53,15.1,0.052,22.0,136.0,0.9986,2.94,0.35,9.4 +6.6,0.22,0.53,15.1,0.052,22.0,136.0,0.9986,2.94,0.35,9.4 +8.4,0.28,0.4,8.9,0.048,33.0,146.0,0.9988,3.4,0.46,9.3 +6.8,0.32,0.34,6.0,0.05,5.0,129.0,0.9953,3.19,0.4,9.1 +6.7,0.24,0.33,12.3,0.046,31.0,145.0,0.9983,3.36,0.4,9.5 +7.4,0.18,0.36,13.1,0.056,72.0,163.0,1.0,3.42,0.35,9.1 +6.0,0.16,0.3,6.7,0.043,43.0,153.0,0.9951,3.63,0.46,10.6 +6.7,0.24,0.33,12.3,0.046,31.0,145.0,0.9983,3.36,0.4,9.5 +6.8,0.28,0.35,2.3,0.042,16.0,85.0,0.9906,3.19,0.56,12.4 +6.2,0.34,0.3,11.1,0.047,28.0,237.0,0.9981,3.18,0.49,8.7 +6.0,0.27,0.15,1.5,0.056,35.0,128.0,0.9936,3.12,0.45,8.8 +6.0,0.16,0.3,6.7,0.043,43.0,153.0,0.9951,3.63,0.46,10.6 +6.8,0.32,0.34,6.0,0.05,5.0,129.0,0.9953,3.19,0.4,9.1 +8.5,0.24,0.47,15.2,0.057,40.0,234.0,1.0005,3.02,0.66,9.0 +8.1,0.24,0.33,10.2,0.048,46.0,141.0,0.9972,3.16,0.48,10.3 +7.4,0.18,0.36,13.1,0.056,72.0,163.0,1.0,3.42,0.35,9.1 +7.7,0.23,0.31,10.7,0.038,59.0,186.0,0.9969,3.12,0.55,9.5 +6.5,0.22,0.25,17.1,0.05,44.0,138.0,1.0001,3.3,0.37,8.8 +6.5,0.22,0.25,17.1,0.05,44.0,138.0,1.0001,3.3,0.37,8.8 +6.5,0.22,0.25,17.1,0.05,44.0,138.0,1.0001,3.3,0.37,8.8 +5.7,0.33,0.15,1.9,0.05,20.0,93.0,0.9934,3.38,0.62,9.9 +7.7,0.23,0.31,10.7,0.038,59.0,186.0,0.9969,3.12,0.55,9.5 +6.5,0.22,0.25,17.1,0.05,44.0,138.0,1.0001,3.3,0.37,8.8 +6.8,0.2,0.27,1.2,0.034,19.0,68.0,0.9902,3.14,0.37,11.7 +7.7,0.26,0.32,1.2,0.04,26.0,117.0,0.993,3.21,0.56,10.8 +6.4,0.2,0.32,3.1,0.041,18.0,126.0,0.9914,3.43,0.42,12.0 +8.0,0.16,0.36,1.5,0.033,14.0,122.0,0.9941,3.2,0.39,10.3 +6.8,0.25,0.27,10.7,0.076,47.0,154.0,0.9967,3.05,0.38,9.0 +7.7,0.39,0.28,4.9,0.035,36.0,109.0,0.9918,3.19,0.58,12.2 +6.9,0.26,0.33,12.6,0.051,59.0,173.0,0.998,3.39,0.38,9.9 +6.8,0.25,0.27,10.7,0.076,47.0,154.0,0.9967,3.05,0.38,9.0 +7.7,0.39,0.28,4.9,0.035,36.0,109.0,0.9918,3.19,0.58,12.2 +6.0,0.28,0.22,12.15,0.048,42.0,163.0,0.9957,3.2,0.46,10.1 +6.5,0.43,0.28,12.0,0.056,23.0,174.0,0.9986,3.31,0.55,9.3 +9.1,0.33,0.38,1.7,0.062,50.5,344.0,0.9958,3.1,0.7,9.5 +5.9,0.5,0.05,2.6,0.054,36.0,146.0,0.9948,3.43,0.5,9.2 +6.8,0.28,0.39,1.4,0.036,15.0,115.0,0.9918,3.27,0.72,11.7 +7.0,0.35,0.24,1.9,0.04,21.0,144.0,0.9923,3.35,0.38,11.0 +7.1,0.22,0.32,16.9,0.056,49.0,158.0,0.9998,3.37,0.38,9.6 +7.1,0.22,0.32,16.9,0.056,49.0,158.0,0.9998,3.37,0.38,9.6 +8.3,0.24,0.27,2.1,0.03,22.0,162.0,0.9914,2.99,0.68,11.9 +6.8,0.26,0.32,7.0,0.041,38.0,118.0,0.9939,3.25,0.52,10.8 +7.2,0.16,0.26,7.1,0.054,41.0,224.0,0.9966,3.38,0.55,10.1 +7.9,0.18,0.36,5.9,0.058,31.0,132.0,0.995,3.25,0.52,10.9 +7.2,0.16,0.26,7.1,0.054,41.0,224.0,0.9966,3.38,0.55,10.1 +5.5,0.24,0.32,8.7,0.06,19.0,102.0,0.994,3.27,0.31,10.4 +7.1,0.33,0.64,13.2,0.056,12.0,105.0,0.9972,3.05,0.39,9.2 +7.7,0.28,0.35,15.3,0.056,31.0,117.0,0.9998,3.27,0.5,9.6 +7.7,0.28,0.35,15.3,0.056,31.0,117.0,0.9998,3.27,0.5,9.6 +7.5,0.26,0.52,13.2,0.047,64.0,179.0,0.9982,3.1,0.46,9.0 
+6.5,0.14,0.32,2.7,0.037,18.0,89.0,0.9924,3.4,0.74,11.5 +8.2,0.21,0.32,10.65,0.053,53.0,145.0,0.9972,3.17,0.48,10.2 +7.2,0.2,0.31,10.0,0.054,49.0,165.0,0.997,3.4,0.42,9.9 +7.2,0.115,0.3,6.8,0.056,26.0,105.0,0.9954,3.44,0.4,9.6 +6.4,0.29,0.2,15.6,0.04,20.0,142.0,0.9962,3.1,0.54,10.6 +7.1,0.33,0.64,13.2,0.056,12.0,105.0,0.9972,3.05,0.39,9.2 +6.8,0.24,0.34,5.1,0.038,31.0,99.0,0.9921,3.24,0.46,11.8 +7.0,0.24,0.34,3.0,0.035,36.0,102.0,0.9905,3.18,0.43,12.2 +7.7,0.28,0.35,15.3,0.056,31.0,117.0,0.9998,3.27,0.5,9.6 +7.0,0.22,0.33,2.1,0.052,15.0,76.0,0.993,3.2,0.41,10.6 +7.5,0.18,0.39,1.9,0.054,23.0,91.0,0.9941,3.27,0.45,10.3 +9.8,0.93,0.45,8.6,0.052,34.0,187.0,0.9994,3.12,0.59,10.2 +7.8,0.29,0.33,8.75,0.035,33.0,181.0,0.9962,3.11,0.46,10.7 +7.9,0.28,0.32,3.6,0.038,9.0,76.0,0.992,3.05,0.31,11.7 +8.5,0.25,0.27,4.7,0.031,31.0,92.0,0.9922,3.01,0.33,12.0 +7.4,0.18,0.27,1.3,0.048,26.0,105.0,0.994,3.52,0.66,10.6 +6.3,0.24,0.37,1.8,0.031,6.0,61.0,0.9897,3.3,0.34,12.2 +6.0,0.33,0.38,9.7,0.04,29.0,124.0,0.9954,3.47,0.48,11.0 +6.8,0.37,0.28,4.0,0.03,29.0,79.0,0.99,3.23,0.46,12.4 +9.9,0.49,0.23,2.4,0.087,19.0,115.0,0.9948,2.77,0.44,9.4 +8.5,0.25,0.27,4.7,0.031,31.0,92.0,0.9922,3.01,0.33,12.0 +8.4,0.22,0.28,18.8,0.028,55.0,130.0,0.998,2.96,0.35,11.6 +7.0,0.35,0.31,1.8,0.069,15.0,162.0,0.9944,3.18,0.47,9.4 +7.0,0.35,0.31,1.8,0.069,15.0,162.0,0.9944,3.18,0.47,9.4 +7.4,0.19,0.3,12.8,0.053,48.5,229.0,0.9986,3.14,0.49,9.1 +7.4,0.19,0.3,12.8,0.053,48.5,229.0,0.9986,3.14,0.49,9.1 +7.4,0.19,0.3,12.8,0.053,48.5,229.0,0.9986,3.14,0.49,9.1 +7.4,0.19,0.3,12.8,0.053,48.5,229.0,0.9986,3.14,0.49,9.1 +7.4,0.19,0.3,12.8,0.053,48.5,229.0,0.9986,3.14,0.49,9.1 +6.9,0.32,0.13,7.8,0.042,11.0,117.0,0.996,3.23,0.37,9.2 +7.6,0.32,0.58,16.75,0.05,43.0,163.0,0.9999,3.15,0.54,9.2 +7.4,0.19,0.3,12.8,0.053,48.5,229.0,0.9986,3.14,0.49,9.1 +7.4,0.19,0.3,12.8,0.053,48.5,212.0,0.9986,3.14,0.49,9.1 +6.9,0.32,0.13,7.8,0.042,11.0,117.0,0.996,3.23,0.37,9.2 +6.0,0.34,0.24,5.4,0.06,23.0,126.0,0.9951,3.25,0.44,9.0 +7.6,0.32,0.58,16.75,0.05,43.0,163.0,0.9999,3.15,0.54,9.2 +7.7,0.24,0.31,1.3,0.047,33.0,106.0,0.993,3.22,0.55,10.8 +8.0,0.36,0.43,10.1,0.053,29.0,146.0,0.9982,3.4,0.46,9.5 +7.4,0.29,0.25,3.8,0.044,30.0,114.0,0.992,3.11,0.4,11.0 +6.6,0.32,0.27,10.9,0.041,37.0,146.0,0.9963,3.24,0.47,10.0 +6.3,0.3,0.24,6.6,0.04,38.0,141.0,0.995,3.22,0.47,9.5 +6.4,0.33,0.24,9.8,0.041,29.0,109.0,0.9956,3.29,0.47,10.1 +7.5,0.18,0.31,11.7,0.051,24.0,94.0,0.997,3.19,0.44,9.5 +6.5,0.39,0.81,1.2,0.217,14.0,74.0,0.9936,3.08,0.53,9.5 +6.8,0.25,0.18,1.4,0.056,13.0,137.0,0.9935,3.11,0.42,9.5 +6.4,0.18,0.32,9.6,0.052,24.0,90.0,0.9963,3.35,0.49,9.4 +7.1,0.18,0.32,12.2,0.048,36.0,125.0,0.9967,2.92,0.54,9.4 +7.6,0.27,0.42,2.6,0.044,29.0,110.0,0.9912,3.31,0.51,12.7 +9.2,0.23,0.35,10.7,0.037,34.0,145.0,0.9981,3.09,0.32,9.7 +7.9,0.28,0.41,4.9,0.058,31.0,153.0,0.9966,3.27,0.51,9.7 +7.1,0.18,0.32,12.2,0.048,36.0,125.0,0.9967,2.92,0.54,9.4 +6.4,0.18,0.32,9.6,0.052,24.0,90.0,0.9963,3.35,0.49,9.4 +6.8,0.25,0.18,1.4,0.056,13.0,137.0,0.9935,3.11,0.42,9.5 +7.0,0.22,0.26,1.1,0.037,20.0,71.0,0.9902,3.1,0.38,11.7 +7.3,0.18,0.29,1.0,0.036,26.0,101.0,0.99,3.09,0.37,11.7 +7.1,0.26,0.19,8.2,0.051,53.0,187.0,0.996,3.16,0.52,9.7 +6.6,0.25,0.42,11.3,0.049,77.0,231.0,0.9966,3.24,0.52,9.5 +6.4,0.24,0.23,7.3,0.069,31.0,157.0,0.9962,3.25,0.53,9.1 +6.0,0.28,0.27,2.3,0.051,23.0,147.0,0.994,3.23,0.67,10.3 +7.1,0.26,0.19,8.2,0.051,53.0,187.0,0.996,3.16,0.52,9.7 +7.8,0.24,0.38,2.1,0.058,14.0,167.0,0.994,3.21,0.55,9.9 +7.6,0.27,0.33,2.0,0.059,19.0,175.0,0.9944,3.22,0.56,9.9 
+7.7,0.39,0.34,10.0,0.056,35.0,178.0,0.9974,3.26,0.6,10.2 +8.9,0.24,0.33,15.75,0.035,16.0,132.0,0.996,3.0,0.37,12.1 +6.6,0.23,0.24,3.9,0.045,36.0,138.0,0.9922,3.15,0.64,11.3 +7.1,0.26,0.3,2.0,0.031,13.0,128.0,0.9917,3.19,0.49,11.4 +7.0,0.32,0.35,1.5,0.039,24.0,125.0,0.9918,3.17,0.64,12.2 +7.4,0.24,0.26,1.6,0.058,53.0,150.0,0.9936,3.18,0.5,9.9 +6.9,0.21,0.33,1.4,0.056,35.0,136.0,0.9938,3.63,0.78,10.3 +7.0,0.32,0.35,1.5,0.039,24.0,125.0,0.9918,3.17,0.64,12.2 +7.4,0.17,0.29,1.4,0.047,23.0,107.0,0.9939,3.52,0.65,10.4 +7.1,0.26,0.3,2.0,0.031,13.0,128.0,0.9917,3.19,0.49,11.4 +8.5,0.28,0.34,13.8,0.041,32.0,161.0,0.9981,3.13,0.4,9.9 +7.8,0.3,0.37,1.3,0.051,16.0,96.0,0.9941,3.32,0.62,10.0 +8.1,0.25,0.38,3.8,0.051,18.0,129.0,0.9928,3.21,0.38,11.5 +7.7,0.28,0.29,6.9,0.041,29.0,163.0,0.9952,3.44,0.6,10.5 +6.5,0.24,0.36,2.2,0.027,36.0,134.0,0.9898,3.28,0.36,12.5 +7.0,0.22,0.32,1.6,0.045,40.0,120.0,0.9914,2.98,0.44,10.5 +8.5,0.28,0.34,13.8,0.041,32.0,161.0,0.9981,3.13,0.4,9.9 +8.0,0.45,0.28,10.8,0.051,25.0,157.0,0.9957,3.06,0.47,11.4 +6.9,0.23,0.33,12.8,0.056,44.0,169.0,0.998,3.42,0.42,9.8 +8.0,0.45,0.28,10.8,0.051,25.0,157.0,0.9957,3.06,0.47,11.4 +7.6,0.23,0.26,15.3,0.067,32.0,166.0,0.9986,3.03,0.44,9.2 +7.7,0.28,0.58,12.1,0.046,60.0,177.0,0.9983,3.08,0.46,8.9 +7.7,0.27,0.61,12.0,0.046,64.0,179.0,0.9982,3.07,0.46,8.9 +7.1,0.2,0.36,11.6,0.042,45.0,124.0,0.997,2.92,0.59,9.5 +6.9,0.25,0.35,9.2,0.034,42.0,150.0,0.9947,3.21,0.36,11.5 +7.1,0.2,0.36,11.6,0.042,45.0,124.0,0.997,2.92,0.59,9.5 +6.9,0.25,0.35,9.2,0.034,42.0,150.0,0.9947,3.21,0.36,11.5 +8.4,0.2,0.31,2.8,0.054,16.0,89.0,0.99416,2.96,0.45,9.5 +6.5,0.39,0.35,1.6,0.049,10.0,164.0,0.99516,3.35,0.51,9.7 +7.2,0.23,0.38,6.1,0.067,20.0,90.0,0.99496,3.17,0.79,9.7 +6.9,0.44,0.42,8.5,0.048,10.0,147.0,0.9974,3.32,0.46,9.5 +7.1,0.28,0.19,7.8,0.04,48.0,184.0,0.99579,3.16,0.5,9.4 +6.4,0.34,0.2,14.9,0.06,37.0,162.0,0.9983,3.13,0.45,9.0 +6.1,0.15,0.29,6.2,0.046,39.0,151.0,0.99471,3.6,0.44,10.6 +6.9,0.44,0.42,8.5,0.048,10.0,147.0,0.9974,3.32,0.46,9.5 +7.2,0.29,0.18,8.2,0.042,41.0,180.0,0.99644,3.16,0.49,9.1 +7.1,0.28,0.19,7.8,0.04,48.0,184.0,0.99579,3.16,0.5,9.4 +6.1,0.23,0.45,10.6,0.094,49.0,169.0,0.99699,3.05,0.54,8.8 +6.7,0.23,0.42,11.2,0.047,52.0,171.0,0.99758,3.54,0.74,10.4 +7.0,0.36,0.14,11.6,0.043,35.0,228.0,0.9977,3.13,0.51,8.9 +7.5,0.31,0.24,7.1,0.031,28.0,141.0,0.99397,3.16,0.38,10.6 +6.4,0.34,0.2,14.9,0.06,37.0,162.0,0.9983,3.13,0.45,9.0 +6.1,0.15,0.29,6.2,0.046,39.0,151.0,0.99471,3.6,0.44,10.6 +7.4,0.2,0.29,1.7,0.047,16.0,100.0,0.99243,3.28,0.45,10.6 +6.3,0.27,0.18,7.7,0.048,45.0,186.0,0.9962,3.23,0.47,9.0 +9.2,0.34,0.54,17.3,0.06,46.0,235.0,1.00182,3.08,0.61,8.8 +7.4,0.18,0.29,1.4,0.042,34.0,101.0,0.99384,3.54,0.6,10.5 +7.2,0.29,0.2,7.7,0.046,51.0,174.0,0.99582,3.16,0.52,9.5 +6.3,0.27,0.18,7.7,0.048,45.0,186.0,0.9962,3.23,0.47,9.0 +6.2,0.26,0.19,3.4,0.049,47.0,172.0,0.9924,3.14,0.43,10.4 +7.3,0.21,0.21,1.6,0.046,35.0,133.0,0.99466,3.38,0.46,10.0 +7.1,0.14,0.35,1.4,0.039,24.0,128.0,0.99212,2.97,0.68,10.4 +7.2,0.39,0.54,1.4,0.157,34.0,132.0,0.99449,3.11,0.53,9.0 +7.6,0.48,0.28,10.4,0.049,57.0,205.0,0.99748,3.24,0.45,9.3 +7.2,0.39,0.54,1.4,0.157,34.0,132.0,0.99449,3.11,0.53,9.0 +7.6,0.48,0.28,10.4,0.049,57.0,205.0,0.99748,3.24,0.45,9.3 +6.5,0.36,0.31,4.1,0.061,20.0,134.0,0.99475,3.18,0.45,9.0 +8.5,0.25,0.31,2.8,0.032,11.0,61.0,0.99189,3.06,0.44,11.5 +6.9,0.3,0.21,15.7,0.056,49.0,159.0,0.99827,3.11,0.48,9.0 +6.6,0.19,0.43,10.9,0.045,53.0,154.0,0.99752,3.52,0.77,10.4 +6.9,0.3,0.21,15.7,0.056,49.0,159.0,0.99827,3.11,0.48,9.0 
+9.4,0.42,0.32,6.5,0.027,20.0,167.0,0.99479,3.08,0.43,10.6 +6.6,0.19,0.43,10.9,0.045,53.0,154.0,0.99752,3.52,0.77,10.4 +6.3,0.2,0.3,5.9,0.034,35.0,152.0,0.99642,3.47,0.4,8.5 +8.5,0.19,0.56,17.3,0.055,47.0,169.0,1.00047,3.07,0.67,9.3 +7.3,0.19,0.25,1.4,0.051,41.0,107.0,0.99382,3.53,0.66,10.5 +6.7,0.25,0.26,13.5,0.06,50.0,156.0,0.99784,3.39,0.46,9.9 +6.2,0.25,0.28,8.5,0.035,28.0,108.0,0.99486,3.4,0.42,10.4 +6.1,0.46,0.32,6.2,0.053,10.0,94.0,0.99537,3.35,0.47,10.1 +7.3,0.19,0.25,1.4,0.051,41.0,107.0,0.99382,3.53,0.66,10.5 +7.5,0.29,0.26,14.95,0.067,47.0,178.0,0.99838,3.04,0.49,9.2 +6.7,0.31,0.18,7.7,0.043,57.0,200.0,0.99566,3.17,0.44,9.4 +7.4,0.14,0.3,1.3,0.033,25.0,91.0,0.99268,3.53,0.39,10.6 +6.7,0.31,0.18,7.7,0.043,57.0,200.0,0.99566,3.17,0.44,9.4 +7.1,0.4,0.52,1.3,0.148,45.0,149.0,0.99468,3.08,0.56,8.7 +6.4,0.16,0.25,1.3,0.047,20.0,77.0,0.9933,3.61,0.54,10.2 +6.3,0.16,0.22,1.3,0.046,18.0,66.0,0.99307,3.61,0.55,10.3 +7.4,0.33,0.26,15.6,0.049,67.0,210.0,0.99907,3.06,0.68,9.5 +7.4,0.33,0.26,15.6,0.049,67.0,210.0,0.99907,3.06,0.68,9.5 +7.4,0.33,0.26,15.6,0.049,67.0,210.0,0.99907,3.06,0.68,9.5 +7.4,0.33,0.26,15.6,0.049,67.0,210.0,0.99907,3.06,0.68,9.5 +6.6,0.41,0.24,4.9,0.158,47.0,144.0,0.99471,3.17,0.49,9.4 +6.7,0.43,0.23,5.0,0.157,49.0,145.0,0.99471,3.17,0.49,9.4 +7.4,0.33,0.26,15.6,0.049,67.0,210.0,0.99907,3.06,0.68,9.5 +7.3,0.4,0.28,6.5,0.037,26.0,97.0,0.99148,3.16,0.58,12.6 +7.4,0.18,0.24,1.4,0.047,21.0,106.0,0.99383,3.52,0.64,10.5 +8.6,0.17,0.28,2.7,0.047,38.0,150.0,0.99365,3.1,0.56,10.8 +6.5,0.32,0.23,1.2,0.054,39.0,208.0,0.99272,3.18,0.46,9.9 +7.3,0.4,0.28,6.5,0.037,26.0,97.0,0.99148,3.16,0.58,12.6 +7.0,0.32,0.31,6.4,0.031,38.0,115.0,0.99235,3.38,0.58,12.2 +7.5,0.42,0.19,6.9,0.041,62.0,150.0,0.99508,3.23,0.37,10.0 +6.9,0.28,0.31,7.2,0.04,47.0,168.0,0.9946,3.29,0.57,10.6 +6.5,0.29,0.42,10.6,0.042,66.0,202.0,0.99674,3.24,0.53,9.5 +6.3,0.41,0.18,3.5,0.027,23.0,109.0,0.99018,3.34,0.54,12.8 +7.0,0.32,0.31,6.4,0.031,38.0,115.0,0.99235,3.38,0.58,12.2 +7.3,0.3,0.33,2.3,0.043,28.0,125.0,0.99084,3.34,0.44,12.6 +6.6,0.22,0.28,12.05,0.058,25.0,125.0,0.99856,3.45,0.45,9.4 +6.0,0.26,0.18,7.0,0.055,50.0,194.0,0.99591,3.21,0.43,9.0 +6.9,0.44,0.18,11.8,0.051,26.0,126.0,0.9975,3.23,0.48,9.1 +7.5,0.42,0.2,1.4,0.06,15.0,168.0,0.9944,3.06,0.4,9.4 +7.0,0.36,0.3,5.0,0.04,40.0,143.0,0.99173,3.33,0.42,12.2 +5.6,0.295,0.2,2.2,0.049,18.0,134.0,0.99378,3.21,0.68,10.0 +6.8,0.21,0.55,14.6,0.053,34.0,159.0,0.99805,2.93,0.44,9.2 +9.4,0.28,0.3,1.6,0.045,36.0,139.0,0.99534,3.11,0.49,9.3 +8.1,0.28,0.34,1.3,0.035,11.0,126.0,0.99232,3.14,0.5,9.8 +6.8,0.21,0.55,14.6,0.053,34.0,159.0,0.99805,2.93,0.44,9.2 +7.0,0.22,0.26,2.8,0.036,44.0,132.0,0.99078,3.34,0.41,12.0 +9.4,0.28,0.3,1.6,0.045,36.0,139.0,0.99534,3.11,0.49,9.3 +6.8,0.32,0.3,3.3,0.029,15.0,80.0,0.99061,3.33,0.63,12.6 +7.0,0.19,0.33,6.3,0.032,42.0,127.0,0.99182,3.31,0.38,12.2 +7.7,0.42,0.38,8.1,0.061,49.0,144.0,0.9966,3.4,0.58,11.0 +7.4,0.2,0.31,1.6,0.038,34.0,116.0,0.9912,3.25,0.39,12.0 +7.5,0.24,0.62,10.6,0.045,51.0,153.0,0.99779,3.16,0.44,8.8 +7.5,0.26,0.59,11.8,0.046,58.0,164.0,0.99814,3.17,0.46,8.9 +6.6,0.4,0.32,1.7,0.035,39.0,84.0,0.99096,3.59,0.48,12.7 +8.0,0.2,0.3,8.1,0.037,42.0,130.0,0.99379,3.1,0.67,11.8 +4.6,0.445,0.0,1.4,0.053,11.0,178.0,0.99426,3.79,0.55,10.2 +6.1,0.41,0.04,1.3,0.036,23.0,121.0,0.99228,3.24,0.61,9.9 +7.6,0.2,0.34,1.8,0.041,42.0,148.0,0.99335,3.35,0.66,11.1 +6.9,0.3,0.21,7.2,0.045,54.0,190.0,0.99595,3.22,0.48,9.4 +7.0,0.35,0.17,1.1,0.049,7.0,119.0,0.99297,3.13,0.36,9.7 +6.9,0.35,0.55,11.95,0.038,22.0,111.0,0.99687,3.11,0.29,9.7 
+7.0,0.35,0.17,1.1,0.049,7.0,119.0,0.99297,3.13,0.36,9.7 +6.9,0.35,0.55,11.95,0.038,22.0,111.0,0.99687,3.11,0.29,9.7 +7.6,0.3,0.4,2.2,0.054,29.0,175.0,0.99445,3.19,0.53,9.8 +7.5,0.38,0.29,12.7,0.05,25.0,209.0,0.9986,3.25,0.59,9.3 +7.5,0.3,0.32,1.4,0.032,31.0,161.0,0.99154,2.95,0.42,10.5 +6.3,0.4,0.32,10.6,0.049,38.0,209.0,0.9981,3.47,0.59,9.3 +6.8,0.37,0.28,1.9,0.024,64.0,106.0,0.98993,3.45,0.6,12.6 +7.5,0.23,0.35,17.8,0.058,128.0,212.0,1.00241,3.44,0.43,8.9 +8.3,0.27,0.34,10.2,0.048,50.0,118.0,0.99716,3.18,0.51,10.3 +6.8,0.26,0.22,4.8,0.041,110.0,198.0,0.99437,3.29,0.67,10.6 +6.5,0.28,0.35,9.8,0.067,61.0,180.0,0.9972,3.15,0.57,9.0 +7.2,0.34,0.3,8.4,0.051,40.0,167.0,0.99756,3.48,0.62,9.7 +7.0,0.23,0.26,7.2,0.041,21.0,90.0,0.99509,3.22,0.55,9.5 +7.7,0.29,0.29,4.8,0.06,27.0,156.0,0.99572,3.49,0.59,10.3 +7.2,0.34,0.3,8.4,0.051,40.0,167.0,0.99756,3.48,0.62,9.7 +7.7,0.4,0.27,4.5,0.034,27.0,95.0,0.99175,3.21,0.59,12.3 +6.7,0.17,0.27,1.4,0.032,39.0,149.0,0.99254,3.4,0.52,10.5 +7.0,0.23,0.26,7.2,0.041,21.0,90.0,0.99509,3.22,0.55,9.5 +8.1,0.24,0.26,11.0,0.043,41.0,211.0,0.99676,3.11,0.49,10.0 +7.7,0.28,0.63,11.1,0.039,58.0,179.0,0.9979,3.08,0.44,8.8 +7.5,0.23,0.29,2.6,0.031,24.0,98.0,0.99194,3.0,0.54,10.9 +8.3,0.26,0.31,2.0,0.029,14.0,141.0,0.99077,2.95,0.77,12.2 +7.9,0.46,0.4,10.1,0.168,19.0,184.0,0.99782,3.06,0.62,9.5 +7.9,0.31,0.22,13.3,0.048,46.0,212.0,0.99942,3.47,0.59,10.0 +7.9,0.25,0.34,11.4,0.04,53.0,202.0,0.99708,3.11,0.57,9.6 +6.1,0.28,0.16,1.3,0.06,36.0,126.0,0.99353,3.13,0.46,8.7 +7.0,0.18,0.26,1.4,0.044,46.0,89.0,0.99256,3.39,0.48,10.7 +6.5,0.21,0.28,1.4,0.046,26.0,66.0,0.99199,3.43,0.48,11.1 +7.6,0.48,0.33,7.0,0.024,14.0,130.0,0.9918,3.25,0.45,12.5 +7.1,0.34,0.32,2.0,0.051,29.0,130.0,0.99354,3.3,0.5,10.4 +8.9,0.21,0.37,1.2,0.028,20.0,93.0,0.99244,3.2,0.37,11.5 +7.4,0.32,0.27,12.9,0.04,60.0,221.0,0.99831,3.05,0.66,9.4 +6.0,0.495,0.27,5.0,0.157,17.0,129.0,0.99396,3.03,0.36,9.3 +8.1,0.25,0.34,10.1,0.05,30.0,121.0,0.99724,3.17,0.49,10.1 +8.2,0.25,0.46,3.75,0.05,14.0,102.0,0.99524,3.28,0.58,9.7 +6.5,0.18,0.29,1.7,0.035,39.0,144.0,0.9927,3.49,0.5,10.5 +6.7,0.24,0.26,12.6,0.053,44.0,182.0,0.99802,3.42,0.42,9.7 +6.6,0.32,0.24,1.3,0.06,42.5,204.0,0.99512,3.59,0.51,9.2 +7.6,0.32,0.35,1.6,0.092,24.0,138.0,0.99438,3.19,0.44,9.8 +7.4,0.33,0.44,7.6,0.05,40.0,227.0,0.99679,3.12,0.52,9.0 +7.2,0.3,0.3,8.1,0.05,40.0,188.0,0.99652,3.15,0.49,9.1 +7.4,0.34,0.3,14.9,0.037,70.0,169.0,0.99698,3.25,0.37,10.4 +6.1,0.16,0.29,6.0,0.03,29.0,144.0,0.99474,3.68,0.46,10.7 +6.3,0.1,0.24,6.0,0.039,25.0,107.0,0.99511,3.59,0.49,10.5 +6.2,0.45,0.73,7.2,0.099,47.0,202.0,0.99582,3.21,0.43,9.2 +6.0,0.33,0.18,3.0,0.036,5.0,85.0,0.99125,3.28,0.4,11.5 +7.6,0.48,0.37,1.2,0.034,5.0,57.0,0.99256,3.05,0.54,10.4 +7.2,0.2,0.3,2.0,0.039,43.0,188.0,0.9911,3.3,0.41,12.0 +7.0,0.32,0.29,4.9,0.036,41.0,150.0,0.99168,3.38,0.43,12.2 +7.2,0.2,0.3,2.0,0.039,43.0,188.0,0.9911,3.3,0.41,12.0 +7.0,0.22,0.29,8.9,0.05,24.0,90.0,0.99556,3.29,0.46,9.8 +9.4,0.23,0.56,16.45,0.063,52.5,282.0,1.00098,3.1,0.51,9.3 +6.4,0.27,0.19,2.0,0.084,21.0,191.0,0.99516,3.49,0.63,9.6 +6.4,0.27,0.19,1.9,0.085,21.0,196.0,0.99516,3.49,0.64,9.5 +7.0,0.23,0.42,5.1,0.042,37.0,144.0,0.99518,3.5,0.59,10.2 +6.9,0.15,0.28,4.4,0.029,14.0,107.0,0.99347,3.24,0.46,10.4 +6.7,0.26,0.29,5.8,0.025,26.0,74.0,0.9929,3.28,0.53,11.0 +6.9,0.15,0.28,4.4,0.029,14.0,107.0,0.99347,3.24,0.46,10.4 +7.6,0.2,0.68,12.9,0.042,56.0,160.0,0.99841,3.05,0.41,8.7 +6.9,0.3,0.29,1.3,0.053,24.0,189.0,0.99362,3.29,0.54,9.9 +6.9,0.3,0.3,1.3,0.053,24.0,186.0,0.99361,3.29,0.54,9.9 
+7.6,0.21,0.35,1.2,0.041,7.0,106.0,0.9914,3.06,0.45,11.3 +6.8,0.46,0.26,2.7,0.042,28.0,83.0,0.99114,3.38,0.51,12.0 +7.0,0.28,0.26,1.7,0.042,34.0,130.0,0.9925,3.43,0.5,10.7 +6.5,0.24,0.29,8.2,0.043,32.0,156.0,0.99453,3.13,0.7,10.1 +6.4,0.17,0.34,1.5,0.091,42.0,135.0,0.9938,3.25,0.49,9.6 +6.4,0.17,0.34,1.5,0.093,43.0,136.0,0.9938,3.25,0.49,9.6 +6.3,0.695,0.55,12.9,0.056,58.0,252.0,0.99806,3.29,0.49,8.7 +7.0,0.27,0.29,3.9,0.059,28.0,199.0,0.9961,3.54,0.59,10.3 +8.4,0.3,0.25,17.75,0.047,25.0,218.0,1.00016,2.98,0.66,9.1 +6.5,0.19,0.27,4.9,0.037,13.0,101.0,0.9916,3.17,0.41,11.8 +8.0,0.36,0.39,1.6,0.024,26.0,93.0,0.99116,3.15,0.49,11.9 +6.1,0.16,0.24,1.4,0.046,17.0,77.0,0.99319,3.66,0.57,10.3 +9.2,0.19,0.42,2.0,0.047,16.0,104.0,0.99517,3.09,0.66,10.0 +9.2,0.16,0.49,2.0,0.044,18.0,107.0,0.99514,3.1,0.53,10.2 +8.0,0.26,0.28,8.2,0.038,72.0,202.0,0.99566,3.12,0.56,10.0 +8.8,0.33,0.36,2.1,0.034,19.0,125.0,0.99166,2.96,0.98,12.7 +9.8,0.16,0.46,1.8,0.046,23.0,130.0,0.99587,3.04,0.67,9.6 +6.6,0.23,0.18,8.5,0.044,59.0,188.0,0.99558,3.16,0.49,9.5 +7.9,0.44,0.26,4.45,0.033,23.0,100.0,0.99117,3.17,0.52,12.7 +7.6,0.31,0.27,5.8,0.036,23.0,109.0,0.99399,3.34,0.54,11.0 +7.5,0.705,0.1,13.0,0.044,44.0,214.0,0.99741,3.1,0.5,9.1 +7.1,0.21,0.28,2.7,0.034,23.0,111.0,0.99405,3.35,0.64,10.2 +7.0,0.16,0.26,7.3,0.047,30.0,220.0,0.99622,3.38,0.58,10.1 +8.0,0.27,0.25,19.1,0.045,50.0,208.0,1.00051,3.05,0.5,9.2 +6.3,0.38,0.17,8.8,0.08,50.0,212.0,0.99803,3.47,0.66,9.4 +7.1,0.21,0.28,2.7,0.034,23.0,111.0,0.99405,3.35,0.64,10.2 +6.2,0.38,0.18,7.4,0.095,28.0,195.0,0.99773,3.53,0.71,9.2 +8.2,0.24,0.3,2.3,0.05,23.0,106.0,0.99397,2.98,0.5,10.0 +7.0,0.16,0.26,6.85,0.047,30.0,220.0,0.99622,3.38,0.58,10.1 +7.3,0.815,0.09,11.4,0.044,45.0,204.0,0.99713,3.15,0.46,9.0 +6.3,0.41,0.16,0.9,0.032,25.0,98.0,0.99274,3.16,0.42,9.5 +6.1,0.36,0.41,19.35,0.07,67.0,207.0,1.00118,3.39,0.53,9.1 +8.1,0.4,0.32,7.9,0.031,23.0,118.0,0.99176,3.05,0.46,13.3 +6.8,0.26,0.43,11.75,0.045,53.0,198.0,0.9969,3.26,0.55,9.5 +6.2,0.44,0.18,7.7,0.096,28.0,210.0,0.99771,3.56,0.72,9.2 +7.2,0.24,0.29,3.0,0.036,17.0,117.0,0.99411,3.36,0.68,10.1 +6.2,0.44,0.18,7.7,0.096,28.0,210.0,0.99771,3.56,0.72,9.2 +7.2,0.24,0.29,3.0,0.036,17.0,117.0,0.99411,3.36,0.68,10.1 +7.3,0.22,0.26,1.5,0.04,32.0,172.0,0.99194,3.27,0.48,11.2 +8.1,0.34,0.28,7.5,0.04,70.0,230.0,0.99558,3.14,0.55,9.8 +7.3,0.22,0.26,1.5,0.04,32.0,172.0,0.99194,3.27,0.48,11.2 +8.1,0.34,0.28,7.5,0.04,70.0,230.0,0.99558,3.14,0.55,9.8 +6.4,0.28,0.17,8.3,0.042,61.0,195.0,0.99577,3.22,0.46,9.4 +6.3,0.29,0.14,7.05,0.045,50.0,177.0,0.99564,3.23,0.42,9.0 +6.4,0.27,0.17,8.4,0.044,60.0,198.0,0.99578,3.21,0.47,9.4 +7.4,0.35,0.2,13.9,0.054,63.0,229.0,0.99888,3.11,0.5,8.9 +8.3,0.28,0.27,17.5,0.045,48.0,253.0,1.00014,3.02,0.56,9.1 +6.4,0.35,0.35,5.6,0.034,9.0,148.0,0.99441,3.17,0.5,9.8 +6.9,0.43,0.28,9.4,0.056,29.0,183.0,0.99594,3.17,0.43,9.4 +8.0,0.26,0.28,4.8,0.05,34.0,150.0,0.99437,3.13,0.5,10.0 +6.9,0.43,0.28,9.4,0.056,29.0,183.0,0.99594,3.17,0.43,9.4 +7.3,0.27,0.37,9.7,0.042,36.0,130.0,0.9979,3.48,0.75,9.9 +6.8,0.46,0.26,6.3,0.147,49.0,159.0,0.99434,3.14,0.47,10.0 +7.2,0.2,0.28,1.6,0.028,13.0,168.0,0.99203,3.17,1.06,11.5 +7.6,0.285,0.32,14.6,0.063,32.0,201.0,0.998,3.0,0.45,9.2 +6.6,0.32,0.33,2.5,0.052,40.0,219.5,0.99316,3.15,0.6,10.0 +7.6,0.285,0.32,14.6,0.063,32.0,201.0,0.998,3.0,0.45,9.2 +6.6,0.34,0.34,2.6,0.051,40.5,210.0,0.99314,3.15,0.61,10.0 +6.6,0.32,0.33,2.5,0.052,40.0,210.0,0.99316,3.15,0.6,10.0 +6.5,0.27,0.26,8.2,0.042,21.0,133.0,0.99612,3.43,0.64,9.8 +6.6,0.26,0.27,1.5,0.04,19.0,114.0,0.99295,3.36,0.62,10.5 
+6.7,0.27,0.26,2.3,0.043,61.0,181.0,0.99394,3.45,0.63,10.6 +6.6,0.56,0.15,10.0,0.037,38.0,157.0,0.99642,3.28,0.52,9.4 +6.6,0.56,0.15,10.0,0.037,38.0,157.0,0.99642,3.28,0.52,9.4 +7.3,0.19,0.27,1.6,0.027,35.0,136.0,0.99248,3.38,0.54,11.0 +6.3,0.2,0.26,1.6,0.027,36.0,141.0,0.99268,3.53,0.56,10.8 +7.1,0.29,0.3,16.0,0.036,58.0,201.0,0.99954,3.3,0.67,9.0 +7.8,0.32,0.33,10.4,0.031,47.0,194.0,0.99692,3.07,0.58,9.6 +8.1,0.33,0.36,7.4,0.037,36.0,156.0,0.99592,3.19,0.54,10.6 +8.1,0.33,0.36,7.4,0.037,36.0,156.0,0.99592,3.19,0.54,10.6 +7.8,0.32,0.33,10.4,0.031,47.0,194.0,0.99692,3.07,0.58,9.6 +6.6,0.33,0.24,16.05,0.045,31.0,147.0,0.99822,3.08,0.52,9.2 +6.6,0.33,0.24,16.05,0.045,31.0,147.0,0.99822,3.08,0.52,9.2 +8.2,0.26,0.33,2.6,0.053,11.0,71.0,0.99402,2.89,0.49,9.5 +8.3,0.25,0.33,2.5,0.053,12.0,72.0,0.99404,2.89,0.48,9.5 +7.0,0.26,0.26,10.8,0.039,37.0,184.0,0.99787,3.47,0.58,10.3 +6.0,0.26,0.15,1.2,0.053,35.0,124.0,0.99347,3.08,0.46,8.8 +7.5,0.28,0.78,12.1,0.041,53.0,161.0,0.99838,2.98,0.44,8.7 +7.5,0.27,0.79,11.95,0.04,51.0,159.0,0.99839,2.98,0.44,8.7 +7.0,0.28,0.32,1.7,0.038,27.0,128.0,0.99375,3.2,0.62,10.2 +5.2,0.16,0.34,0.8,0.029,26.0,77.0,0.99155,3.25,0.51,10.1 +6.8,0.34,0.1,1.4,0.049,29.0,118.0,0.9936,3.21,0.41,9.5 +7.6,0.25,0.34,1.3,0.056,34.0,176.0,0.99434,3.1,0.51,9.5 +5.6,0.35,0.4,6.3,0.022,23.0,174.0,0.9922,3.54,0.5,11.6 +8.8,0.24,0.23,10.3,0.032,12.0,97.0,0.99571,3.13,0.4,10.7 +6.0,0.29,0.21,15.55,0.043,20.0,142.0,0.99658,3.11,0.54,10.1 +6.1,0.27,0.31,1.5,0.035,17.0,83.0,0.99076,3.32,0.44,11.1 +7.4,0.56,0.09,1.5,0.071,19.0,117.0,0.99496,3.22,0.53,9.8 +6.8,0.29,0.49,1.4,0.142,52.0,148.0,0.9937,3.08,0.49,9.0 +6.1,0.27,0.31,1.5,0.035,17.0,83.0,0.99076,3.32,0.44,11.1 +6.3,0.27,0.37,7.9,0.047,58.0,215.0,0.99542,3.19,0.48,9.5 +6.6,0.24,0.3,13.0,0.052,18.0,143.0,0.99825,3.37,0.49,9.4 +6.8,0.32,0.3,1.0,0.049,22.0,113.0,0.99289,3.24,0.61,10.2 +6.4,0.37,0.37,4.85,0.041,39.5,216.5,0.99432,3.1,0.5,9.8 +6.2,0.26,0.37,7.1,0.047,54.0,201.0,0.99523,3.19,0.48,9.5 +6.3,0.27,0.37,7.9,0.047,58.0,215.0,0.99542,3.19,0.48,9.5 +6.4,0.3,0.16,7.5,0.05,55.0,191.0,0.9959,3.17,0.49,9.0 +8.0,0.28,0.32,7.6,0.045,61.0,204.0,0.99543,3.1,0.55,10.1 +6.7,0.24,0.32,10.3,0.079,37.0,122.0,0.99662,3.02,0.45,8.8 +7.9,0.27,0.27,1.7,0.034,25.0,122.0,0.99088,2.97,0.51,11.9 +7.9,0.27,0.27,1.7,0.034,25.0,122.0,0.99088,2.97,0.51,11.9 +6.1,0.28,0.24,19.95,0.074,32.0,174.0,0.99922,3.19,0.44,9.3 +7.7,0.39,0.49,7.7,0.036,11.0,110.0,0.9966,3.33,0.76,10.0 +6.0,0.2,0.24,5.3,0.075,49.0,201.0,0.99466,3.21,0.43,9.5 +6.1,0.28,0.24,19.95,0.074,32.0,174.0,0.99922,3.19,0.44,9.3 +7.6,0.31,0.23,12.7,0.054,20.0,139.0,0.99836,3.16,0.5,9.7 +7.6,0.31,0.23,12.7,0.054,20.0,139.0,0.99836,3.16,0.5,9.7 +6.3,0.18,0.22,1.5,0.043,45.0,155.0,0.99238,3.19,0.48,10.2 +8.6,0.23,0.25,11.3,0.031,13.0,96.0,0.99645,3.11,0.4,10.8 +6.8,0.21,0.36,18.1,0.046,32.0,133.0,1.0,3.27,0.48,8.8 +6.8,0.21,0.36,18.1,0.046,32.0,133.0,1.0,3.27,0.48,8.8 +6.9,0.26,0.31,7.0,0.039,37.0,175.0,0.99376,3.32,0.49,11.4 +6.8,0.21,0.36,18.1,0.046,32.0,133.0,1.0,3.27,0.48,8.8 +6.4,0.31,0.4,6.4,0.039,39.0,191.0,0.99513,3.14,0.52,9.8 +8.6,0.34,0.36,1.4,0.045,11.0,119.0,0.99556,3.17,0.47,9.4 +8.6,0.34,0.36,1.4,0.045,11.0,119.0,0.99556,3.17,0.47,9.4 +8.5,0.3,0.28,3.1,0.054,54.0,174.0,0.99543,3.21,0.43,9.4 +7.4,0.4,0.41,14.1,0.053,37.0,194.0,0.99886,3.2,0.63,9.4 +6.6,0.32,0.34,7.7,0.044,63.0,212.0,0.99526,3.22,0.48,9.7 +7.1,0.34,0.31,5.2,0.032,36.0,140.0,0.99166,3.35,0.47,12.3 +6.6,0.26,0.25,11.6,0.045,45.0,178.0,0.99691,3.33,0.43,9.8 +8.0,0.27,0.57,10.4,0.053,18.0,134.0,0.99732,3.12,0.68,9.0 
+6.2,0.28,0.45,7.5,0.045,46.0,203.0,0.99573,3.26,0.46,9.2 +6.2,0.3,0.49,11.2,0.058,68.0,215.0,0.99656,3.19,0.6,9.4 +5.6,0.175,0.29,0.8,0.043,20.0,67.0,0.99112,3.28,0.48,9.9 +6.9,0.34,0.36,1.4,0.032,13.0,145.0,0.99214,3.07,0.52,9.8 +6.9,0.34,0.3,4.7,0.029,34.0,148.0,0.99165,3.36,0.49,12.3 +7.1,0.12,0.3,3.1,0.018,15.0,37.0,0.99004,3.02,0.52,11.9 +7.1,0.32,0.29,4.0,0.038,33.0,170.0,0.99463,3.27,0.64,10.2 +7.3,0.51,0.29,11.3,0.034,61.0,224.0,0.99683,3.14,0.56,9.5 +7.1,0.12,0.3,3.1,0.018,15.0,37.0,0.99004,3.02,0.52,11.9 +6.3,0.24,0.55,8.1,0.04,67.0,216.0,0.99596,3.24,0.5,9.2 +7.5,0.41,0.23,14.8,0.054,28.0,174.0,0.99898,3.18,0.49,9.7 +6.5,0.18,0.33,1.4,0.029,35.0,138.0,0.99114,3.36,0.6,11.5 +7.3,0.17,0.24,8.1,0.121,32.0,162.0,0.99508,3.17,0.38,10.4 +8.2,0.2,0.38,3.5,0.053,41.0,174.0,0.99306,3.22,0.41,11.6 +7.5,0.41,0.23,14.8,0.054,28.0,174.0,0.99898,3.18,0.49,9.7 +7.3,0.17,0.24,8.1,0.121,32.0,162.0,0.99508,3.17,0.38,10.4 +6.5,0.18,0.33,1.4,0.029,35.0,138.0,0.99114,3.36,0.6,11.5 +7.3,0.16,0.35,1.5,0.036,29.0,108.0,0.99342,3.27,0.51,10.2 +6.4,0.16,0.37,1.5,0.037,27.0,109.0,0.99345,3.38,0.5,9.8 +6.6,0.42,0.13,12.8,0.044,26.0,158.0,0.99772,3.24,0.47,9.0 +5.8,0.3,0.12,1.6,0.036,57.0,163.0,0.99239,3.38,0.59,10.5 +6.7,0.54,0.27,7.1,0.049,8.0,178.0,0.99502,3.16,0.38,9.4 +6.7,0.54,0.27,7.1,0.049,8.0,178.0,0.99502,3.16,0.38,9.4 +6.4,0.22,0.3,11.2,0.046,53.0,149.0,0.99479,3.21,0.34,10.8 +6.8,0.23,0.3,1.7,0.043,19.0,95.0,0.99207,3.17,0.46,10.7 +9.0,0.26,0.34,6.7,0.029,21.0,162.0,0.99497,3.08,0.5,10.6 +6.5,0.23,0.25,17.3,0.046,15.0,110.0,0.99828,3.15,0.42,9.2 +5.9,0.28,0.14,8.6,0.032,30.0,142.0,0.99542,3.28,0.44,9.5 +5.9,0.28,0.14,8.6,0.032,30.0,142.0,0.99542,3.28,0.44,9.5 +6.2,0.27,0.18,1.5,0.028,20.0,111.0,0.99228,3.41,0.5,10.0 +9.0,0.29,0.34,12.1,0.03,34.0,177.0,0.99706,3.13,0.47,10.6 +9.0,0.26,0.34,6.7,0.029,21.0,162.0,0.99497,3.08,0.5,10.6 +8.9,0.27,0.34,10.7,0.029,19.5,166.0,0.99669,3.13,0.48,10.6 +6.5,0.23,0.25,17.3,0.046,15.0,110.0,0.99828,3.15,0.42,9.2 +6.9,0.32,0.3,1.8,0.036,28.0,117.0,0.99269,3.24,0.48,11.0 +7.2,0.22,0.24,1.4,0.041,17.0,159.0,0.99196,3.25,0.53,11.2 +6.7,0.5,0.38,7.5,0.046,26.0,175.0,0.99662,3.32,0.54,9.6 +6.2,0.33,0.14,4.8,0.052,27.0,128.0,0.99475,3.21,0.48,9.4 +6.3,0.26,0.42,7.1,0.045,62.0,209.0,0.99544,3.2,0.53,9.5 +7.5,0.2,0.47,16.9,0.052,51.0,188.0,0.99944,3.09,0.62,9.3 +6.2,0.33,0.14,4.8,0.052,27.0,128.0,0.99475,3.21,0.48,9.4 +6.3,0.26,0.42,7.1,0.045,62.0,209.0,0.99544,3.2,0.53,9.5 +6.6,0.36,0.52,11.3,0.046,8.0,110.0,0.9966,3.07,0.46,9.4 +6.3,0.13,0.42,1.1,0.043,63.0,146.0,0.99066,3.13,0.72,11.2 +6.4,0.15,0.44,1.2,0.043,67.0,150.0,0.9907,3.14,0.73,11.2 +6.3,0.13,0.42,1.1,0.043,63.0,146.0,0.99066,3.13,0.72,11.2 +7.6,0.23,0.64,12.9,0.033,54.0,170.0,0.998,3.0,0.53,8.8 +6.4,0.15,0.44,1.2,0.043,67.0,150.0,0.9907,3.14,0.73,11.2 +6.3,0.13,0.42,1.1,0.043,63.0,146.0,0.99066,3.13,0.72,11.2 +5.7,0.255,0.65,1.2,0.079,17.0,137.0,0.99307,3.2,0.42,9.4 +6.9,0.32,0.26,2.3,0.03,11.0,103.0,0.99106,3.06,0.42,11.1 +6.9,0.28,0.22,10.0,0.052,36.0,131.0,0.99696,3.08,0.46,9.6 +6.9,0.32,0.26,2.3,0.03,11.0,103.0,0.99106,3.06,0.42,11.1 +5.7,0.255,0.65,1.2,0.079,17.0,137.0,0.99307,3.2,0.42,9.4 +6.6,0.41,0.16,1.4,0.037,28.0,160.0,0.99167,2.95,0.45,10.6 +7.3,0.37,0.16,14.9,0.048,59.0,240.0,0.99902,3.13,0.45,8.9 +6.9,0.21,0.24,1.8,0.021,17.0,80.0,0.98992,3.15,0.46,12.3 +6.6,0.24,0.28,1.8,0.028,39.0,132.0,0.99182,3.34,0.46,11.4 +6.8,0.28,0.36,7.0,0.043,60.0,207.0,0.99556,3.16,0.49,9.6 +6.6,0.24,0.24,8.6,0.034,25.0,135.0,0.99582,3.33,0.59,10.3 +6.6,0.24,0.28,1.8,0.028,39.0,132.0,0.99182,3.34,0.46,11.4 
+7.0,0.16,0.32,1.1,0.032,29.0,80.0,0.98972,3.23,0.36,12.1 +7.0,0.14,0.28,1.3,0.026,10.0,56.0,0.99352,3.46,0.45,9.9 +6.3,0.34,0.36,4.9,0.035,31.0,185.0,0.9946,3.15,0.49,9.7 +6.8,0.26,0.24,1.9,0.043,70.0,154.0,0.99273,3.18,0.52,10.5 +6.7,0.17,0.42,10.4,0.038,85.0,182.0,0.99628,3.04,0.44,8.9 +6.5,0.27,0.4,10.0,0.039,74.0,227.0,0.99582,3.18,0.5,9.4 +6.7,0.25,0.36,8.6,0.037,63.0,206.0,0.99553,3.18,0.5,9.6 +5.8,0.3,0.27,1.7,0.014,45.0,104.0,0.98914,3.4,0.56,12.6 +6.4,0.28,0.56,1.7,0.156,49.0,106.0,0.99354,3.1,0.37,9.2 +7.7,0.3,0.26,18.95,0.053,36.0,174.0,0.99976,3.2,0.5,10.4 +6.8,0.18,0.3,12.8,0.062,19.0,171.0,0.99808,3.0,0.52,9.0 +6.8,0.18,0.3,12.8,0.062,19.0,171.0,0.99808,3.0,0.52,9.0 +6.8,0.18,0.3,12.8,0.062,19.0,171.0,0.99808,3.0,0.52,9.0 +6.8,0.18,0.3,12.8,0.062,19.0,171.0,0.99808,3.0,0.52,9.0 +6.8,0.18,0.3,12.8,0.062,19.0,171.0,0.99808,3.0,0.52,9.0 +6.8,0.18,0.3,12.8,0.062,19.0,171.0,0.99808,3.0,0.52,9.0 +5.1,0.14,0.25,0.7,0.039,15.0,89.0,0.9919,3.22,0.43,9.2 +6.8,0.18,0.3,12.8,0.062,19.0,171.0,0.99808,3.0,0.52,9.0 +7.2,0.615,0.1,1.4,0.068,25.0,154.0,0.99499,3.2,0.48,9.7 +6.9,0.13,0.28,13.3,0.05,47.0,132.0,0.99655,3.34,0.42,10.1 +6.7,0.34,0.3,8.5,0.059,24.0,152.0,0.99615,3.46,0.64,11.0 +7.3,0.32,0.29,1.5,0.038,32.0,144.0,0.99296,3.2,0.55,10.8 +6.3,0.21,0.29,11.7,0.048,49.0,147.0,0.99482,3.22,0.38,10.8 +5.4,0.5,0.13,5.0,0.028,12.0,107.0,0.99079,3.48,0.88,13.5 +8.2,0.52,0.34,1.2,0.042,18.0,167.0,0.99366,3.24,0.39,10.6 +7.8,0.28,0.31,2.1,0.046,28.0,208.0,0.99434,3.23,0.64,9.8 +6.4,0.22,0.34,1.4,0.023,56.0,115.0,0.98958,3.18,0.7,11.7 +7.8,0.28,0.31,2.1,0.046,28.0,208.0,0.99434,3.23,0.64,9.8 +6.9,0.32,0.27,16.0,0.034,58.0,185.0,0.99938,3.34,0.6,9.0 +6.8,0.11,0.42,1.1,0.042,51.0,132.0,0.99059,3.18,0.74,11.3 +6.2,0.26,0.32,15.3,0.031,64.0,185.0,0.99835,3.31,0.61,9.4 +6.4,0.22,0.34,1.4,0.023,56.0,115.0,0.98958,3.18,0.7,11.7 +6.7,0.3,0.29,2.8,0.025,37.0,107.0,0.99159,3.31,0.63,11.3 +6.7,0.3,0.29,2.8,0.025,37.0,107.0,0.99159,3.31,0.63,11.3 +7.1,0.2,0.3,0.9,0.019,4.0,28.0,0.98931,3.2,0.36,12.0 +7.2,0.2,0.36,2.5,0.028,22.0,157.0,0.9938,3.48,0.49,10.6 +8.9,0.26,0.33,8.1,0.024,47.0,202.0,0.99558,3.13,0.46,10.8 +7.5,0.25,0.32,8.2,0.024,53.0,209.0,0.99563,3.12,0.46,10.8 +7.1,0.2,0.3,0.9,0.019,4.0,28.0,0.98931,3.2,0.36,12.0 +6.3,0.27,0.46,11.1,0.053,44.0,177.0,0.99691,3.18,0.67,9.4 +6.5,0.3,0.39,7.8,0.038,61.0,219.0,0.9959,3.19,0.5,9.4 +6.7,0.3,0.29,2.8,0.025,37.0,107.0,0.99159,3.31,0.63,11.3 +6.6,0.36,0.52,10.1,0.05,29.0,140.0,0.99628,3.07,0.4,9.4 +6.15,0.21,0.37,3.2,0.021,20.0,80.0,0.99076,3.39,0.47,12.0 +6.5,0.18,0.41,14.2,0.039,47.0,129.0,0.99678,3.28,0.72,10.3 +6.5,0.18,0.41,14.2,0.039,47.0,129.0,0.99678,3.28,0.72,10.3 +6.5,0.18,0.41,14.2,0.039,47.0,129.0,0.99678,3.28,0.72,10.3 +6.6,0.26,0.21,2.9,0.026,48.0,126.0,0.99089,3.22,0.38,11.3 +6.6,0.35,0.35,6.0,0.063,31.0,150.0,0.99537,3.1,0.47,9.4 +6.5,0.28,0.28,20.4,0.041,40.0,144.0,1.0002,3.14,0.38,8.7 +6.6,0.36,0.52,10.1,0.05,29.0,140.0,0.99628,3.07,0.4,9.4 +6.6,0.26,0.21,2.9,0.026,48.0,126.0,0.99089,3.22,0.38,11.3 +6.5,0.18,0.41,14.2,0.039,47.0,129.0,0.99678,3.28,0.72,10.3 +6.15,0.21,0.37,3.2,0.021,20.0,80.0,0.99076,3.39,0.47,12.0 +4.5,0.19,0.21,0.95,0.033,89.0,159.0,0.99332,3.34,0.42,8.0 +8.0,0.24,0.26,1.7,0.033,36.0,136.0,0.99316,3.44,0.51,10.4 +7.8,0.17,0.23,1.7,0.029,39.0,128.0,0.99272,3.37,0.41,10.7 +7.0,0.24,0.24,9.0,0.03,42.0,219.0,0.99636,3.47,0.46,10.2 +5.8,0.6,0.0,1.3,0.044,72.0,197.0,0.99202,3.56,0.43,10.9 +5.9,0.445,0.26,1.4,0.027,23.0,109.0,0.99148,3.3,0.36,10.5 +6.7,0.28,0.28,2.4,0.012,36.0,100.0,0.99064,3.26,0.39,11.7 
+6.8,0.44,0.2,16.0,0.065,61.0,186.0,0.99884,3.13,0.45,8.6 +7.2,0.24,0.27,11.4,0.034,40.0,174.0,0.99773,3.2,0.44,9.0 +8.7,0.31,0.73,14.35,0.044,27.0,191.0,1.00013,2.96,0.88,8.7 +8.2,0.32,0.26,2.1,0.062,26.0,87.0,0.98974,3.1,0.47,12.8 +7.2,0.24,0.27,11.4,0.034,40.0,174.0,0.99773,3.2,0.44,9.0 +8.7,0.31,0.73,14.35,0.044,27.0,191.0,1.00013,2.96,0.88,8.7 +7.5,0.13,0.38,1.1,0.023,42.0,104.0,0.99112,3.28,0.53,11.8 +9.2,0.14,0.37,1.1,0.034,36.0,84.0,0.99136,3.05,0.55,11.6 +7.4,0.2,0.37,1.2,0.028,28.0,89.0,0.99132,3.14,0.61,11.8 +6.1,0.15,0.35,15.8,0.042,55.0,158.0,0.99642,3.24,0.37,10.6 +7.6,0.23,0.4,5.2,0.066,14.0,91.0,0.99488,3.17,0.8,9.7 +8.1,0.33,0.22,5.2,0.047,24.0,151.0,0.99527,3.22,0.47,10.3 +7.15,0.17,0.24,9.6,0.119,56.0,178.0,0.99578,3.15,0.44,10.2 +6.7,0.12,0.3,5.2,0.048,38.0,113.0,0.99352,3.33,0.44,10.1 +5.7,0.18,0.36,1.2,0.046,9.0,71.0,0.99199,3.7,0.68,10.9 +5.8,0.15,0.28,0.8,0.037,43.0,127.0,0.99198,3.24,0.51,9.3 +6.6,0.23,0.29,14.45,0.057,29.0,144.0,0.99756,3.33,0.54,10.2 +7.15,0.17,0.24,9.6,0.119,56.0,178.0,0.99578,3.15,0.44,10.2 +7.0,0.34,0.39,6.9,0.066,43.0,162.0,0.99561,3.11,0.53,9.5 +6.4,0.68,0.26,3.4,0.069,25.0,146.0,0.99347,3.18,0.4,9.3 +7.3,0.22,0.31,2.3,0.018,45.0,80.0,0.98936,3.06,0.34,12.9 +6.4,0.28,0.27,11.0,0.042,45.0,148.0,0.99786,3.14,0.46,8.7 +6.9,0.4,0.22,5.95,0.081,76.0,303.0,0.99705,3.4,0.57,9.4 +6.8,0.19,0.23,5.1,0.034,71.0,204.0,0.9942,3.23,0.69,10.1 +7.1,0.23,0.24,5.4,0.039,60.0,196.0,0.9948,3.19,0.78,10.0 +6.45,0.14,0.42,1.2,0.05,51.0,129.0,0.99116,3.27,0.69,11.1 +6.5,0.15,0.44,12.6,0.052,65.0,158.0,0.99688,3.26,0.7,10.3 +7.1,0.15,0.34,1.0,0.033,27.0,73.0,0.98974,3.24,0.41,12.2 +6.7,0.33,0.34,6.6,0.067,35.0,156.0,0.99542,3.11,0.48,9.3 +7.2,0.3,0.26,1.5,0.041,46.0,178.0,0.99154,3.19,0.56,11.3 +7.0,0.23,0.33,1.0,0.043,46.0,110.0,0.99118,3.04,0.65,10.8 +8.0,0.13,0.25,1.1,0.033,15.0,86.0,0.99044,2.98,0.39,11.2 +6.2,0.21,0.34,6.6,0.03,36.0,91.0,0.9914,3.32,0.45,12.5 +8.3,0.4,0.41,8.2,0.05,15.0,122.0,0.9979,3.39,0.49,9.3 +5.9,0.34,0.31,2.0,0.03,38.0,142.0,0.98892,3.4,0.41,12.9 +6.6,0.12,0.25,1.4,0.039,21.0,131.0,0.99114,3.2,0.45,11.2 +9.6,0.655,0.21,2.0,0.039,21.0,120.0,0.99188,3.0,1.0,12.6 +6.8,0.26,0.4,7.5,0.046,45.0,179.0,0.99583,3.2,0.49,9.3 +5.9,0.34,0.31,2.0,0.03,38.0,142.0,0.98892,3.4,0.41,12.9 +5.9,0.3,0.3,2.0,0.03,38.0,142.0,0.98892,3.41,0.41,12.9 +7.0,0.15,0.3,13.3,0.049,46.0,120.0,0.99704,3.2,0.36,9.5 +7.9,0.37,0.31,2.85,0.037,5.0,24.0,0.9911,3.19,0.36,11.9 +7.2,0.35,0.25,5.6,0.032,23.0,120.0,0.99334,2.93,0.66,10.3 +7.2,0.32,0.24,5.6,0.033,23.0,120.0,0.99334,2.92,0.66,10.3 +7.6,0.1,0.33,1.0,0.031,33.0,93.0,0.99094,3.06,0.68,11.2 +6.2,0.25,0.31,3.2,0.03,32.0,150.0,0.99014,3.18,0.31,12.0 +7.1,0.31,0.17,1.0,0.042,21.0,144.0,0.99304,3.13,0.4,9.6 +7.6,0.18,0.28,7.1,0.041,29.0,110.0,0.99652,3.2,0.42,9.2 +8.0,0.17,0.29,2.4,0.029,52.0,119.0,0.98944,3.03,0.33,12.9 +7.2,0.19,0.27,11.2,0.061,46.0,149.0,0.99772,2.99,0.59,9.3 +7.6,0.32,0.25,9.5,0.03,15.0,136.0,0.99367,3.1,0.44,12.1 +7.1,0.31,0.17,1.0,0.042,21.0,144.0,0.99304,3.13,0.4,9.6 +6.6,0.21,0.29,1.8,0.026,35.0,128.0,0.99183,3.37,0.48,11.2 +7.0,0.16,0.36,2.6,0.029,28.0,98.0,0.99126,3.11,0.37,11.2 +8.0,0.17,0.29,2.4,0.029,52.0,119.0,0.98944,3.03,0.33,12.9 +6.6,0.24,0.38,8.0,0.042,56.0,187.0,0.99577,3.21,0.46,9.2 +7.2,0.19,0.27,11.2,0.061,46.0,149.0,0.99772,2.99,0.59,9.3 +7.6,0.18,0.28,7.1,0.041,29.0,110.0,0.99652,3.2,0.42,9.2 +6.9,0.3,0.25,3.3,0.041,26.0,124.0,0.99428,3.18,0.5,9.3 +6.2,0.28,0.27,10.3,0.03,26.0,108.0,0.99388,3.2,0.36,10.7 +6.9,0.31,0.32,1.2,0.024,20.0,166.0,0.99208,3.05,0.54,9.8 
+6.7,0.23,0.25,1.6,0.036,28.0,143.0,0.99256,3.3,0.54,10.3 +6.2,0.28,0.27,10.3,0.03,26.0,108.0,0.99388,3.2,0.36,10.7 +5.7,0.23,0.28,9.65,0.025,26.0,121.0,0.9925,3.28,0.38,11.3 +6.5,0.22,0.5,16.4,0.048,36.0,182.0,0.99904,3.02,0.49,8.8 +7.0,0.18,0.37,1.5,0.043,16.0,104.0,0.99216,3.18,0.5,10.8 +6.9,0.31,0.32,1.2,0.024,20.0,166.0,0.99208,3.05,0.54,9.8 +6.9,0.3,0.25,3.3,0.041,26.0,124.0,0.99428,3.18,0.5,9.3 +6.5,0.46,0.31,5.0,0.027,15.0,72.0,0.99165,3.26,0.6,11.5 +6.5,0.23,0.36,16.3,0.038,43.0,133.0,0.99924,3.26,0.41,8.8 +6.5,0.23,0.36,16.3,0.038,43.0,133.0,0.99924,3.26,0.41,8.8 +6.5,0.23,0.36,16.3,0.038,43.0,133.0,0.99924,3.26,0.41,8.8 +6.6,0.26,0.38,6.5,0.17,68.0,201.0,0.9956,3.19,0.38,9.4 +6.7,0.26,0.39,6.4,0.171,64.0,200.0,0.99562,3.19,0.38,9.4 +7.5,0.28,0.39,10.2,0.045,59.0,209.0,0.9972,3.16,0.63,9.6 +6.5,0.23,0.36,16.3,0.038,43.0,133.0,0.99924,3.26,0.41,8.8 +6.8,0.23,0.42,7.4,0.044,56.0,189.0,0.9958,3.22,0.48,9.3 +7.8,0.25,0.34,13.7,0.044,66.0,184.0,0.99976,3.22,0.75,8.9 +7.8,0.25,0.34,13.7,0.044,66.0,184.0,0.99976,3.22,0.75,8.9 +5.6,0.2,0.22,1.3,0.049,25.0,155.0,0.99296,3.74,0.43,10.0 +6.4,0.21,0.44,7.4,0.045,47.0,182.0,0.9957,3.24,0.46,9.1 +6.8,0.23,0.42,7.4,0.044,56.0,189.0,0.9958,3.22,0.48,9.3 +6.8,0.24,0.37,7.45,0.043,59.0,188.0,0.99579,3.2,0.5,9.4 +7.8,0.25,0.28,7.2,0.04,46.0,179.0,0.99541,3.14,0.6,10.1 +7.8,0.25,0.34,13.7,0.044,66.0,184.0,0.99976,3.22,0.75,8.9 +6.8,0.16,0.29,10.4,0.046,59.0,143.0,0.99518,3.2,0.4,10.8 +5.2,0.28,0.29,1.1,0.028,18.0,69.0,0.99168,3.24,0.54,10.0 +7.5,0.18,0.31,6.5,0.029,53.0,160.0,0.99276,3.03,0.38,10.9 +7.5,0.26,0.3,4.6,0.027,29.0,92.0,0.99085,3.15,0.38,12.0 +8.2,0.37,0.64,13.9,0.043,22.0,171.0,0.99873,2.99,0.8,9.3 +7.6,0.4,0.27,5.2,0.03,32.0,101.0,0.99172,3.22,0.62,12.3 +7.5,0.26,0.25,1.7,0.038,29.0,129.0,0.99312,3.45,0.56,10.4 +7.5,0.18,0.31,6.5,0.029,53.0,160.0,0.99276,3.03,0.38,10.9 +6.9,0.23,0.32,16.4,0.045,62.0,153.0,0.9972,3.22,0.42,10.5 +5.3,0.2,0.31,3.6,0.036,22.0,91.0,0.99278,3.41,0.5,9.8 +6.5,0.17,0.31,1.5,0.041,34.0,121.0,0.99092,3.06,0.46,10.5 +6.5,0.35,0.28,12.4,0.051,86.0,213.0,0.9962,3.16,0.51,9.9 +6.5,0.29,0.31,1.7,0.035,24.0,79.0,0.99053,3.27,0.69,11.4 +6.8,0.3,0.22,6.2,0.06,41.0,190.0,0.99858,3.18,0.51,9.2 +7.9,0.51,0.36,6.2,0.051,30.0,173.0,0.9984,3.09,0.53,9.7 +7.9,0.51,0.34,2.6,0.049,13.0,135.0,0.99335,3.09,0.51,10.0 +6.5,0.29,0.31,1.7,0.035,24.0,79.0,0.99053,3.27,0.69,11.4 +7.1,0.29,0.28,9.3,0.048,50.0,141.0,0.9949,3.13,0.49,10.3 +6.5,0.35,0.28,12.4,0.051,86.0,213.0,0.9962,3.16,0.51,9.9 +6.5,0.17,0.31,1.5,0.041,34.0,121.0,0.99092,3.06,0.46,10.5 +7.4,0.2,0.28,9.1,0.047,29.0,95.0,0.99532,3.16,0.47,9.8 +6.9,0.615,0.42,12.0,0.067,24.0,131.0,0.99727,3.19,0.34,9.3 +6.8,0.32,0.28,4.8,0.034,25.0,100.0,0.99026,3.08,0.47,12.4 +6.3,0.2,0.19,12.3,0.048,54.0,145.0,0.99668,3.16,0.42,9.3 +6.9,0.615,0.42,12.0,0.067,24.0,131.0,0.99727,3.19,0.34,9.3 +8.0,0.23,0.28,2.7,0.048,49.0,165.0,0.9952,3.26,0.72,9.5 +6.7,0.27,0.33,3.6,0.034,9.0,45.0,0.99144,3.08,0.4,10.5 +6.7,0.27,0.33,3.6,0.034,9.0,45.0,0.99144,3.08,0.4,10.5 +6.7,0.44,0.22,4.3,0.032,19.0,99.0,0.99015,3.26,0.53,12.8 +7.0,0.34,0.3,1.8,0.045,44.0,142.0,0.9914,2.99,0.45,10.8 +7.3,0.26,0.33,11.8,0.057,48.0,127.0,0.99693,3.1,0.55,10.0 +5.8,0.17,0.34,1.8,0.045,96.0,170.0,0.99035,3.38,0.9,11.8 +7.3,0.26,0.33,11.8,0.057,48.0,127.0,0.99693,3.1,0.55,10.0 +5.8,0.17,0.34,1.8,0.045,96.0,170.0,0.99035,3.38,0.9,11.8 +6.8,0.17,0.36,1.4,0.036,38.0,108.0,0.99006,3.19,0.66,12.0 +7.1,0.43,0.3,6.6,0.025,15.0,138.0,0.99126,3.18,0.46,12.6 +5.8,0.315,0.27,1.55,0.026,15.0,70.0,0.98994,3.37,0.4,11.9 
+5.9,0.17,0.28,0.7,0.027,5.0,28.0,0.98985,3.13,0.32,10.6 +6.6,0.34,0.18,6.4,0.082,47.0,240.0,0.9971,3.42,0.48,9.2 +8.6,0.33,0.34,11.8,0.059,42.0,240.0,0.99882,3.17,0.52,10.0 +5.6,0.12,0.26,4.3,0.038,18.0,97.0,0.99477,3.36,0.46,9.2 +5.8,0.13,0.26,5.1,0.039,19.0,103.0,0.99478,3.36,0.47,9.3 +7.7,0.18,0.35,5.8,0.055,25.0,144.0,0.99576,3.24,0.54,10.2 +7.7,0.16,0.36,5.9,0.054,25.0,148.0,0.99578,3.25,0.54,10.2 +6.0,0.26,0.15,1.3,0.06,51.0,154.0,0.99354,3.14,0.51,8.7 +7.3,0.32,0.35,1.4,0.05,8.0,163.0,0.99244,3.24,0.42,10.7 +7.7,0.3,0.34,1.2,0.048,4.0,119.0,0.99084,3.18,0.34,12.1 +7.9,0.16,0.3,7.4,0.05,58.0,152.0,0.99612,3.12,0.37,9.5 +6.4,0.27,0.29,10.8,0.028,17.0,118.0,0.99356,3.18,0.37,11.2 +6.9,0.16,0.37,1.8,0.034,36.0,95.0,0.98952,2.93,0.59,12.0 +7.9,0.16,0.3,7.4,0.05,58.0,152.0,0.99612,3.12,0.37,9.5 +7.7,0.3,0.34,1.2,0.048,4.0,119.0,0.99084,3.18,0.34,12.1 +7.3,0.32,0.35,1.4,0.05,8.0,163.0,0.99244,3.24,0.42,10.7 +6.4,0.44,0.44,14.4,0.048,29.0,228.0,0.99955,3.26,0.54,8.8 +6.3,0.2,0.24,1.7,0.052,36.0,135.0,0.99374,3.8,0.66,10.8 +6.2,0.29,0.32,3.6,0.026,39.0,138.0,0.9892,3.31,0.37,13.1 +7.6,0.39,0.32,3.6,0.035,22.0,93.0,0.99144,3.08,0.6,12.5 +7.0,0.36,0.32,10.05,0.045,37.0,131.0,0.99352,3.09,0.33,11.7 +7.0,0.36,0.32,10.05,0.045,37.0,131.0,0.99352,3.09,0.33,11.7 +7.0,0.36,0.32,10.5,0.045,35.0,135.0,0.9935,3.09,0.33,11.6 +7.6,0.2,0.36,1.9,0.043,24.0,111.0,0.99237,3.29,0.54,11.3 +7.6,0.39,0.32,3.6,0.035,22.0,93.0,0.99144,3.08,0.6,12.5 +6.7,0.2,0.37,1.65,0.025,42.0,103.0,0.99022,3.11,0.45,11.4 +6.2,0.235,0.34,1.9,0.036,4.0,117.0,0.99032,3.4,0.44,12.2 +7.8,0.965,0.6,65.8,0.074,8.0,160.0,1.03898,3.39,0.69,11.7 +7.1,0.2,0.31,6.85,0.053,32.0,211.0,0.99587,3.31,0.59,10.4 +7.1,0.2,0.31,7.4,0.053,32.0,211.0,0.99587,3.31,0.59,10.4 +7.1,0.2,0.31,7.4,0.053,32.0,211.0,0.99587,3.31,0.59,10.4 +6.4,0.24,0.25,20.2,0.083,35.0,157.0,0.99976,3.17,0.5,9.1 +8.0,0.3,0.36,11.0,0.034,8.0,70.0,0.99354,3.05,0.41,12.2 +6.4,0.24,0.25,20.2,0.083,35.0,157.0,0.99976,3.17,0.5,9.1 +6.9,0.4,0.42,6.2,0.066,41.0,176.0,0.99552,3.12,0.54,9.4 +6.9,0.4,0.43,6.2,0.065,42.0,178.0,0.99552,3.11,0.53,9.4 +7.1,0.2,0.31,6.85,0.053,32.0,211.0,0.99587,3.31,0.59,10.4 +6.6,0.25,0.51,8.0,0.047,61.0,189.0,0.99604,3.22,0.49,9.2 +6.8,0.26,0.44,8.2,0.046,52.0,183.0,0.99584,3.2,0.51,9.4 +6.5,0.37,0.3,2.2,0.033,39.0,107.0,0.98894,3.22,0.53,13.5 +6.8,0.35,0.53,10.1,0.053,37.0,151.0,0.9963,3.07,0.4,9.4 +6.4,0.22,0.32,7.2,0.028,15.0,83.0,0.993,3.13,0.55,10.9 +6.5,0.37,0.3,2.2,0.033,39.0,107.0,0.98894,3.22,0.53,13.5 +6.8,0.35,0.53,10.1,0.053,37.0,151.0,0.9963,3.07,0.4,9.4 +6.9,0.31,0.32,1.6,0.036,34.0,114.0,0.99068,3.19,0.45,11.4 +6.7,0.16,0.37,1.3,0.036,45.0,125.0,0.98964,3.19,0.51,12.4 +6.6,0.25,0.51,8.0,0.047,61.0,189.0,0.99604,3.22,0.49,9.2 +6.8,0.26,0.44,8.2,0.046,52.0,183.0,0.99584,3.2,0.51,9.4 +5.6,0.15,0.31,5.3,0.038,8.0,79.0,0.9923,3.3,0.39,10.5 +5.5,0.15,0.32,14.0,0.031,16.0,99.0,0.99437,3.26,0.38,11.5 +6.4,0.22,0.32,7.2,0.028,15.0,83.0,0.993,3.13,0.55,10.9 +7.3,0.2,0.26,1.6,0.04,36.0,123.0,0.99238,3.34,0.44,10.8 +7.5,0.17,0.71,11.8,0.038,52.0,148.0,0.99801,3.03,0.46,8.9 +7.5,0.18,0.72,9.6,0.039,53.0,151.0,0.99802,3.03,0.46,8.9 +7.0,0.27,0.48,6.1,0.042,60.0,184.0,0.99566,3.2,0.5,9.4 +5.8,0.32,0.31,2.7,0.049,25.0,153.0,0.99067,3.44,0.73,12.2 +7.8,0.26,0.31,3.6,0.025,22.0,100.0,0.99066,2.99,0.47,12.1 +7.4,0.3,0.32,1.7,0.03,23.0,128.0,0.9929,3.17,0.66,10.9 +6.7,0.16,0.34,1.6,0.026,27.0,109.0,0.9934,3.34,0.58,10.1 +5.8,0.32,0.31,2.7,0.049,25.0,153.0,0.99067,3.44,0.73,12.2 +6.7,0.19,0.39,1.0,0.032,14.0,71.0,0.98912,3.31,0.38,13.0 
+6.6,0.36,0.24,0.9,0.038,15.0,72.0,0.99066,3.23,0.39,11.0 +7.2,0.17,0.41,1.6,0.052,24.0,126.0,0.99228,3.19,0.49,10.8 +6.7,0.19,0.39,1.0,0.032,14.0,71.0,0.98912,3.31,0.38,13.0 +6.0,0.11,0.47,10.6,0.052,69.0,148.0,0.9958,2.91,0.34,9.3 +6.0,0.21,0.34,2.0,0.042,63.0,123.0,0.99052,3.44,0.42,11.4 +6.7,0.325,0.82,1.2,0.152,49.0,120.0,0.99312,2.99,0.38,9.2 +6.6,0.4,0.46,6.2,0.056,42.0,241.0,0.9968,3.5,0.6,9.9 +6.5,0.2,0.24,9.2,0.044,25.0,150.0,0.99502,3.22,0.44,10.5 +7.6,0.27,0.34,5.0,0.04,18.0,56.0,0.99084,3.06,0.48,12.4 +7.2,0.26,0.4,6.3,0.047,52.0,172.0,0.99573,3.18,0.53,9.5 +6.3,0.25,0.22,3.3,0.048,41.0,161.0,0.99256,3.16,0.5,10.5 +6.5,0.22,0.45,8.0,0.053,52.0,196.0,0.9959,3.23,0.48,9.1 +6.4,0.14,0.31,1.2,0.034,53.0,138.0,0.99084,3.38,0.35,11.5 +6.4,0.14,0.31,1.2,0.034,53.0,138.0,0.99084,3.38,0.35,11.5 +7.1,0.26,0.32,16.2,0.044,31.0,170.0,0.99644,3.17,0.37,11.2 +6.6,0.22,0.34,11.6,0.05,59.0,140.0,0.99526,3.22,0.4,10.8 +6.6,0.45,0.43,7.2,0.064,31.0,186.0,0.9954,3.12,0.44,9.4 +6.6,0.17,0.3,1.1,0.031,13.0,73.0,0.99095,3.17,0.58,11.0 +7.2,0.44,0.28,3.4,0.048,22.0,112.0,0.99188,3.21,0.37,11.3 +6.2,0.15,0.27,1.4,0.041,51.0,117.0,0.9909,3.28,0.38,11.2 +6.3,0.25,0.22,3.3,0.048,41.0,161.0,0.99256,3.16,0.5,10.5 +6.5,0.22,0.45,8.0,0.053,52.0,196.0,0.9959,3.23,0.48,9.1 +7.3,0.26,0.3,9.3,0.05,35.0,154.0,0.99581,3.21,0.5,10.4 +6.9,0.15,0.29,2.3,0.033,14.0,82.0,0.99132,3.1,0.58,11.2 +5.8,0.22,0.29,0.9,0.034,34.0,89.0,0.98936,3.14,0.36,11.1 +6.5,0.37,0.33,3.5,0.036,23.0,92.0,0.99136,3.18,0.38,11.2 +5.5,0.375,0.38,1.7,0.036,17.0,98.0,0.99142,3.29,0.39,10.5 +5.9,0.2,0.4,1.3,0.047,23.0,92.0,0.99232,3.2,0.45,10.0 +5.9,0.22,0.38,1.3,0.046,24.0,90.0,0.99232,3.2,0.47,10.0 +8.0,0.22,0.31,5.6,0.049,24.0,97.0,0.993,3.1,0.42,10.9 +6.5,0.22,0.29,7.4,0.028,16.0,87.0,0.99311,3.15,0.56,10.9 +6.9,0.15,0.29,2.3,0.033,14.0,82.0,0.99132,3.1,0.58,11.2 +5.8,0.2,0.34,1.0,0.035,40.0,86.0,0.98993,3.5,0.42,11.7 +6.6,0.31,0.07,1.5,0.033,55.0,144.0,0.99208,3.16,0.42,10.0 +7.7,0.43,0.37,10.0,0.169,22.0,210.0,0.99776,3.02,0.64,9.5 +6.7,0.24,0.29,14.9,0.053,55.0,136.0,0.99839,3.03,0.52,9.0 +7.3,0.23,0.34,9.3,0.052,19.0,86.0,0.99574,3.04,0.56,10.0 +7.9,0.2,0.39,1.0,0.041,37.0,154.0,0.99093,3.08,0.43,11.9 +5.3,0.16,0.39,1.0,0.028,40.0,101.0,0.99156,3.57,0.59,10.6 +6.4,0.21,0.28,5.9,0.047,29.0,101.0,0.99278,3.15,0.4,11.0 +6.9,0.33,0.26,5.0,0.027,46.0,143.0,0.9924,3.25,0.43,11.2 +5.6,0.18,0.58,1.25,0.034,29.0,129.0,0.98984,3.51,0.6,12.0 +6.6,0.29,0.31,3.9,0.027,39.0,96.0,0.99035,3.24,0.6,12.6 +6.9,0.33,0.26,5.0,0.027,46.0,143.0,0.9924,3.25,0.43,11.2 +6.6,0.21,0.36,0.8,0.034,48.0,113.0,0.99165,3.24,0.68,10.5 +7.3,0.21,0.33,1.0,0.037,66.0,144.0,0.9923,3.11,0.52,10.2 +6.4,0.21,0.28,5.9,0.047,29.0,101.0,0.99278,3.15,0.4,11.0 +5.1,0.11,0.32,1.6,0.028,12.0,90.0,0.99008,3.57,0.52,12.2 +6.5,0.15,0.32,1.3,0.036,19.0,76.0,0.98964,3.18,0.41,12.3 +5.3,0.16,0.39,1.0,0.028,40.0,101.0,0.99156,3.57,0.59,10.6 +5.6,0.19,0.46,1.1,0.032,33.0,115.0,0.9909,3.36,0.5,10.4 +5.6,0.18,0.58,1.25,0.034,29.0,129.0,0.98984,3.51,0.6,12.0 +6.7,0.48,0.32,1.4,0.021,22.0,121.0,0.9889,3.15,0.53,12.7 +6.2,0.23,0.23,1.2,0.018,18.0,128.0,0.99178,3.05,0.28,10.6 +6.0,0.17,0.29,5.0,0.028,25.0,108.0,0.99076,3.14,0.34,12.3 +6.7,0.48,0.32,1.4,0.021,22.0,121.0,0.9889,3.15,0.53,12.7 +6.7,0.15,0.38,1.7,0.037,20.0,84.0,0.99046,3.09,0.53,11.4 +4.2,0.17,0.36,1.8,0.029,93.0,161.0,0.98999,3.65,0.89,12.0 +5.8,0.21,0.32,1.6,0.045,38.0,95.0,0.98946,3.23,0.94,12.4 +5.4,0.23,0.36,1.5,0.03,74.0,121.0,0.98976,3.24,0.99,12.1 +6.7,0.15,0.38,1.7,0.037,20.0,84.0,0.99046,3.09,0.53,11.4 
+6.4,0.22,0.31,13.9,0.04,57.0,135.0,0.99672,3.21,0.38,10.7 +6.5,0.15,0.55,5.9,0.045,75.0,162.0,0.99482,2.97,0.4,9.3 +5.9,0.32,0.33,2.1,0.027,35.0,138.0,0.98945,3.37,0.42,12.7 +5.7,0.37,0.3,1.1,0.029,24.0,88.0,0.98883,3.18,0.39,11.7 +7.9,0.25,0.35,6.7,0.039,22.0,64.0,0.99362,2.93,0.49,10.7 +7.2,0.21,0.28,2.7,0.033,38.0,94.0,0.99075,2.99,0.43,11.8 +7.0,0.24,0.3,6.7,0.039,37.0,125.0,0.99436,3.2,0.39,9.9 +6.8,0.475,0.33,3.95,0.047,16.0,81.0,0.98988,3.23,0.53,13.4 +7.0,0.28,0.32,7.75,0.032,30.0,114.0,0.99158,3.12,0.64,12.8 +6.9,0.4,0.3,10.6,0.033,24.0,87.0,0.99265,3.15,0.45,12.8 +6.6,0.41,0.31,1.6,0.042,18.0,101.0,0.99195,3.13,0.41,10.5 +6.4,0.2,0.28,2.5,0.032,24.0,84.0,0.99168,3.31,0.55,11.5 +8.5,0.22,0.34,0.7,0.04,5.0,25.0,0.9918,3.04,0.37,10.5 +8.4,0.36,0.36,11.1,0.032,21.0,132.0,0.99313,2.95,0.39,13.0 +5.2,0.285,0.29,5.15,0.035,64.0,138.0,0.9895,3.19,0.34,12.4 +6.9,0.2,0.3,4.7,0.041,40.0,148.0,0.9932,3.16,0.35,10.2 +6.7,0.42,0.46,9.7,0.054,67.0,234.0,0.99848,3.23,0.5,9.0 +6.2,0.16,0.34,1.7,0.038,85.0,153.0,0.9909,3.33,0.86,12.0 +6.4,0.125,0.36,1.4,0.044,22.0,68.0,0.99014,3.15,0.5,11.7 +6.4,0.44,0.26,2.0,0.054,20.0,180.0,0.9952,3.58,0.57,10.0 +7.0,0.31,0.39,7.5,0.055,42.0,218.0,0.99652,3.37,0.54,10.3 +6.7,0.42,0.46,9.7,0.054,67.0,234.0,0.99848,3.23,0.5,9.0 +8.6,0.18,0.28,0.8,0.032,25.0,78.0,0.99104,2.99,0.38,11.1 +6.2,0.21,0.26,13.1,0.05,59.0,150.0,0.99772,3.31,0.46,9.0 +6.1,0.16,0.37,1.1,0.031,37.0,97.0,0.9922,3.4,0.58,10.5 +6.5,0.22,0.32,2.2,0.028,36.0,92.0,0.99076,3.27,0.59,11.9 +6.2,0.36,0.14,8.9,0.036,38.0,155.0,0.99622,3.27,0.5,9.4 +5.7,0.21,0.25,1.1,0.035,26.0,81.0,0.9902,3.31,0.52,11.4 +6.4,0.25,0.32,0.9,0.034,40.0,114.0,0.99114,3.31,0.58,10.8 +7.6,0.31,0.26,1.7,0.073,40.0,157.0,0.9938,3.1,0.46,9.8 +6.6,0.26,0.46,6.9,0.047,59.0,183.0,0.99594,3.2,0.45,9.3 +5.7,0.21,0.25,1.1,0.035,26.0,81.0,0.9902,3.31,0.52,11.4 +6.2,0.2,0.31,1.0,0.031,22.0,73.0,0.99035,3.24,0.52,11.3 +6.2,0.18,0.3,1.0,0.031,23.0,73.0,0.99032,3.23,0.52,11.3 +6.1,0.37,0.2,7.6,0.031,49.0,170.0,0.99558,3.22,0.48,9.5 +6.2,0.36,0.14,8.9,0.036,38.0,155.0,0.99622,3.27,0.5,9.4 +6.5,0.22,0.32,2.2,0.028,36.0,92.0,0.99076,3.27,0.59,11.9 +7.7,0.18,0.3,1.2,0.046,49.0,199.0,0.99413,3.03,0.38,9.3 +6.9,0.14,0.38,1.0,0.041,22.0,81.0,0.99043,3.03,0.54,11.4 +6.9,0.14,0.38,1.0,0.041,22.0,81.0,0.99043,3.03,0.54,11.4 +6.0,0.44,0.26,3.1,0.053,57.0,128.0,0.98982,3.22,0.39,12.7 +7.1,0.36,0.4,1.95,0.033,26.0,118.0,0.98934,3.2,0.45,13.5 +5.7,0.28,0.28,2.2,0.019,15.0,65.0,0.9902,3.06,0.52,11.2 +6.4,0.16,0.32,8.75,0.038,38.0,118.0,0.99449,3.19,0.41,10.7 +7.4,0.28,0.4,11.9,0.032,13.0,92.0,0.99629,3.01,0.46,10.8 +6.7,0.39,0.31,2.7,0.054,27.0,202.0,0.9948,3.46,0.57,10.5 +6.5,0.44,0.47,5.45,0.014,44.0,137.0,0.98984,3.13,0.32,13.0 +6.9,0.22,0.31,6.3,0.029,41.0,131.0,0.99326,3.08,0.49,10.8 +6.6,0.22,0.29,14.4,0.046,39.0,118.0,0.99834,3.05,0.5,9.1 +7.7,0.25,0.3,7.8,0.038,67.0,196.0,0.99555,3.1,0.5,10.1 +5.2,0.155,0.33,1.6,0.028,13.0,59.0,0.98975,3.3,0.84,11.9 +7.0,0.31,0.31,9.1,0.036,45.0,140.0,0.99216,2.98,0.31,12.0 +7.0,0.31,0.31,9.1,0.036,45.0,140.0,0.99216,2.98,0.31,12.0 +6.6,0.22,0.29,14.4,0.046,39.0,118.0,0.99834,3.05,0.5,9.1 +5.6,0.21,0.4,1.3,0.041,81.0,147.0,0.9901,3.22,0.95,11.6 +5.2,0.155,0.33,1.6,0.028,13.0,59.0,0.98975,3.3,0.84,11.9 +6.4,0.25,0.32,11.3,0.038,69.0,192.0,0.99573,3.14,0.5,10.2 +6.9,0.22,0.31,6.3,0.029,41.0,131.0,0.99326,3.08,0.49,10.8 +5.3,0.21,0.29,0.7,0.028,11.0,66.0,0.99215,3.3,0.4,9.8 +7.1,0.27,0.28,1.25,0.023,3.0,89.0,0.98993,2.95,0.3,11.4 +5.2,0.17,0.27,0.7,0.03,11.0,68.0,0.99218,3.3,0.41,9.8 
+7.7,0.25,0.3,7.8,0.038,67.0,196.0,0.99555,3.1,0.5,10.1 +7.0,0.12,0.29,10.3,0.039,41.0,98.0,0.99564,3.19,0.38,9.8 +7.0,0.12,0.29,10.3,0.039,41.0,98.0,0.99564,3.19,0.38,9.8 +7.1,0.29,0.34,7.8,0.036,49.0,128.0,0.99397,3.21,0.4,10.7 +7.2,0.3,0.3,8.7,0.022,14.0,111.0,0.99576,3.11,0.61,10.6 +6.8,0.26,0.46,8.3,0.037,49.0,173.0,0.99601,3.17,0.47,9.3 +7.0,0.12,0.29,10.3,0.039,41.0,98.0,0.99564,3.19,0.38,9.8 +7.1,0.29,0.34,7.8,0.036,49.0,128.0,0.99397,3.21,0.4,10.7 +4.9,0.33,0.31,1.2,0.016,39.0,150.0,0.98713,3.33,0.59,14.0 +5.1,0.29,0.28,8.3,0.026,27.0,107.0,0.99308,3.36,0.37,11.0 +5.1,0.29,0.28,8.3,0.026,27.0,107.0,0.99308,3.36,0.37,11.0 +6.8,0.26,0.48,6.2,0.049,55.0,182.0,0.99582,3.21,0.45,9.4 +6.0,0.28,0.52,5.0,0.078,30.0,139.0,0.99494,3.1,0.36,9.0 +6.0,0.28,0.25,1.8,0.042,8.0,108.0,0.9929,3.08,0.55,9.0 +7.2,0.2,0.22,1.6,0.044,17.0,101.0,0.99471,3.37,0.53,10.0 +6.1,0.27,0.25,1.8,0.041,9.0,109.0,0.9929,3.08,0.54,9.0 +6.0,0.28,0.25,1.8,0.042,8.0,108.0,0.9929,3.08,0.55,9.0 +6.4,0.29,0.3,2.9,0.036,25.0,79.0,0.99037,3.29,0.6,12.4 +7.4,0.35,0.24,6.0,0.042,28.0,123.0,0.99304,3.14,0.44,11.3 +8.1,0.12,0.38,0.9,0.034,36.0,86.0,0.99026,2.8,0.55,12.0 +6.4,0.12,0.3,1.1,0.031,37.0,94.0,0.98986,3.01,0.56,11.7 +7.2,0.2,0.22,1.6,0.044,17.0,101.0,0.99471,3.37,0.53,10.0 +7.3,0.4,0.26,5.45,0.016,26.0,90.0,0.98951,2.84,0.54,13.2 +7.7,0.11,0.34,14.05,0.04,41.0,114.0,0.99634,3.07,0.59,11.0 +6.9,0.23,0.41,8.0,0.03,30.0,114.0,0.99368,3.22,0.54,11.0 +6.9,0.38,0.38,13.1,0.112,14.0,94.0,0.99792,3.02,0.48,9.2 +7.5,0.38,0.29,4.9,0.021,38.0,113.0,0.99026,3.08,0.48,13.0 +5.8,0.19,0.24,1.3,0.044,38.0,128.0,0.99362,3.77,0.6,10.6 +5.5,0.34,0.26,2.2,0.021,31.0,119.0,0.98919,3.55,0.49,13.0 +6.6,0.23,0.3,14.9,0.051,33.0,118.0,0.99835,3.04,0.54,9.0 +6.6,0.23,0.3,14.9,0.051,33.0,118.0,0.99835,3.04,0.54,9.0 +8.4,0.31,0.31,0.95,0.021,52.0,148.0,0.99038,2.93,0.32,11.5 +6.7,0.2,0.3,1.4,0.025,17.0,76.0,0.99104,3.11,0.44,11.0 +8.4,0.31,0.31,0.95,0.021,52.0,148.0,0.99038,2.93,0.32,11.5 +7.3,0.26,0.24,1.7,0.05,10.0,112.0,0.99286,3.11,0.43,9.9 +6.3,0.22,0.22,5.6,0.039,31.0,128.0,0.99296,3.12,0.46,10.4 +6.6,0.23,0.3,14.9,0.051,33.0,118.0,0.99835,3.04,0.54,9.0 +7.5,0.19,0.4,7.1,0.056,50.0,110.0,0.9954,3.06,0.52,9.9 +8.0,0.14,0.33,1.2,0.045,71.0,162.0,0.9914,3.07,0.47,11.0 +6.8,0.32,0.39,9.6,0.026,34.0,124.0,0.99286,3.18,0.35,12.1 +6.6,0.23,0.2,11.4,0.044,45.0,131.0,0.99604,2.96,0.51,9.7 +6.6,0.23,0.2,11.4,0.044,45.0,131.0,0.99604,2.96,0.51,9.7 +6.7,0.36,0.26,7.9,0.034,39.0,123.0,0.99119,2.99,0.3,12.2 +6.1,0.38,0.42,5.0,0.016,31.0,113.0,0.99007,3.15,0.31,12.4 +8.5,0.23,0.28,11.1,0.033,30.0,97.0,0.99507,3.03,0.39,10.5 +7.0,0.2,0.31,8.0,0.05,29.0,213.0,0.99596,3.28,0.57,10.4 +6.0,0.26,0.32,3.8,0.029,48.0,180.0,0.99011,3.15,0.34,12.0 +6.9,0.3,0.3,10.55,0.037,4.0,28.0,0.99184,3.07,0.32,12.7 +6.7,0.18,0.28,10.2,0.039,29.0,115.0,0.99469,3.11,0.45,10.9 +6.7,0.18,0.28,10.2,0.039,29.0,115.0,0.99469,3.11,0.45,10.9 +6.8,0.18,0.28,9.8,0.039,29.0,113.0,0.99406,3.11,0.45,10.9 +7.2,0.19,0.31,6.3,0.034,17.0,103.0,0.99305,3.15,0.52,11.4 +6.2,0.16,0.32,1.1,0.036,74.0,184.0,0.99096,3.22,0.41,11.0 +5.0,0.27,0.32,4.5,0.032,58.0,178.0,0.98956,3.45,0.31,12.6 +6.3,0.37,0.28,6.3,0.034,45.0,152.0,0.9921,3.29,0.46,11.6 +6.6,0.2,0.27,10.9,0.038,29.0,130.0,0.99496,3.11,0.44,10.5 +6.8,0.18,0.28,9.8,0.039,29.0,113.0,0.99406,3.11,0.45,10.9 +6.8,0.18,0.28,9.8,0.039,29.0,113.0,0.99406,3.11,0.45,10.9 +6.6,0.28,0.34,0.8,0.037,42.0,119.0,0.9888,3.03,0.37,12.5 +6.5,0.35,0.36,0.8,0.034,32.0,111.0,0.98942,3.11,0.5,12.1 +6.9,0.25,0.33,1.2,0.035,35.0,158.0,0.99082,3.02,0.58,11.3 
+6.0,0.32,0.3,1.3,0.025,18.0,112.0,0.98802,3.07,0.64,13.3 +6.8,0.18,0.28,9.8,0.039,29.0,113.0,0.99406,3.11,0.45,10.9 +6.7,0.18,0.28,10.2,0.039,29.0,115.0,0.99469,3.11,0.45,10.9 +6.6,0.2,0.27,10.9,0.038,29.0,130.0,0.99496,3.11,0.44,10.5 +6.3,0.37,0.28,6.3,0.034,45.0,152.0,0.9921,3.29,0.46,11.6 +7.2,0.19,0.31,6.3,0.034,17.0,103.0,0.99305,3.15,0.52,11.4 +6.3,0.18,0.36,1.2,0.034,26.0,111.0,0.99074,3.16,0.51,11.0 +6.9,0.3,0.36,0.9,0.037,40.0,156.0,0.98968,3.08,0.36,12.1 +6.2,0.16,0.32,1.1,0.036,74.0,184.0,0.99096,3.22,0.41,11.0 +5.0,0.27,0.32,4.5,0.032,58.0,178.0,0.98956,3.45,0.31,12.6 +5.0,0.3,0.33,3.7,0.03,54.0,173.0,0.9887,3.36,0.3,13.0 +6.5,0.2,0.5,18.1,0.054,50.0,221.0,0.99941,2.94,0.64,8.8 +6.7,0.25,0.31,1.35,0.061,30.5,218.0,0.99388,3.16,0.53,9.5 +6.6,0.22,0.36,5.5,0.029,30.0,105.0,0.99206,3.2,0.47,11.8 +6.8,0.25,0.37,3.1,0.026,29.0,93.0,0.99035,3.14,0.45,12.2 +7.0,0.13,0.37,12.85,0.042,36.0,105.0,0.99581,3.05,0.55,10.7 +7.0,0.45,0.34,19.8,0.04,12.0,67.0,0.9976,3.07,0.38,11.0 +7.2,0.32,0.3,8.25,0.02,14.0,104.0,0.99362,2.99,0.44,11.4 +7.0,0.13,0.37,12.85,0.042,36.0,105.0,0.99581,3.05,0.55,10.7 +5.9,0.34,0.3,3.8,0.035,57.0,135.0,0.99016,3.09,0.34,12.0 +6.8,0.22,0.31,6.9,0.037,33.0,121.0,0.99176,3.02,0.39,11.9 +7.2,0.32,0.3,8.25,0.02,14.0,104.0,0.99362,2.99,0.44,11.4 +8.4,0.32,0.35,11.7,0.029,3.0,46.0,0.99439,3.02,0.34,11.8 +6.8,0.27,0.29,4.6,0.046,6.0,88.0,0.99458,3.34,0.48,10.6 +8.0,0.74,0.21,4.0,0.05,24.0,133.0,0.99418,3.06,0.38,9.7 +7.0,0.45,0.34,19.8,0.04,12.0,67.0,0.9976,3.07,0.38,11.0 +7.0,0.13,0.37,12.85,0.042,36.0,105.0,0.99581,3.05,0.55,10.7 +5.4,0.22,0.29,1.2,0.045,69.0,152.0,0.99178,3.76,0.63,11.0 +8.4,0.22,0.3,8.9,0.024,17.0,118.0,0.99456,2.99,0.34,10.5 +7.4,0.32,0.22,11.7,0.035,44.0,150.0,0.99578,3.1,0.45,10.4 +7.5,0.18,0.37,6.2,0.05,21.0,138.0,0.99546,3.2,0.55,10.5 +7.1,0.47,0.29,14.8,0.024,22.0,142.0,0.99518,3.12,0.48,12.0 +7.1,0.47,0.29,14.8,0.024,22.0,142.0,0.99518,3.12,0.48,12.0 +5.8,0.19,0.25,10.8,0.042,33.0,124.0,0.99646,3.22,0.41,9.2 +6.7,0.14,0.46,1.6,0.036,15.0,92.0,0.99264,3.37,0.49,10.9 +6.8,0.24,0.38,8.3,0.045,50.0,185.0,0.99578,3.15,0.5,9.5 +6.9,0.25,0.47,8.4,0.042,36.0,156.0,0.99604,3.15,0.55,9.4 +6.0,0.24,0.33,2.5,0.026,31.0,85.0,0.99014,3.13,0.5,11.3 +6.8,0.29,0.34,3.5,0.054,26.0,189.0,0.99489,3.42,0.58,10.4 +6.3,0.33,0.42,17.2,0.037,57.0,170.0,0.99884,3.26,0.57,9.4 +6.5,0.23,0.45,2.1,0.027,43.0,104.0,0.99054,3.02,0.52,11.3 +6.3,0.27,0.29,12.2,0.044,59.0,196.0,0.99782,3.14,0.4,8.8 +6.3,0.2,0.37,11.8,0.045,58.0,130.0,0.99519,3.2,0.35,10.8 +6.2,0.33,0.41,16.8,0.037,58.0,173.0,0.99882,3.25,0.57,9.4 +6.3,0.33,0.42,17.2,0.037,57.0,170.0,0.99884,3.26,0.57,9.4 +7.2,0.21,1.0,1.1,0.154,46.0,114.0,0.9931,2.95,0.43,9.2 +6.0,0.27,0.3,14.7,0.044,15.0,144.0,0.99666,3.12,0.53,10.3 +5.7,0.12,0.26,5.5,0.034,21.0,99.0,0.99324,3.09,0.57,9.9 +6.9,0.24,0.37,6.1,0.027,38.0,112.0,0.99086,3.19,0.34,12.4 +7.7,0.18,0.53,1.2,0.041,42.0,167.0,0.9908,3.11,0.44,11.9 +7.1,0.17,0.43,1.3,0.023,33.0,132.0,0.99067,3.11,0.56,11.7 +7.5,0.33,0.38,8.7,0.126,49.0,199.0,0.99711,2.98,0.57,9.4 +6.2,0.255,0.24,1.7,0.039,138.5,272.0,0.99452,3.53,0.53,9.6 +7.5,0.33,0.38,8.7,0.126,49.0,199.0,0.99711,2.98,0.57,9.4 +5.6,0.2,0.66,10.2,0.043,78.0,175.0,0.9945,2.98,0.43,10.4 +7.6,0.17,0.36,4.5,0.042,26.0,102.0,0.99427,3.09,0.47,9.5 +5.8,0.15,0.31,5.9,0.036,7.0,73.0,0.99152,3.2,0.43,11.9 +6.3,0.25,0.44,1.7,0.024,36.0,116.0,0.98935,3.18,0.4,12.5 +6.9,0.28,0.41,1.4,0.016,6.0,55.0,0.98876,3.16,0.4,13.4 +7.2,0.27,0.37,5.4,0.026,27.0,114.0,0.99174,3.13,0.84,12.7 
+6.2,0.25,0.38,7.9,0.045,54.0,208.0,0.99572,3.17,0.46,9.1 +8.5,0.19,0.48,1.1,0.026,23.0,58.0,0.99184,2.9,0.5,10.5 +6.2,0.25,0.54,7.0,0.046,58.0,176.0,0.99454,3.19,0.7,10.4 +6.2,0.25,0.54,7.0,0.046,58.0,176.0,0.99454,3.19,0.7,10.4 +6.8,0.28,0.43,7.6,0.03,30.0,110.0,0.99164,3.08,0.59,12.5 +6.2,0.25,0.54,7.0,0.046,58.0,176.0,0.99454,3.19,0.7,10.4 +7.4,0.21,0.8,12.3,0.038,77.0,183.0,0.99778,2.95,0.48,9.0 +7.0,0.15,0.38,15.3,0.045,54.0,120.0,0.9975,3.18,0.42,9.8 +7.4,0.21,0.8,12.3,0.038,77.0,183.0,0.99778,2.95,0.48,9.0 +7.3,0.28,0.42,1.2,0.033,29.0,142.0,0.99205,3.17,0.43,10.7 +6.1,0.18,0.38,2.3,0.033,28.0,111.0,0.98962,3.16,0.49,12.4 +7.0,0.53,0.43,6.1,0.029,6.0,76.0,0.99118,3.08,0.5,12.5 +6.8,0.28,0.43,7.6,0.03,30.0,110.0,0.99164,3.08,0.59,12.5 +6.5,0.36,0.38,10.2,0.028,20.0,82.0,0.99274,3.1,0.43,12.1 +7.5,0.25,0.47,4.1,0.041,95.0,163.0,0.99184,2.92,0.59,11.3 +6.7,0.24,0.41,2.9,0.039,48.0,122.0,0.99052,3.25,0.43,12.0 +6.6,0.25,0.33,8.5,0.042,29.0,141.0,0.99546,3.28,0.6,10.4 +6.4,0.15,0.4,1.5,0.042,23.0,87.0,0.98972,3.11,0.46,12.2 +6.3,0.28,0.3,3.1,0.039,24.0,115.0,0.9942,3.05,0.43,8.6 +6.2,0.25,0.38,7.9,0.045,54.0,208.0,0.99572,3.17,0.46,9.1 +7.1,0.28,0.35,3.5,0.028,35.0,91.0,0.99022,2.96,0.33,12.1 +6.6,0.35,0.34,4.9,0.032,9.0,125.0,0.99253,3.32,0.81,12.0 +8.5,0.19,0.48,1.1,0.026,23.0,58.0,0.99184,2.9,0.5,10.5 +6.2,0.25,0.54,7.0,0.046,58.0,176.0,0.99454,3.19,0.7,10.4 +6.0,0.35,0.51,1.2,0.029,10.0,102.0,0.9903,3.46,0.42,11.9 +5.8,0.31,0.32,4.5,0.024,28.0,94.0,0.98906,3.25,0.52,13.7 +6.6,0.17,0.35,2.6,0.03,33.0,78.0,0.99146,3.22,0.72,11.3 +8.5,0.23,0.4,9.9,0.036,24.0,88.0,0.9951,3.02,0.42,10.5 +5.8,0.31,0.32,4.5,0.024,28.0,94.0,0.98906,3.25,0.52,13.7 +6.1,0.2,0.34,9.5,0.041,38.0,201.0,0.995,3.14,0.44,10.1 +6.3,0.37,0.37,1.5,0.024,12.0,76.0,0.98876,2.94,0.39,12.3 +6.2,0.36,0.38,3.2,0.031,20.0,89.0,0.98956,3.06,0.33,12.0 +6.6,0.17,0.35,2.6,0.03,33.0,78.0,0.99146,3.22,0.72,11.3 +6.3,0.28,0.47,11.2,0.04,61.0,183.0,0.99592,3.12,0.51,9.5 +7.6,0.27,0.52,3.2,0.043,28.0,152.0,0.99129,3.02,0.53,11.4 +7.0,0.25,0.45,2.3,0.045,40.0,118.0,0.99064,3.16,0.48,11.9 +9.7,0.24,0.49,4.9,0.032,3.0,18.0,0.99368,2.85,0.54,10.0 +9.7,0.24,0.49,4.9,0.032,3.0,18.0,0.99368,2.85,0.54,10.0 +6.8,0.13,0.39,1.4,0.034,19.0,102.0,0.99121,3.23,0.6,11.3 +6.6,0.78,0.5,1.5,0.045,30.0,133.0,0.99104,3.25,0.48,11.7 +5.1,0.33,0.27,6.7,0.022,44.0,129.0,0.99221,3.36,0.39,11.0 +6.7,0.34,0.4,2.1,0.033,34.0,111.0,0.98924,2.97,0.48,12.2 +6.7,0.14,0.51,4.3,0.028,57.0,124.0,0.99176,2.91,0.54,10.7 +7.0,0.26,0.34,10.9,0.038,25.0,84.0,0.99432,3.11,0.34,10.9 +6.5,0.29,0.26,7.0,0.04,18.0,113.0,0.99366,3.17,0.38,10.2 +7.0,0.25,0.45,2.3,0.045,40.0,118.0,0.99064,3.16,0.48,11.9 +7.6,0.21,0.49,2.5,0.047,20.0,130.0,0.99178,3.15,0.48,11.1 +7.7,0.26,0.51,2.6,0.045,26.0,159.0,0.99126,3.0,0.5,11.2 +7.6,0.27,0.52,3.2,0.043,28.0,152.0,0.99129,3.02,0.53,11.4 +7.7,0.25,0.49,2.5,0.047,31.0,169.0,0.99252,3.07,0.57,10.6 +7.6,0.35,0.46,14.7,0.047,33.0,151.0,0.99709,3.03,0.53,10.3 +6.9,0.3,0.36,4.5,0.054,31.0,203.0,0.99513,3.4,0.57,10.4 +6.7,0.24,0.46,2.2,0.033,19.0,111.0,0.99045,3.1,0.62,11.9 +6.5,0.23,0.39,1.9,0.036,41.0,98.0,0.99,3.19,0.43,11.9 +7.6,0.23,0.34,1.6,0.043,24.0,129.0,0.99305,3.12,0.7,10.4 +6.5,0.24,0.39,17.3,0.052,22.0,126.0,0.99888,3.11,0.47,9.2 +6.3,0.17,0.32,4.2,0.04,37.0,117.0,0.99182,3.24,0.43,11.3 +6.3,0.17,0.32,4.2,0.04,37.0,117.0,0.99182,3.24,0.43,11.3 +6.7,0.21,0.37,2.5,0.034,35.0,89.0,0.9913,3.25,0.5,11.0 +6.5,0.23,0.39,1.9,0.036,41.0,98.0,0.99,3.19,0.43,11.9 +5.9,0.28,0.39,1.4,0.031,47.0,147.0,0.98836,3.08,0.64,12.9 
+5.9,0.19,0.37,0.8,0.027,3.0,21.0,0.9897,3.09,0.31,10.8 +6.2,0.25,0.42,8.0,0.049,53.0,206.0,0.99586,3.16,0.47,9.1 +7.6,0.23,0.34,1.6,0.043,24.0,129.0,0.99305,3.12,0.7,10.4 +5.6,0.18,0.27,1.7,0.03,31.0,103.0,0.98892,3.35,0.37,12.9 +5.5,0.18,0.22,5.5,0.037,10.0,86.0,0.99156,3.46,0.44,12.2 +6.5,0.24,0.39,17.3,0.052,22.0,126.0,0.99888,3.11,0.47,9.2 +7.4,0.23,0.38,8.6,0.052,41.0,150.0,0.99534,3.06,0.46,10.3 +7.2,0.17,0.37,6.9,0.059,47.0,128.0,0.99322,3.08,0.46,11.0 +7.6,0.3,0.38,2.1,0.043,10.0,98.0,0.99296,3.17,0.65,11.0 +5.0,0.24,0.21,2.2,0.039,31.0,100.0,0.99098,3.69,0.62,11.7 +6.1,0.21,0.38,1.5,0.039,37.0,122.0,0.98972,3.2,0.43,12.0 +6.5,0.33,0.38,2.5,0.047,30.0,148.0,0.98964,3.17,0.43,12.7 +6.3,0.35,0.26,17.6,0.061,59.0,198.0,0.99918,3.11,0.49,8.8 +6.3,0.17,0.32,4.2,0.04,37.0,117.0,0.99182,3.24,0.43,11.3 +6.6,0.25,0.35,2.9,0.034,38.0,121.0,0.99008,3.19,0.4,12.8 +6.5,0.16,0.33,4.8,0.043,45.0,114.0,0.992,3.18,0.44,11.2 +6.6,0.39,0.39,11.9,0.057,51.0,221.0,0.99851,3.26,0.51,8.9 +5.6,0.19,0.27,0.9,0.04,52.0,103.0,0.99026,3.5,0.39,11.2 +6.2,0.25,0.39,1.3,0.051,42.0,135.0,0.9906,3.23,0.4,11.1 +6.9,0.22,0.43,6.4,0.042,34.0,115.0,0.99293,3.05,0.51,10.8 +6.2,0.19,0.29,4.3,0.045,33.0,126.0,0.99658,3.18,0.42,9.3 +6.6,0.39,0.39,11.9,0.057,51.0,221.0,0.99851,3.26,0.51,8.9 +5.9,0.33,0.32,8.1,0.038,9.0,34.0,0.9911,3.22,0.36,12.7 +7.8,0.17,0.5,1.3,0.045,35.0,140.0,0.9904,3.16,0.4,12.0 +5.5,0.19,0.27,0.9,0.04,52.0,103.0,0.99026,3.5,0.39,11.2 +6.2,0.23,0.36,17.2,0.039,37.0,130.0,0.99946,3.23,0.43,8.8 +6.2,0.23,0.36,17.2,0.039,37.0,130.0,0.99946,3.23,0.43,8.8 +6.2,0.23,0.36,17.2,0.039,37.0,130.0,0.99946,3.23,0.43,8.8 +7.2,0.32,0.4,8.7,0.038,45.0,154.0,0.99568,3.2,0.47,10.4 +6.2,0.23,0.36,17.2,0.039,37.0,130.0,0.99946,3.23,0.43,8.8 +7.2,0.32,0.4,8.7,0.038,45.0,154.0,0.99568,3.2,0.47,10.4 +5.8,0.39,0.47,7.5,0.027,12.0,88.0,0.9907,3.38,0.45,14.0 +6.2,0.23,0.36,17.2,0.039,37.0,130.0,0.99946,3.23,0.43,8.8 +7.6,0.25,1.23,4.6,0.035,51.0,294.0,0.99018,3.03,0.43,13.1 +5.8,0.29,0.33,3.7,0.029,30.0,88.0,0.98994,3.25,0.42,12.3 +7.2,0.4,0.38,2.2,0.03,40.0,109.0,0.99075,3.27,0.46,12.6 +6.8,0.39,0.34,7.4,0.02,38.0,133.0,0.99212,3.18,0.44,12.0 +6.1,0.17,0.42,15.1,0.033,28.0,124.0,0.99684,2.87,0.47,9.5 +6.8,0.39,0.34,7.4,0.02,38.0,133.0,0.99212,3.18,0.44,12.0 +7.1,0.36,0.37,4.8,0.019,39.0,114.0,0.99036,3.08,0.49,12.7 +6.9,0.19,0.32,7.9,0.042,30.0,130.0,0.99456,3.4,0.39,10.5 +6.5,0.34,0.46,1.0,0.023,6.0,80.0,0.98865,3.15,0.54,12.9 +6.1,0.17,0.42,15.1,0.033,28.0,124.0,0.99684,2.87,0.47,9.5 +6.8,0.39,0.34,7.4,0.02,38.0,133.0,0.99212,3.18,0.44,12.0 +7.1,0.36,0.37,4.8,0.019,39.0,114.0,0.99036,3.08,0.49,12.7 +7.8,0.3,0.36,4.6,0.024,20.0,198.0,0.99222,3.06,0.66,11.9 +6.1,0.68,0.52,1.4,0.037,32.0,123.0,0.99022,3.24,0.45,12.0 +5.2,0.34,0.37,6.2,0.031,42.0,133.0,0.99076,3.25,0.41,12.5 +5.6,0.28,0.4,6.1,0.034,36.0,118.0,0.99144,3.21,0.43,12.1 +6.2,0.19,0.38,5.1,0.019,22.0,82.0,0.98961,3.05,0.36,12.5 +5.7,0.16,0.26,6.3,0.043,28.0,113.0,0.9936,3.06,0.58,9.9 +7.6,0.17,0.46,0.9,0.036,63.0,147.0,0.99126,3.02,0.41,10.7 +7.3,0.2,0.39,2.3,0.048,24.0,87.0,0.99044,2.94,0.35,12.0 +6.7,0.33,0.36,6.6,0.042,34.0,116.0,0.99123,2.97,0.31,12.2 +6.7,0.33,0.34,7.5,0.036,39.0,124.0,0.99123,2.99,0.32,12.4 +6.9,0.36,0.35,8.6,0.038,37.0,125.0,0.9916,3.0,0.32,12.4 +7.8,0.21,0.34,11.9,0.039,55.0,140.0,0.9959,3.02,0.31,10.3 +7.3,0.2,0.39,2.3,0.048,24.0,87.0,0.99044,2.94,0.35,12.0 +5.6,0.41,0.22,7.1,0.05,44.0,154.0,0.9931,3.3,0.4,10.5 +7.6,0.15,0.35,4.3,0.051,23.0,98.0,0.99422,3.1,0.44,9.5 +8.5,0.2,0.4,1.1,0.046,31.0,106.0,0.99194,3.0,0.35,10.5 
+6.5,0.24,0.38,1.0,0.027,31.0,90.0,0.98926,3.24,0.36,12.3 +8.3,0.16,0.37,7.9,0.025,38.0,107.0,0.99306,2.93,0.37,11.9 +5.5,0.12,0.33,1.0,0.038,23.0,131.0,0.99164,3.25,0.45,9.8 +6.5,0.24,0.38,1.0,0.027,31.0,90.0,0.98926,3.24,0.36,12.3 +6.2,0.1,0.41,1.0,0.04,17.0,76.0,0.98988,3.14,0.56,11.4 +6.5,0.21,0.4,7.3,0.041,49.0,115.0,0.99268,3.21,0.43,11.0 +8.7,0.3,0.59,1.7,0.046,10.0,70.0,0.99373,3.06,0.56,10.8 +6.7,0.18,0.37,1.3,0.027,42.0,125.0,0.98939,3.24,0.37,12.8 +7.0,0.17,0.36,6.4,0.055,42.0,123.0,0.99318,3.11,0.5,11.0 +6.6,0.19,0.33,1.8,0.035,42.0,148.0,0.99196,3.15,0.36,10.2 +5.8,0.28,0.3,1.5,0.026,31.0,114.0,0.98952,3.32,0.6,12.5 +7.6,0.24,0.44,3.8,0.037,49.0,146.0,0.9911,3.06,0.37,11.6 +8.3,0.16,0.37,7.9,0.025,38.0,107.0,0.99306,2.93,0.37,11.9 +5.5,0.12,0.33,1.0,0.038,23.0,131.0,0.99164,3.25,0.45,9.8 +5.7,0.16,0.32,1.2,0.036,7.0,89.0,0.99111,3.26,0.48,11.0 +7.0,0.21,0.42,5.3,0.037,36.0,123.0,0.99321,3.14,0.52,10.9 +6.4,0.22,0.38,9.1,0.044,35.0,127.0,0.99326,2.97,0.3,11.0 +7.9,0.34,0.44,6.5,0.027,47.0,126.0,0.99124,2.96,0.37,12.5 +6.4,0.22,0.38,9.1,0.044,35.0,127.0,0.99326,2.97,0.3,11.0 +6.8,0.21,0.4,6.3,0.032,40.0,121.0,0.99214,3.18,0.53,12.0 +5.2,0.31,0.36,5.1,0.031,46.0,145.0,0.9897,3.14,0.31,12.4 +7.9,0.34,0.44,6.5,0.027,47.0,126.0,0.99124,2.96,0.37,12.5 +5.6,0.42,0.34,2.4,0.022,34.0,97.0,0.98915,3.22,0.38,12.8 +6.4,0.22,0.38,9.1,0.044,35.0,127.0,0.99326,2.97,0.3,11.0 +6.8,0.28,0.34,7.5,0.035,34.0,177.0,0.99692,3.33,0.43,9.1 +6.8,0.45,0.36,5.0,0.033,28.0,156.0,0.991,3.11,0.4,12.4 +6.6,0.29,0.39,6.75,0.031,22.0,98.0,0.9913,3.15,0.8,12.9 +6.8,0.21,0.42,1.2,0.045,24.0,126.0,0.99234,3.09,0.87,10.9 +6.8,0.25,0.24,1.6,0.045,39.0,164.0,0.99402,3.53,0.58,10.8 +6.4,0.21,0.34,16.05,0.04,56.0,142.0,0.99678,3.11,0.38,10.6 +5.8,0.33,0.23,5.0,0.053,29.0,106.0,0.99458,3.13,0.52,9.0 +8.2,0.3,0.44,12.4,0.043,52.0,154.0,0.99452,3.04,0.33,12.0 +6.4,0.24,0.32,0.95,0.041,23.0,131.0,0.99033,3.25,0.35,11.8 +7.5,0.18,0.45,4.6,0.041,67.0,158.0,0.9927,3.01,0.38,10.6 +5.2,0.335,0.2,1.7,0.033,17.0,74.0,0.99002,3.34,0.48,12.3 +7.1,0.14,0.33,1.0,0.104,20.0,54.0,0.99057,3.19,0.64,11.5 +7.2,0.13,0.46,1.3,0.044,48.0,111.0,0.99127,2.97,0.45,11.1 +5.8,0.33,0.23,5.0,0.053,29.0,106.0,0.99458,3.13,0.52,9.0 +6.5,0.29,0.25,2.5,0.142,8.0,111.0,0.9927,3.0,0.44,9.9 +6.2,0.35,0.31,2.6,0.036,37.0,92.0,0.98938,3.27,0.53,12.8 +9.0,0.38,0.53,2.1,0.102,19.0,76.0,0.99001,2.93,0.57,12.9 +6.6,0.24,0.38,12.75,0.034,8.0,74.0,0.99386,3.1,0.57,12.9 +6.6,0.16,0.34,1.1,0.037,41.0,115.0,0.9899,3.01,0.68,12.0 +8.2,0.3,0.44,12.4,0.043,52.0,154.0,0.99452,3.04,0.33,12.0 +5.7,0.15,0.28,3.7,0.045,57.0,151.0,0.9913,3.22,0.27,11.2 +6.6,0.33,0.4,2.65,0.041,35.0,86.0,0.98916,3.11,0.39,13.3 +5.7,0.2,0.3,2.5,0.046,38.0,125.0,0.99276,3.34,0.5,9.9 +6.8,0.27,0.37,8.2,0.055,52.0,192.0,0.99586,3.11,0.52,9.5 +6.8,0.27,0.42,7.3,0.054,58.0,200.0,0.99556,3.12,0.49,9.4 +6.2,0.2,0.26,1.1,0.047,42.0,119.0,0.99158,3.48,0.6,11.0 +6.7,0.13,0.57,6.6,0.056,60.0,150.0,0.99548,2.96,0.43,9.4 +6.8,0.21,0.37,7.0,0.038,27.0,107.0,0.99206,2.98,0.82,11.5 +6.7,0.31,0.32,14.5,0.038,6.0,79.0,0.99412,3.14,0.34,12.5 +6.2,0.2,0.29,11.8,0.035,21.0,93.0,0.99364,3.18,0.34,11.9 +6.6,0.25,0.34,3.0,0.054,22.0,141.0,0.99338,3.26,0.47,10.4 +5.7,0.15,0.28,3.7,0.045,57.0,151.0,0.9913,3.22,0.27,11.2 +6.9,0.22,0.39,6.0,0.035,44.0,141.0,0.99123,3.11,0.33,12.5 +6.4,0.23,0.35,4.6,0.039,43.0,147.0,0.99216,3.18,0.4,11.0 +7.6,0.27,0.29,2.5,0.059,37.0,115.0,0.99328,3.09,0.37,9.8 +6.6,0.34,0.24,3.3,0.034,29.0,99.0,0.99031,3.1,0.4,12.3 +6.4,0.16,0.42,1.0,0.036,29.0,113.0,0.9908,3.18,0.52,11.0 
+5.8,0.3,0.42,1.1,0.036,19.0,113.0,0.98871,3.1,0.46,12.6 +7.0,0.29,0.35,1.4,0.036,42.0,109.0,0.99119,3.31,0.62,11.6 +6.6,0.34,0.24,3.3,0.034,29.0,99.0,0.99031,3.1,0.4,12.3 +6.7,0.21,0.36,8.55,0.02,20.0,86.0,0.99146,3.19,0.22,13.4 +7.6,0.27,0.29,2.5,0.059,37.0,115.0,0.99328,3.09,0.37,9.8 +6.8,0.22,0.41,6.7,0.034,39.0,116.0,0.99245,3.18,0.46,11.5 +7.7,0.27,0.49,3.8,0.037,46.0,139.0,0.99116,3.04,0.38,11.6 +6.4,0.25,0.37,4.5,0.039,41.0,147.0,0.9921,3.18,0.4,11.1 +6.4,0.23,0.35,4.6,0.039,43.0,147.0,0.99216,3.18,0.4,11.0 +6.7,0.13,0.45,4.2,0.043,52.0,131.0,0.99162,3.06,0.54,11.3 +6.7,0.24,0.37,11.3,0.043,64.0,173.0,0.99632,3.08,0.53,9.9 +7.1,0.26,0.37,5.5,0.025,31.0,105.0,0.99082,3.06,0.33,12.6 +5.3,0.3,0.16,4.2,0.029,37.0,100.0,0.9905,3.3,0.36,11.8 +7.1,0.38,0.4,2.2,0.042,54.0,201.0,0.99177,3.03,0.5,11.4 +7.4,0.19,0.31,14.5,0.045,39.0,193.0,0.9986,3.1,0.5,9.2 +7.4,0.19,0.31,14.5,0.045,39.0,193.0,0.9986,3.1,0.5,9.2 +7.4,0.19,0.31,14.5,0.045,39.0,193.0,0.9986,3.1,0.5,9.2 +7.4,0.19,0.31,14.5,0.045,39.0,193.0,0.9986,3.1,0.5,9.2 +7.4,0.19,0.31,14.5,0.045,39.0,193.0,0.9986,3.1,0.5,9.2 +7.4,0.19,0.31,14.5,0.045,39.0,193.0,0.9986,3.1,0.5,9.2 +6.3,0.32,0.32,1.5,0.03,24.0,101.0,0.98923,3.21,0.42,13.0 +7.6,0.19,0.32,18.75,0.047,32.0,193.0,1.00014,3.1,0.5,9.3 +6.5,0.26,0.31,3.6,0.03,36.0,92.0,0.99026,3.22,0.62,12.6 +5.9,0.24,0.12,1.4,0.035,60.0,247.0,0.99358,3.34,0.44,9.6 +4.2,0.215,0.23,5.1,0.041,64.0,157.0,0.99688,3.42,0.44,8.0 +8.1,0.24,0.32,10.5,0.03,34.0,105.0,0.99407,3.11,0.42,11.8 +5.8,0.23,0.2,2.0,0.043,39.0,154.0,0.99226,3.21,0.39,10.2 +7.5,0.33,0.36,2.6,0.051,26.0,126.0,0.99097,3.32,0.53,12.7 +6.6,0.38,0.36,9.2,0.061,42.0,214.0,0.9976,3.31,0.56,9.4 +6.4,0.15,0.29,1.8,0.044,21.0,115.0,0.99166,3.1,0.38,10.2 +6.5,0.32,0.34,5.7,0.044,27.0,91.0,0.99184,3.28,0.6,12.0 +7.5,0.22,0.32,2.4,0.045,29.0,100.0,0.99135,3.08,0.6,11.3 +6.4,0.23,0.32,1.9,0.038,40.0,118.0,0.99074,3.32,0.53,11.8 +6.1,0.22,0.31,1.4,0.039,40.0,129.0,0.99193,3.45,0.59,10.9 +6.5,0.48,0.02,0.9,0.043,32.0,99.0,0.99226,3.14,0.47,9.8 +6.6,0.23,0.3,4.6,0.06,29.0,154.0,0.99142,3.23,0.49,12.2 +6.4,0.16,0.25,1.4,0.057,21.0,125.0,0.99091,3.23,0.44,11.1 +6.6,0.38,0.36,9.2,0.061,42.0,214.0,0.9976,3.31,0.56,9.4 +7.4,0.16,0.32,1.4,0.065,23.0,140.0,0.99134,3.06,0.47,11.4 +6.4,0.15,0.29,1.8,0.044,21.0,115.0,0.99166,3.1,0.38,10.2 +6.5,0.32,0.3,2.3,0.051,20.0,127.0,0.98964,3.13,0.52,12.8 +6.7,0.12,0.36,2.3,0.039,43.0,125.0,0.99229,3.07,0.67,10.1 +6.6,0.2,0.14,4.4,0.184,35.0,168.0,0.99396,2.93,0.45,9.4 +8.0,0.34,0.25,6.4,0.035,38.0,103.0,0.99148,2.91,0.23,12.2 +6.8,0.21,0.31,2.9,0.046,40.0,121.0,0.9913,3.07,0.65,10.9 +6.8,0.23,0.31,2.8,0.047,40.0,122.0,0.99126,3.06,0.64,10.9 +6.8,0.21,0.31,2.9,0.046,40.0,121.0,0.9913,3.07,0.65,10.9 +6.6,0.2,0.14,4.4,0.184,35.0,168.0,0.99396,2.93,0.45,9.4 +6.6,0.28,0.42,8.2,0.044,60.0,196.0,0.99562,3.14,0.48,9.4 +7.8,0.25,0.37,1.0,0.043,10.0,80.0,0.99128,3.08,0.38,11.4 +5.6,0.12,0.33,2.9,0.044,21.0,73.0,0.98896,3.17,0.32,12.9 +6.6,0.28,0.41,7.0,0.046,59.0,194.0,0.99558,3.14,0.48,9.4 +6.8,0.17,0.35,1.8,0.04,29.0,84.0,0.98961,2.91,0.57,12.0 +7.3,0.25,0.28,1.5,0.043,19.0,113.0,0.99338,3.38,0.56,10.1 +6.6,0.28,0.41,7.0,0.046,59.0,194.0,0.99558,3.14,0.48,9.4 +6.6,0.28,0.42,8.2,0.044,60.0,196.0,0.99562,3.14,0.48,9.4 +6.5,0.25,0.5,7.6,0.047,54.0,184.0,0.99572,3.17,0.45,9.2 +6.3,0.24,0.35,2.3,0.039,43.0,109.0,0.99056,3.34,0.44,11.8 +6.8,0.32,0.32,8.7,0.029,31.0,105.0,0.99146,3.0,0.34,12.3 +7.8,0.25,0.37,1.0,0.043,10.0,80.0,0.99128,3.08,0.38,11.4 +5.6,0.12,0.33,2.9,0.044,21.0,73.0,0.98896,3.17,0.32,12.9 
+6.6,0.24,0.28,6.7,0.032,26.0,91.0,0.99172,3.13,0.32,12.3 +6.3,0.22,0.34,5.0,0.032,36.0,93.0,0.99012,3.27,0.36,13.5 +6.0,0.32,0.3,1.9,0.033,41.0,142.0,0.98912,3.29,0.42,12.8 +6.3,0.19,0.29,2.0,0.022,33.0,96.0,0.98902,3.04,0.54,12.8 +6.0,0.32,0.3,1.9,0.033,41.0,142.0,0.98912,3.29,0.42,12.8 +9.4,0.24,0.29,8.5,0.037,124.0,208.0,0.99395,2.9,0.38,11.0 +6.4,0.35,0.28,12.6,0.039,19.0,124.0,0.99539,3.2,0.43,10.6 +6.7,0.46,0.27,5.2,0.039,35.0,96.0,0.99129,3.16,0.44,12.4 +6.3,0.3,0.29,2.1,0.048,33.0,142.0,0.98956,3.22,0.46,12.9 +6.0,0.19,0.29,1.1,0.047,67.0,152.0,0.9916,3.54,0.59,11.1 +5.9,0.24,0.28,1.3,0.032,36.0,95.0,0.98889,3.08,0.64,12.9 +7.3,0.145,0.33,1.1,0.042,14.0,64.0,0.99012,3.1,0.37,11.8 +6.6,0.435,0.38,9.2,0.058,66.0,243.0,0.99833,3.23,0.54,9.1 +5.8,0.18,0.37,1.2,0.036,19.0,74.0,0.98853,3.09,0.49,12.7 +5.8,0.18,0.37,1.1,0.036,31.0,96.0,0.98942,3.16,0.48,12.0 +5.6,0.32,0.32,8.3,0.043,32.0,105.0,0.99266,3.24,0.47,11.2 +6.6,0.16,0.35,1.8,0.042,26.0,105.0,0.98962,3.19,0.75,12.4 +5.1,0.21,0.28,1.4,0.047,48.0,148.0,0.99168,3.5,0.49,10.4 +7.5,0.29,0.36,15.7,0.05,29.0,124.0,0.9968,3.06,0.54,10.4 +6.0,0.26,0.33,4.35,0.04,15.0,80.0,0.98934,3.29,0.5,12.7 +5.7,0.26,0.3,1.8,0.039,30.0,105.0,0.98995,3.48,0.52,12.5 +7.1,0.17,0.31,1.6,0.037,15.0,103.0,0.991,3.14,0.5,12.0 +6.9,0.17,0.3,2.0,0.047,13.0,117.0,0.99152,3.16,0.51,11.6 +6.8,0.25,0.28,5.0,0.035,42.0,126.0,0.99048,3.12,0.38,12.6 +6.6,0.17,0.28,1.8,0.042,62.0,178.0,0.99204,3.15,0.42,10.2 +5.8,0.17,0.36,1.3,0.036,11.0,70.0,0.99202,3.43,0.68,10.4 +6.4,0.24,0.29,1.0,0.038,18.0,122.0,0.9906,3.3,0.42,11.5 +6.7,0.21,0.34,1.4,0.049,36.0,112.0,0.99091,3.02,0.5,11.0 +6.7,0.23,0.33,8.1,0.048,45.0,176.0,0.99472,3.11,0.52,10.1 +6.8,0.23,0.32,8.6,0.046,47.0,159.0,0.99452,3.08,0.52,10.5 +6.5,0.22,0.28,3.7,0.059,29.0,151.0,0.99177,3.23,0.41,12.1 +5.1,0.165,0.22,5.7,0.047,42.0,146.0,0.9934,3.18,0.55,9.9 +6.6,0.425,0.25,2.35,0.034,23.0,87.0,0.99082,3.05,0.41,11.4 +6.9,0.38,0.29,13.65,0.048,52.0,189.0,0.99784,3.0,0.6,9.5 +6.9,0.38,0.29,13.65,0.048,52.0,189.0,0.99784,3.0,0.6,9.5 +6.9,0.38,0.29,13.65,0.048,52.0,189.0,0.99784,3.0,0.6,9.5 +7.2,0.27,0.28,15.2,0.046,6.0,41.0,0.99665,3.17,0.39,10.9 +7.6,0.17,0.27,4.6,0.05,23.0,98.0,0.99422,3.08,0.47,9.5 +6.2,0.3,0.31,1.2,0.048,19.0,125.0,0.98999,3.32,0.54,12.6 +7.6,0.17,0.27,4.6,0.05,23.0,98.0,0.99422,3.08,0.47,9.5 +6.5,0.26,0.32,6.65,0.059,34.0,104.0,0.99254,3.18,0.42,11.1 +6.9,0.36,0.28,13.55,0.048,51.0,189.0,0.99782,3.0,0.6,9.5 +6.9,0.38,0.29,13.65,0.048,52.0,189.0,0.99784,3.0,0.6,9.5 +6.8,0.18,0.24,9.8,0.058,64.0,188.0,0.9952,3.13,0.51,10.6 +6.7,0.18,0.24,10.3,0.057,64.0,185.0,0.99519,3.12,0.5,10.6 +6.6,0.16,0.21,6.7,0.055,43.0,157.0,0.99384,3.15,0.52,10.8 +7.2,0.27,0.28,15.2,0.046,6.0,41.0,0.99665,3.17,0.39,10.9 +6.4,0.17,0.27,9.9,0.047,26.0,101.0,0.99596,3.34,0.5,9.9 +7.2,0.22,0.28,7.2,0.06,41.0,132.0,0.9935,3.08,0.59,11.3 +6.0,0.22,0.28,1.1,0.034,47.0,90.0,0.98862,3.22,0.38,12.6 +6.7,0.36,0.28,8.3,0.034,29.0,81.0,0.99151,2.96,0.39,12.5 +6.5,0.43,0.28,11.25,0.032,31.0,87.0,0.9922,3.02,0.38,12.4 +5.9,0.2,0.28,12.8,0.038,29.0,132.0,0.99426,3.31,0.57,11.8 +5.3,0.32,0.23,9.65,0.026,26.0,119.0,0.99168,3.18,0.53,12.2 +6.8,0.2,0.28,12.6,0.048,54.0,136.0,0.99556,3.19,0.37,10.7 +6.0,0.22,0.33,12.2,0.033,25.0,97.0,0.99356,3.17,0.42,11.3 +6.7,0.36,0.28,8.3,0.034,29.0,81.0,0.99151,2.96,0.39,12.5 +6.5,0.43,0.28,11.25,0.032,31.0,87.0,0.9922,3.02,0.38,12.4 +7.1,0.18,0.49,1.3,0.033,12.0,72.0,0.99072,3.05,0.53,11.3 +6.4,0.17,0.27,9.9,0.047,26.0,101.0,0.99596,3.34,0.5,9.9 +7.2,0.22,0.28,7.2,0.06,41.0,132.0,0.9935,3.08,0.59,11.3 
+6.0,0.22,0.28,1.1,0.034,47.0,90.0,0.98862,3.22,0.38,12.6 +6.0,0.2,0.26,1.1,0.033,38.0,67.0,0.98954,3.14,0.38,11.5 +7.6,0.2,0.26,4.8,0.033,26.0,76.0,0.99076,2.98,0.49,12.3 +6.2,0.3,0.21,1.1,0.032,31.0,111.0,0.9889,2.97,0.42,12.2 +6.0,0.29,0.25,1.4,0.033,30.0,114.0,0.98794,3.08,0.43,13.2 +6.6,0.18,0.28,1.7,0.041,53.0,161.0,0.99207,3.13,0.45,10.2 +7.0,0.22,0.28,10.6,0.039,32.0,117.0,0.99355,3.05,0.55,11.5 +6.0,0.29,0.25,1.4,0.033,30.0,114.0,0.98794,3.08,0.43,13.2 +6.2,0.3,0.21,1.1,0.032,31.0,111.0,0.9889,2.97,0.42,12.2 +5.6,0.15,0.26,5.55,0.051,51.0,139.0,0.99336,3.47,0.5,11.0 +6.9,0.28,0.24,2.1,0.034,49.0,121.0,0.98882,2.98,0.43,13.2 +5.9,0.19,0.21,1.7,0.045,57.0,135.0,0.99341,3.32,0.44,9.5 +7.8,0.22,0.26,9.0,0.047,38.0,132.0,0.997,3.25,0.53,10.2 +6.6,0.18,0.28,1.7,0.041,53.0,161.0,0.99207,3.13,0.45,10.2 +7.0,0.4,0.25,1.8,0.05,51.0,189.0,0.99174,3.0,0.55,11.4 +6.1,0.28,0.27,4.7,0.03,56.0,140.0,0.99042,3.16,0.42,12.5 +7.6,0.36,0.49,11.3,0.046,87.0,221.0,0.9984,3.01,0.43,9.2 +6.5,0.28,0.34,3.6,0.04,29.0,121.0,0.99111,3.28,0.48,12.1 +6.9,0.19,0.35,6.9,0.045,51.0,125.0,0.9933,3.1,0.44,10.7 +6.5,0.28,0.34,3.6,0.04,29.0,121.0,0.99111,3.28,0.48,12.1 +6.4,0.22,0.32,4.9,0.046,50.0,156.0,0.99316,3.38,0.55,11.2 +6.8,0.23,0.3,6.95,0.044,42.0,179.0,0.9946,3.25,0.56,10.6 +6.4,0.32,0.31,1.9,0.037,34.0,126.0,0.99,3.06,0.45,11.8 +6.1,0.28,0.27,4.7,0.03,56.0,140.0,0.99042,3.16,0.42,12.5 +7.6,0.36,0.49,11.3,0.046,87.0,221.0,0.9984,3.01,0.43,9.2 +8.8,0.39,0.35,1.8,0.096,22.0,80.0,0.99016,2.95,0.54,12.6 +6.6,0.24,0.3,11.3,0.026,11.0,77.0,0.99381,3.13,0.55,12.8 +6.9,0.29,0.3,8.2,0.026,35.0,112.0,0.99144,3.0,0.37,12.3 +6.9,0.28,0.3,8.3,0.026,37.0,113.0,0.99139,2.99,0.38,12.3 +6.7,0.38,0.26,9.55,0.036,35.0,91.0,0.9919,2.98,0.37,12.4 +8.0,0.28,0.3,8.4,0.03,35.0,115.0,0.99192,2.93,0.42,12.3 +6.5,0.25,0.45,7.8,0.048,52.0,188.0,0.99576,3.2,0.53,9.1 +6.6,0.26,0.46,7.8,0.047,48.0,186.0,0.9958,3.2,0.54,9.1 +7.4,0.29,0.28,10.2,0.032,43.0,138.0,0.9951,3.1,0.47,10.6 +6.3,0.19,0.29,5.5,0.042,44.0,189.0,0.99304,3.19,0.47,10.3 +6.1,0.33,0.32,7.8,0.052,52.0,183.0,0.99657,3.39,0.65,9.5 +5.6,0.32,0.33,7.4,0.037,25.0,95.0,0.99268,3.25,0.49,11.1 +7.7,0.46,0.18,3.3,0.054,18.0,143.0,0.99392,3.12,0.51,10.8 +8.8,0.19,0.3,5.0,0.028,34.0,120.0,0.99242,2.94,0.47,11.2 +7.7,0.46,0.18,3.3,0.054,18.0,143.0,0.99392,3.12,0.51,10.8 +8.8,0.27,0.25,5.0,0.024,52.0,99.0,0.9925,2.87,0.49,11.4 +5.8,0.18,0.28,1.3,0.034,9.0,94.0,0.99092,3.21,0.52,11.2 +5.8,0.15,0.32,1.2,0.037,14.0,119.0,0.99137,3.19,0.5,10.2 +5.6,0.32,0.33,7.4,0.037,25.0,95.0,0.99268,3.25,0.49,11.1 +6.1,0.33,0.32,7.8,0.052,52.0,183.0,0.99657,3.39,0.65,9.5 +7.1,0.32,0.3,9.9,0.041,63.0,192.0,0.99642,3.12,0.49,10.2 +6.2,0.23,0.35,0.7,0.051,24.0,111.0,0.9916,3.37,0.43,11.0 +8.9,0.3,0.35,4.6,0.032,32.0,148.0,0.99458,3.15,0.45,11.5 +6.0,0.14,0.17,5.6,0.036,37.0,127.0,0.99373,3.05,0.57,9.8 +6.8,0.24,0.29,9.5,0.042,56.0,157.0,0.99586,3.11,0.51,10.1 +6.7,0.21,0.48,14.8,0.05,31.0,195.0,0.99942,2.95,0.75,8.8 +8.9,0.3,0.35,4.6,0.032,32.0,148.0,0.99458,3.15,0.45,11.5 +6.1,0.3,0.3,2.1,0.031,50.0,163.0,0.9895,3.39,0.43,12.7 +7.2,0.37,0.4,11.6,0.032,34.0,214.0,0.9963,3.1,0.51,9.8 +6.7,0.64,0.3,1.2,0.03,18.0,76.0,0.9892,3.16,0.6,12.9 +7.2,0.37,0.4,11.6,0.032,34.0,214.0,0.9963,3.1,0.51,9.8 +6.1,0.3,0.3,2.1,0.031,50.0,163.0,0.9895,3.39,0.43,12.7 +7.6,0.28,0.49,20.15,0.06,30.0,145.0,1.00196,3.01,0.44,8.5 +6.3,0.29,0.28,4.7,0.059,28.0,81.0,0.99036,3.24,0.56,12.7 +6.2,0.28,0.28,4.3,0.026,22.0,105.0,0.989,2.98,0.64,13.1 +7.1,0.18,0.39,14.5,0.051,48.0,156.0,0.99947,3.35,0.78,9.1 
+6.4,0.32,0.27,4.9,0.034,18.0,122.0,0.9916,3.36,0.71,12.5 +7.1,0.17,0.4,14.55,0.047,47.0,156.0,0.99945,3.34,0.78,9.1 +7.1,0.17,0.4,14.55,0.047,47.0,156.0,0.99945,3.34,0.78,9.1 +5.8,0.24,0.26,10.05,0.039,63.0,162.0,0.99375,3.33,0.5,11.2 +6.4,0.32,0.27,4.9,0.034,18.0,122.0,0.9916,3.36,0.71,12.5 +7.1,0.18,0.39,14.5,0.051,48.0,156.0,0.99947,3.35,0.78,9.1 +7.1,0.17,0.4,14.55,0.047,47.0,156.0,0.99945,3.34,0.78,9.1 +7.1,0.18,0.39,15.25,0.047,45.0,158.0,0.99946,3.34,0.77,9.1 +7.8,0.29,0.29,3.15,0.044,41.0,117.0,0.99153,3.24,0.35,11.5 +6.2,0.255,0.27,1.3,0.037,30.0,86.0,0.98834,3.05,0.59,12.9 +8.2,0.34,0.29,5.2,0.076,19.0,92.0,0.99138,2.95,0.39,12.5 +6.5,0.24,0.28,1.1,0.034,26.0,83.0,0.98928,3.25,0.33,12.3 +6.9,0.24,0.23,7.1,0.041,20.0,97.0,0.99246,3.1,0.85,11.4 +6.7,0.4,0.22,8.8,0.052,24.0,113.0,0.99576,3.22,0.45,9.4 +6.7,0.3,0.44,18.5,0.057,65.0,224.0,0.99956,3.11,0.53,9.1 +6.7,0.4,0.22,8.8,0.052,24.0,113.0,0.99576,3.22,0.45,9.4 +6.8,0.17,0.32,1.4,0.04,35.0,106.0,0.99026,3.16,0.66,12.0 +7.1,0.25,0.28,1.2,0.04,31.0,111.0,0.99174,3.18,0.53,11.1 +5.9,0.27,0.27,5.0,0.035,14.0,97.0,0.99058,3.1,0.33,11.8 +6.0,0.16,0.22,1.6,0.042,36.0,106.0,0.9905,3.24,0.32,11.4 +6.7,0.3,0.44,18.75,0.057,65.0,224.0,0.99956,3.11,0.53,9.1 +6.6,0.15,0.32,6.0,0.033,59.0,128.0,0.99192,3.19,0.71,12.1 +7.3,0.34,0.3,9.4,0.057,34.0,178.0,0.99554,3.15,0.44,10.4 +6.0,0.17,0.29,9.7,0.044,33.0,98.0,0.99536,3.12,0.36,9.2 +6.7,0.47,0.29,4.75,0.034,29.0,134.0,0.99056,3.29,0.46,13.0 +6.6,0.15,0.32,6.0,0.033,59.0,128.0,0.99192,3.19,0.71,12.1 +6.6,0.21,0.29,5.35,0.029,43.0,106.0,0.99112,2.93,0.43,11.5 +6.6,0.21,0.29,5.35,0.029,43.0,106.0,0.99112,2.93,0.43,11.5 +8.0,0.24,0.48,6.8,0.047,13.0,134.0,0.99616,3.23,0.7,10.0 +5.6,0.34,0.3,6.9,0.038,23.0,89.0,0.99266,3.25,0.49,11.1 +5.8,0.54,0.0,1.4,0.033,40.0,107.0,0.98918,3.26,0.35,12.4 +7.3,0.23,0.24,0.9,0.031,29.0,86.0,0.98926,2.9,0.38,12.2 +6.0,0.39,0.13,1.2,0.042,60.0,172.0,0.99114,3.06,0.52,10.6 +6.1,0.105,0.31,1.3,0.037,55.0,145.0,0.9912,3.41,0.41,11.1 +5.8,0.32,0.2,2.6,0.027,17.0,123.0,0.98936,3.36,0.78,13.9 +7.6,0.22,0.28,12.0,0.056,68.0,143.0,0.9983,2.99,0.3,9.2 +6.8,0.19,0.4,9.85,0.055,41.0,103.0,0.99532,2.98,0.56,10.5 +6.7,0.24,0.3,3.85,0.042,105.0,179.0,0.99189,3.04,0.59,11.3 +6.8,0.17,0.34,2.0,0.04,38.0,111.0,0.99,3.24,0.45,12.9 +6.2,0.3,0.31,1.6,0.035,40.0,106.0,0.98914,3.26,0.39,12.9 +6.9,0.29,0.41,7.8,0.046,52.0,171.0,0.99537,3.12,0.51,9.6 +6.8,0.19,0.34,1.9,0.04,41.0,108.0,0.99,3.25,0.45,12.9 +6.8,0.17,0.34,2.0,0.04,38.0,111.0,0.99,3.24,0.45,12.9 +6.6,0.24,0.27,10.3,0.047,54.0,219.0,0.99742,3.04,0.45,8.8 +6.6,0.16,0.36,1.1,0.031,27.0,93.0,0.98884,3.23,0.34,13.2 +7.6,0.22,0.28,12.0,0.056,68.0,143.0,0.9983,2.99,0.3,9.2 +6.7,0.24,0.3,3.85,0.042,105.0,179.0,0.99189,3.04,0.59,11.3 +6.8,0.19,0.4,9.85,0.055,41.0,103.0,0.99532,2.98,0.56,10.5 +6.7,0.16,0.36,2.0,0.045,24.0,131.0,0.99284,3.3,0.59,10.5 +6.5,0.3,0.27,4.0,0.038,37.0,97.0,0.99026,3.2,0.6,12.6 +6.5,0.22,0.19,1.1,0.064,36.0,191.0,0.99297,3.05,0.5,9.5 +6.2,0.36,0.45,10.4,0.06,22.0,184.0,0.99711,3.31,0.56,9.8 +6.2,0.37,0.24,6.1,0.032,19.0,86.0,0.98934,3.04,0.26,13.4 +7.6,0.31,0.24,1.8,0.037,39.0,150.0,0.9913,3.05,0.44,11.8 +6.2,0.36,0.45,10.4,0.06,22.0,184.0,0.99711,3.31,0.56,9.8 +5.9,0.32,0.28,4.7,0.039,34.0,94.0,0.98964,3.22,0.57,13.1 +6.5,0.3,0.27,4.0,0.038,37.0,97.0,0.99026,3.2,0.6,12.6 +5.8,0.22,0.3,1.1,0.047,36.0,131.0,0.992,3.26,0.45,10.4 +5.4,0.45,0.27,6.4,0.033,20.0,102.0,0.98944,3.22,0.27,13.4 +6.1,0.36,0.26,8.15,0.035,14.0,88.0,0.99031,3.06,0.27,13.0 +6.2,0.37,0.24,6.1,0.032,19.0,86.0,0.98934,3.04,0.26,13.4 
+7.5,0.21,0.32,4.8,0.056,39.0,113.0,0.99393,3.11,0.52,10.2 +6.9,0.28,0.33,1.2,0.039,16.0,98.0,0.9904,3.07,0.39,11.7 +6.5,0.22,0.19,1.1,0.064,36.0,191.0,0.99297,3.05,0.5,9.5 +7.8,0.2,0.2,1.4,0.036,25.0,83.0,0.99088,3.03,0.46,11.7 +6.7,0.28,0.31,7.4,0.041,7.0,81.0,0.99254,3.04,0.47,11.4 +7.6,0.31,0.24,1.8,0.037,39.0,150.0,0.9913,3.05,0.44,11.8 +8.0,0.2,0.44,1.0,0.057,24.0,111.0,0.99158,3.09,0.32,11.2 +6.0,0.28,0.27,15.5,0.036,31.0,134.0,0.99408,3.19,0.44,13.0 +6.0,0.28,0.27,15.5,0.036,31.0,134.0,0.99408,3.19,0.44,13.0 +6.7,0.24,0.36,8.4,0.042,42.0,123.0,0.99473,3.34,0.52,10.9 +6.3,0.22,0.28,2.4,0.042,38.0,102.0,0.98998,3.14,0.37,11.6 +6.0,0.24,0.28,3.95,0.038,61.0,134.0,0.99146,3.3,0.54,11.3 +7.7,0.43,1.0,19.95,0.032,42.0,164.0,0.99742,3.29,0.5,12.0 +6.4,0.3,0.36,2.0,0.052,18.0,141.0,0.99273,3.38,0.53,10.5 +6.1,0.33,0.3,3.0,0.036,30.0,124.0,0.98922,3.31,0.4,13.1 +6.0,0.28,0.27,15.5,0.036,31.0,134.0,0.99408,3.19,0.44,13.0 +6.7,0.24,0.36,8.4,0.042,42.0,123.0,0.99473,3.34,0.52,10.9 +6.7,0.29,0.45,14.3,0.054,30.0,181.0,0.99869,3.14,0.57,9.1 +6.9,0.33,0.31,4.2,0.04,21.0,93.0,0.9896,3.18,0.48,13.4 +6.5,0.16,0.34,1.4,0.029,29.0,133.0,0.99108,3.33,0.64,11.5 +6.0,0.2,0.32,3.0,0.031,26.0,118.0,0.99134,3.38,0.68,11.2 +7.5,0.33,0.28,4.9,0.042,21.0,155.0,0.99385,3.36,0.57,10.9 +7.1,0.36,0.28,2.4,0.036,35.0,115.0,0.98936,3.19,0.44,13.5 +6.7,0.29,0.45,14.3,0.054,30.0,181.0,0.99869,3.14,0.57,9.1 +6.4,0.26,0.25,10.7,0.046,66.0,179.0,0.99606,3.17,0.55,9.9 +7.0,0.22,0.24,11.0,0.041,75.0,167.0,0.99508,2.98,0.56,10.5 +6.5,0.19,0.28,1.4,0.046,22.0,90.0,0.99038,3.18,0.51,11.7 +6.3,0.21,0.31,1.2,0.043,30.0,117.0,0.99158,3.49,0.68,11.0 +7.9,0.35,0.28,12.9,0.032,13.0,63.0,0.9932,2.99,0.43,13.0 +7.7,0.38,0.23,10.8,0.03,28.0,95.0,0.99164,2.93,0.41,13.6 +6.8,0.19,0.33,1.3,0.031,22.0,87.0,0.98987,3.08,0.62,12.3 +7.2,0.33,0.34,2.0,0.044,61.0,171.0,0.98947,3.25,0.53,13.3 +6.6,0.29,0.29,1.8,0.036,38.0,102.0,0.98819,3.08,0.42,13.7 +7.5,0.2,0.41,1.2,0.05,26.0,131.0,0.99133,3.19,0.52,11.1 +6.9,0.33,0.62,7.5,0.038,46.0,132.0,0.99143,3.23,0.43,13.4 +6.0,0.23,0.15,9.7,0.048,101.0,207.0,0.99571,3.05,0.3,9.1 +5.9,0.23,0.24,3.8,0.038,61.0,152.0,0.99139,3.31,0.5,11.3 +6.6,0.32,0.41,7.2,0.048,55.0,178.0,0.99537,3.2,0.46,9.4 +6.0,0.23,0.15,9.7,0.048,101.0,207.0,0.99571,3.05,0.3,9.1 +5.3,0.36,0.27,6.3,0.028,40.0,132.0,0.99186,3.37,0.4,11.6 +5.3,0.36,0.27,6.3,0.028,40.0,132.0,0.99186,3.37,0.4,11.6 +8.9,0.27,0.28,0.8,0.024,29.0,128.0,0.98984,3.01,0.35,12.4 +7.6,0.23,0.29,8.6,0.053,65.0,146.0,0.9963,3.11,0.32,9.8 +6.9,0.75,0.13,6.3,0.036,19.0,50.0,0.99312,3.09,0.25,11.1 +7.1,0.35,0.27,3.1,0.034,28.0,134.0,0.9897,3.26,0.38,13.1 +7.2,0.31,0.35,7.2,0.046,45.0,178.0,0.9955,3.14,0.53,9.7 +6.4,0.28,0.44,7.1,0.048,49.0,179.0,0.99528,3.15,0.48,9.2 +7.2,0.23,0.46,6.4,0.036,17.0,85.0,0.99279,3.1,0.78,11.7 +6.6,0.22,0.3,14.7,0.045,50.0,136.0,0.99704,3.14,0.37,10.6 +7.2,0.31,0.35,7.2,0.046,45.0,178.0,0.9955,3.14,0.53,9.7 +6.4,0.28,0.44,7.1,0.048,49.0,179.0,0.99528,3.15,0.48,9.2 +7.2,0.24,0.28,1.9,0.032,30.0,92.0,0.9914,3.1,0.39,10.9 +6.2,0.27,0.47,1.2,0.146,28.0,105.0,0.99224,3.23,0.51,10.1 +6.5,0.28,0.25,4.8,0.029,54.0,128.0,0.99074,3.17,0.44,12.2 +7.2,0.27,0.31,1.2,0.031,27.0,80.0,0.98892,3.03,0.33,12.7 +7.8,0.28,0.25,3.4,0.024,27.0,99.0,0.98959,2.98,0.37,13.0 +8.1,0.26,0.27,4.3,0.03,43.0,123.0,0.99212,3.16,0.33,11.2 +6.6,0.23,0.37,8.5,0.036,46.0,153.0,0.99576,3.2,0.48,9.4 +6.0,0.33,0.2,1.8,0.031,49.0,159.0,0.9919,3.41,0.53,11.0 +6.0,0.33,0.2,1.8,0.031,49.0,159.0,0.9919,3.41,0.53,11.0 +7.3,0.2,0.29,19.5,0.039,69.0,237.0,1.00037,3.1,0.48,9.2 
+6.6,0.23,0.37,8.5,0.036,46.0,153.0,0.99576,3.2,0.48,9.4 +7.3,0.2,0.29,19.9,0.039,69.0,237.0,1.00037,3.1,0.48,9.2 +6.2,0.47,0.19,8.3,0.029,24.0,142.0,0.992,3.22,0.45,12.3 +6.0,0.33,0.2,1.8,0.031,49.0,159.0,0.9919,3.41,0.53,11.0 +7.2,0.14,0.32,1.1,0.022,48.0,116.0,0.99218,3.04,0.67,10.0 +5.7,0.22,0.22,16.65,0.044,39.0,110.0,0.99855,3.24,0.48,9.0 +5.7,0.22,0.22,16.65,0.044,39.0,110.0,0.99855,3.24,0.48,9.0 +5.7,0.22,0.22,16.65,0.044,39.0,110.0,0.99855,3.24,0.48,9.0 +8.1,0.2,0.28,0.9,0.023,49.0,87.0,0.99062,2.92,0.36,11.1 +5.8,0.14,0.15,6.1,0.042,27.0,123.0,0.99362,3.06,0.6,9.9 +4.8,0.21,0.21,10.2,0.037,17.0,112.0,0.99324,3.66,0.48,12.2 +8.1,0.2,0.28,0.9,0.023,49.0,87.0,0.99062,2.92,0.36,11.1 +5.7,0.22,0.22,16.65,0.044,39.0,110.0,0.99855,3.24,0.48,9.0 +7.5,0.34,0.24,3.85,0.031,5.0,34.0,0.99098,3.01,0.36,11.8 +6.6,0.64,0.28,4.4,0.032,19.0,78.0,0.99036,3.11,0.62,12.9 +7.0,0.48,0.12,4.5,0.05,23.0,86.0,0.99398,2.86,0.35,9.0 +7.6,0.37,0.34,3.2,0.028,42.0,162.0,0.9903,3.01,0.33,12.4 +7.0,0.48,0.12,4.5,0.05,23.0,86.0,0.99398,2.86,0.35,9.0 +6.6,0.64,0.28,4.4,0.032,19.0,78.0,0.99036,3.11,0.62,12.9 +8.0,0.25,0.27,9.7,0.036,15.0,85.0,0.99406,2.99,0.36,11.2 +7.6,0.38,0.28,4.2,0.029,7.0,112.0,0.9906,3.0,0.41,12.6 +6.9,0.26,0.27,4.2,0.031,20.0,80.0,0.99089,3.12,0.39,11.5 +7.8,0.15,0.34,1.1,0.035,31.0,93.0,0.99096,3.07,0.72,11.3 +8.0,0.25,0.27,9.7,0.036,15.0,85.0,0.99406,2.99,0.36,11.2 +6.9,0.26,0.27,4.2,0.031,20.0,80.0,0.99089,3.12,0.39,11.5 +5.9,0.655,0.0,5.6,0.033,8.0,31.0,0.9936,3.32,0.51,10.5 +7.6,0.38,0.28,4.2,0.029,7.0,112.0,0.9906,3.0,0.41,12.6 +7.8,0.31,0.4,1.6,0.027,20.0,87.0,0.9911,3.15,0.48,11.9 +8.1,0.17,0.21,1.6,0.036,24.0,119.0,0.99396,3.18,0.52,10.1 +6.8,0.18,0.28,1.1,0.027,32.0,112.0,0.99089,3.15,0.45,11.0 +7.4,0.28,0.36,14.6,0.048,35.0,161.0,0.9968,3.14,0.56,10.6 +7.3,0.23,0.27,2.6,0.035,39.0,120.0,0.99138,3.04,0.59,11.3 +6.7,0.22,0.22,1.2,0.038,5.0,124.0,0.99098,3.1,0.37,11.2 +7.4,0.25,0.28,7.25,0.028,14.0,78.0,0.99238,2.94,0.37,11.5 +7.5,0.3,0.21,6.55,0.026,33.0,143.0,0.99244,2.92,0.35,11.1 +7.2,0.26,0.24,7.0,0.023,19.0,130.0,0.99176,3.14,0.49,12.8 +6.3,0.32,0.32,1.5,0.037,12.0,76.0,0.98993,3.3,0.46,12.3 +7.7,0.24,0.3,1.4,0.041,15.0,102.0,0.9929,3.26,0.53,10.4 +7.4,0.25,0.28,7.25,0.028,14.0,78.0,0.99238,2.94,0.37,11.5 +7.0,0.24,0.35,1.0,0.032,42.0,104.0,0.98988,3.16,0.37,11.7 +5.8,0.28,0.28,4.2,0.044,52.0,158.0,0.992,3.35,0.44,10.7 +6.8,0.19,0.71,17.5,0.042,21.0,114.0,0.99784,2.85,0.5,9.5 +6.8,0.19,0.71,17.5,0.042,21.0,114.0,0.99784,2.85,0.5,9.5 +6.8,0.19,0.71,17.5,0.042,21.0,114.0,0.99784,2.85,0.5,9.5 +6.6,0.19,0.35,1.5,0.037,37.0,107.0,0.99006,3.18,0.68,12.0 +6.4,0.28,0.36,1.3,0.053,28.0,186.0,0.99211,3.31,0.45,10.8 +5.6,0.28,0.27,3.9,0.043,52.0,158.0,0.99202,3.35,0.44,10.7 +5.6,0.28,0.28,4.2,0.044,52.0,158.0,0.992,3.35,0.44,10.7 +6.8,0.19,0.32,7.6,0.049,37.0,107.0,0.99332,3.12,0.44,10.7 +7.2,0.16,0.29,1.0,0.031,40.0,123.0,0.98958,3.12,0.4,12.1 +6.6,0.17,0.28,1.1,0.034,55.0,108.0,0.98939,3.0,0.52,11.9 +6.6,0.19,0.28,11.8,0.042,54.0,137.0,0.99492,3.18,0.37,10.8 +5.8,0.2,0.24,1.4,0.033,65.0,169.0,0.99043,3.59,0.56,12.3 +6.6,0.39,0.38,9.7,0.053,49.0,226.0,0.99787,3.3,0.57,9.4 +6.8,0.12,0.3,12.9,0.049,32.0,88.0,0.99654,3.2,0.35,9.9 +6.6,0.295,0.24,1.6,0.039,29.0,140.0,0.99304,3.35,0.61,10.4 +6.6,0.26,0.24,7.2,0.038,28.0,137.0,0.9952,3.35,0.6,10.4 +7.0,0.32,0.27,7.1,0.027,37.0,122.0,0.99165,3.15,0.6,12.6 +7.4,0.36,0.23,1.9,0.017,31.0,69.0,0.9892,2.93,0.36,12.5 +6.7,0.35,0.48,8.8,0.056,35.0,167.0,0.99628,3.04,0.47,9.4 +6.4,0.38,0.24,7.2,0.047,41.0,151.0,0.99604,3.11,0.6,9.2 
+6.8,0.14,0.18,1.4,0.047,30.0,90.0,0.99164,3.27,0.54,11.2 +7.0,0.16,0.25,14.3,0.044,27.0,149.0,0.998,2.91,0.46,9.2 +7.0,0.16,0.25,14.3,0.044,27.0,149.0,0.998,2.91,0.46,9.2 +6.7,0.35,0.48,8.8,0.056,35.0,167.0,0.99628,3.04,0.47,9.4 +6.8,0.14,0.18,1.4,0.047,30.0,90.0,0.99164,3.27,0.54,11.2 +6.8,0.16,0.18,1.8,0.046,31.0,114.0,0.99226,3.27,0.55,10.8 +7.0,0.16,0.25,14.3,0.044,27.0,149.0,0.998,2.91,0.46,9.2 +6.4,0.38,0.24,7.2,0.047,41.0,151.0,0.99604,3.11,0.6,9.2 +7.2,0.24,0.3,1.2,0.037,11.0,95.0,0.98914,2.96,0.36,12.5 +7.7,0.32,0.61,11.8,0.041,66.0,188.0,0.99794,3.0,0.54,9.3 +7.0,0.29,0.33,0.9,0.041,20.0,117.0,0.99048,3.21,0.5,11.4 +7.1,0.27,0.24,12.6,0.044,48.0,118.0,0.99726,3.04,0.56,10.0 +6.8,0.45,0.28,26.05,0.031,27.0,122.0,1.00295,3.06,0.42,10.6 +6.3,0.2,0.26,4.7,0.04,108.0,168.0,0.99278,3.07,0.75,10.7 +7.1,0.27,0.24,12.6,0.044,48.0,118.0,0.99726,3.04,0.56,10.0 +7.2,0.24,0.3,1.2,0.037,11.0,95.0,0.98914,2.96,0.36,12.5 +6.8,0.45,0.28,26.05,0.031,27.0,122.0,1.00295,3.06,0.42,10.6 +6.6,0.36,0.28,6.1,0.029,12.0,93.0,0.99054,3.19,0.27,12.8 +7.7,0.32,0.61,11.8,0.041,66.0,188.0,0.99794,3.0,0.54,9.3 +7.0,0.29,0.33,0.9,0.041,20.0,117.0,0.99048,3.21,0.5,11.4 +6.4,0.37,0.2,5.6,0.117,61.0,183.0,0.99459,3.24,0.43,9.5 +6.4,0.38,0.2,5.3,0.117,57.0,181.0,0.99459,3.24,0.43,9.5 +6.4,0.36,0.2,5.7,0.118,61.0,172.0,0.9946,3.24,0.43,9.5 +6.6,0.3,0.25,8.0,0.036,21.0,124.0,0.99362,3.06,0.38,10.8 +6.6,0.3,0.25,8.0,0.036,21.0,124.0,0.99362,3.06,0.38,10.8 +6.5,0.21,0.51,17.6,0.045,34.0,125.0,0.99966,3.2,0.47,8.8 +6.6,0.3,0.25,8.0,0.036,21.0,124.0,0.99362,3.06,0.38,10.8 +7.6,0.31,0.27,8.8,0.021,57.0,156.0,0.99442,3.08,0.38,11.0 +5.8,0.58,0.0,1.5,0.02,33.0,96.0,0.98918,3.29,0.38,12.4 +6.5,0.26,0.39,1.4,0.02,12.0,66.0,0.99089,3.25,0.75,11.3 +8.7,0.3,0.34,4.8,0.018,23.0,127.0,0.99474,3.12,0.49,11.2 +6.4,0.29,0.32,2.4,0.014,34.0,89.0,0.99008,3.24,0.66,12.5 +6.7,0.13,0.32,3.7,0.017,32.0,99.0,0.99348,3.12,0.44,10.0 +6.8,0.19,0.33,4.9,0.047,42.0,130.0,0.99283,3.12,0.56,11.0 +6.0,0.25,0.4,5.7,0.052,56.0,152.0,0.99398,3.16,0.88,10.5 +6.0,0.25,0.4,5.7,0.052,56.0,152.0,0.99398,3.16,0.88,10.5 +6.8,0.19,0.33,4.9,0.047,42.0,130.0,0.99283,3.12,0.56,11.0 +6.4,0.24,0.23,2.0,0.046,30.0,133.0,0.9908,3.12,0.54,11.4 +5.9,0.18,0.28,5.1,0.039,50.0,139.0,0.99165,3.16,0.44,11.3 +7.2,0.33,0.22,4.5,0.031,10.0,73.0,0.99076,2.97,0.52,12.2 +6.4,0.29,0.24,3.2,0.037,31.0,95.0,0.98942,2.9,0.66,12.6 +7.3,0.31,0.25,6.65,0.032,30.0,138.0,0.99244,2.9,0.37,11.1 +7.0,0.29,0.37,1.6,0.035,34.0,126.0,0.99058,3.26,0.47,12.3 +6.9,0.19,0.6,4.0,0.037,6.0,122.0,0.99255,2.92,0.59,10.4 +6.3,0.32,0.17,17.75,0.06,51.0,190.0,0.99916,3.13,0.48,8.8 +6.6,0.085,0.33,1.4,0.036,17.0,109.0,0.99306,3.27,0.61,9.5 +6.3,0.32,0.17,17.75,0.06,51.0,190.0,0.99916,3.13,0.48,8.8 +6.8,0.18,0.32,7.2,0.047,17.0,109.0,0.99498,3.42,0.44,10.4 +6.8,0.52,0.26,5.7,0.038,27.0,130.0,0.99,3.11,0.27,13.0 +7.1,0.28,0.28,8.5,0.03,25.0,191.0,0.99338,3.16,0.46,12.2 +5.7,0.15,0.47,11.4,0.035,49.0,128.0,0.99456,3.03,0.34,10.5 +5.8,0.275,0.3,5.4,0.043,41.0,149.0,0.9926,3.33,0.42,10.8 +5.4,0.53,0.16,2.7,0.036,34.0,128.0,0.98856,3.2,0.53,13.2 +5.8,0.32,0.28,4.3,0.032,46.0,115.0,0.98946,3.16,0.57,13.0 +6.7,0.22,0.39,1.2,0.049,26.0,152.0,0.99346,3.5,0.47,10.0 +6.1,0.6,0.12,1.8,0.05,11.0,76.0,0.99268,3.42,0.48,10.4 +6.5,0.26,0.31,1.3,0.034,59.0,145.0,0.98944,3.16,0.54,12.4 +5.0,0.29,0.54,5.7,0.035,54.0,155.0,0.98976,3.27,0.34,12.9 +5.4,0.53,0.16,2.7,0.036,34.0,128.0,0.98856,3.2,0.53,13.2 +6.8,0.21,0.26,11.7,0.038,61.0,152.0,0.99523,3.02,0.56,10.5 +5.8,0.32,0.28,4.3,0.032,46.0,115.0,0.98946,3.16,0.57,13.0 
+6.5,0.27,0.26,11.0,0.03,2.0,82.0,0.99402,3.07,0.36,11.2 +5.9,0.37,0.32,1.6,0.029,41.0,102.0,0.98916,3.41,0.55,12.7 +6.2,0.21,0.18,11.6,0.044,61.0,155.0,0.99655,3.14,0.52,9.4 +6.8,0.3,0.29,6.2,0.025,29.0,95.0,0.99071,3.03,0.32,12.9 +7.3,0.41,0.29,1.8,0.032,26.0,74.0,0.98889,2.96,0.35,13.0 +5.4,0.3,0.3,1.2,0.029,25.0,93.0,0.98742,3.31,0.4,13.6 +6.6,0.34,0.2,1.0,0.053,26.0,112.0,0.99336,3.32,0.55,9.1 +5.6,0.25,0.19,2.4,0.049,42.0,166.0,0.992,3.25,0.43,10.4 +5.3,0.3,0.3,1.2,0.029,25.0,93.0,0.98742,3.31,0.4,13.6 +6.9,0.58,0.58,8.2,0.032,29.0,169.0,0.99275,3.28,0.44,12.2 +7.2,0.23,0.25,18.8,0.085,19.0,111.0,1.00044,3.1,0.51,8.7 +7.1,0.2,0.27,9.6,0.037,19.0,105.0,0.99444,3.04,0.37,10.5 +6.8,0.15,0.41,12.9,0.044,79.5,183.0,0.99742,3.24,0.78,10.2 +7.0,0.22,0.26,9.2,0.027,37.0,122.0,0.99228,3.06,0.34,12.5 +6.4,0.16,0.44,1.2,0.051,39.0,122.0,0.99058,3.11,0.75,11.3 +6.8,0.15,0.41,12.9,0.044,79.5,183.0,0.99742,3.24,0.78,10.2 +6.8,0.31,0.3,8.0,0.028,33.0,122.0,0.99164,3.13,0.63,12.6 +6.8,0.15,0.41,12.9,0.044,79.5,183.0,0.99742,3.24,0.78,10.2 +7.6,0.3,0.37,1.6,0.087,27.0,177.0,0.99438,3.09,0.5,9.8 +6.0,0.16,0.27,12.0,0.03,39.0,98.0,0.99402,3.15,0.34,10.8 +7.1,0.21,0.35,2.5,0.04,41.0,186.0,0.99128,3.32,0.56,12.5 +7.0,0.22,0.26,9.2,0.027,37.0,122.0,0.99228,3.06,0.34,12.5 +5.6,0.21,0.24,4.4,0.027,37.0,150.0,0.991,3.3,0.31,11.5 +7.4,0.22,0.26,8.8,0.027,23.0,112.0,0.9931,2.98,0.41,11.4 +7.1,0.2,0.27,9.6,0.037,19.0,105.0,0.99444,3.04,0.37,10.5 +6.8,0.31,0.3,8.0,0.028,33.0,122.0,0.99164,3.13,0.63,12.6 +7.2,0.23,0.25,18.8,0.085,19.0,111.0,1.00044,3.1,0.51,8.7 +6.4,0.15,0.4,1.3,0.053,61.0,146.0,0.99112,3.17,0.68,11.0 +6.4,0.16,0.44,1.2,0.051,39.0,122.0,0.99058,3.11,0.75,11.3 +6.8,0.15,0.41,12.9,0.044,79.5,182.0,0.99742,3.24,0.78,10.2 +6.3,0.22,0.34,1.2,0.036,32.0,96.0,0.98961,3.06,0.74,11.6 +7.6,0.3,0.37,1.6,0.087,27.0,177.0,0.99438,3.09,0.5,9.8 +7.0,0.3,0.27,1.5,0.076,24.0,145.0,0.99344,3.1,0.52,10.1 +6.6,0.26,0.22,18.15,0.05,23.0,139.0,0.99904,3.06,0.5,9.2 +7.5,0.24,0.31,13.1,0.05,26.0,180.0,0.99884,3.05,0.53,9.1 +7.5,0.24,0.31,13.1,0.05,26.0,180.0,0.99884,3.05,0.53,9.1 +7.5,0.24,0.31,13.1,0.05,26.0,180.0,0.99884,3.05,0.53,9.1 +7.5,0.24,0.31,13.1,0.05,26.0,180.0,0.99884,3.05,0.53,9.1 +6.6,0.15,0.34,1.0,0.037,45.0,79.0,0.98949,2.96,0.5,11.7 +6.7,0.34,0.43,1.6,0.041,29.0,114.0,0.99014,3.23,0.44,12.6 +7.7,0.35,0.46,11.8,0.088,61.0,183.0,0.99786,2.86,0.47,9.0 +6.7,0.31,0.09,1.4,0.039,53.0,141.0,0.99206,3.12,0.44,10.1 +4.7,0.67,0.09,1.0,0.02,5.0,9.0,0.98722,3.3,0.34,13.6 +7.5,0.24,0.31,13.1,0.05,26.0,180.0,0.99884,3.05,0.53,9.1 +6.3,0.2,0.18,10.6,0.045,57.0,159.0,0.99666,3.09,0.54,9.2 +6.6,0.28,0.23,10.4,0.049,45.0,190.0,0.99754,3.12,0.51,8.8 +8.5,0.18,0.3,1.1,0.028,34.0,95.0,0.99272,2.83,0.36,10.0 +6.5,0.35,0.38,7.4,0.036,20.0,196.0,0.99712,3.47,0.48,9.1 +6.8,0.22,0.26,1.2,0.041,29.0,182.0,0.99104,3.04,0.35,11.2 +6.3,0.18,0.24,3.4,0.053,20.0,119.0,0.99373,3.11,0.52,9.2 +6.6,0.26,0.22,18.15,0.05,23.0,139.0,0.99904,3.06,0.5,9.2 +6.6,0.3,0.45,8.0,0.038,54.0,200.0,0.9956,3.18,0.48,9.5 +6.3,0.34,0.27,2.5,0.024,40.0,152.0,0.99095,3.35,0.6,11.9 +7.7,0.3,0.23,2.0,0.068,28.0,138.0,0.99382,3.11,0.62,9.8 +7.7,0.31,0.23,2.0,0.069,29.0,134.0,0.99382,3.11,0.62,9.8 +5.7,0.265,0.28,6.9,0.036,46.0,150.0,0.99299,3.36,0.44,10.8 +5.4,0.255,0.33,1.2,0.051,29.0,122.0,0.99048,3.37,0.66,11.3 +6.6,0.26,0.28,9.4,0.028,13.0,121.0,0.99254,3.17,0.34,12.1 +4.8,0.17,0.28,2.9,0.03,22.0,111.0,0.9902,3.38,0.34,11.3 +5.7,0.265,0.28,6.9,0.036,46.0,150.0,0.99299,3.36,0.44,10.8 +6.2,0.2,0.33,5.4,0.028,21.0,75.0,0.99012,3.36,0.41,13.5 
+7.5,0.28,0.41,1.3,0.044,11.0,126.0,0.99293,3.28,0.45,10.3 +6.2,0.22,0.2,20.8,0.035,58.0,184.0,1.00022,3.11,0.53,9.0 +7.0,0.34,0.26,10.3,0.041,51.0,166.0,0.99382,3.08,0.35,11.6 +7.5,0.28,0.41,1.3,0.044,11.0,126.0,0.99293,3.28,0.45,10.3 +6.5,0.19,0.34,1.6,0.029,39.0,116.0,0.98954,3.21,0.68,12.5 +6.0,0.21,0.29,13.1,0.042,28.0,125.0,0.99936,3.39,0.45,8.6 +6.1,0.22,0.46,1.8,0.16,34.0,74.0,0.9884,3.19,0.33,13.4 +6.5,0.32,0.48,8.0,0.026,18.0,88.0,0.99144,3.22,0.79,12.7 +7.1,0.21,0.72,1.6,0.167,65.0,120.0,0.99324,2.97,0.51,9.2 +5.6,0.26,0.18,1.4,0.034,18.0,135.0,0.99174,3.32,0.35,10.2 +7.0,0.15,0.28,14.7,0.051,29.0,149.0,0.99792,2.96,0.39,9.0 +7.0,0.15,0.28,14.7,0.051,29.0,149.0,0.99792,2.96,0.39,9.0 +7.0,0.15,0.28,14.7,0.051,29.0,149.0,0.99792,2.96,0.39,9.0 +7.0,0.15,0.28,14.7,0.051,29.0,149.0,0.99792,2.96,0.39,9.0 +7.0,0.15,0.28,14.7,0.051,29.0,149.0,0.99792,2.96,0.39,9.0 +7.0,0.15,0.28,14.7,0.051,29.0,149.0,0.99792,2.96,0.39,9.0 +7.0,0.15,0.28,14.7,0.051,29.0,149.0,0.99792,2.96,0.39,9.0 +7.4,0.27,0.28,1.8,0.04,45.0,121.0,0.99043,3.02,0.4,11.9 +6.8,0.22,0.3,10.6,0.07,67.0,194.0,0.99654,2.89,0.42,9.0 +6.2,0.24,0.25,12.5,0.055,47.0,134.0,0.99758,3.3,0.51,9.0 +6.3,0.28,0.29,6.8,0.051,40.0,143.0,0.99374,3.43,0.59,11.0 +7.0,0.15,0.28,14.7,0.051,29.0,149.0,0.99792,2.96,0.39,9.0 +5.5,0.17,0.23,2.9,0.039,10.0,108.0,0.99243,3.28,0.5,10.0 +6.5,0.26,0.34,1.4,0.04,25.0,184.0,0.99216,3.29,0.46,10.7 +6.6,0.27,0.33,1.4,0.042,24.0,183.0,0.99215,3.29,0.46,10.7 +5.4,0.46,0.15,2.1,0.026,29.0,130.0,0.98953,3.39,0.77,13.4 +7.8,0.19,0.32,7.4,0.015,47.0,124.0,0.99278,2.99,0.39,11.0 +5.5,0.17,0.23,2.9,0.039,10.0,108.0,0.99243,3.28,0.5,10.0 +6.5,0.26,0.34,1.4,0.04,25.0,184.0,0.99216,3.29,0.46,10.7 +6.6,0.27,0.33,1.4,0.042,24.0,183.0,0.99215,3.29,0.46,10.7 +7.8,0.19,0.32,7.4,0.015,47.0,124.0,0.99278,2.99,0.39,11.0 +7.8,0.2,0.32,5.0,0.016,31.0,101.0,0.99186,2.99,0.39,11.0 +6.1,0.17,0.28,2.5,0.028,22.0,98.0,0.99072,3.16,0.37,11.1 +7.4,0.2,0.35,6.1,0.025,10.0,40.0,0.99244,2.79,0.52,10.9 +6.7,0.39,0.24,2.7,0.017,22.0,80.0,0.99084,3.03,0.37,11.5 +5.4,0.46,0.15,2.1,0.026,29.0,130.0,0.98953,3.39,0.77,13.4 +6.9,0.4,0.17,12.9,0.033,59.0,186.0,0.99754,3.08,0.49,9.4 +6.9,0.4,0.17,12.9,0.033,59.0,186.0,0.99754,3.08,0.49,9.4 +6.9,0.4,0.17,12.9,0.033,59.0,186.0,0.99754,3.08,0.49,9.4 +6.3,0.24,0.29,13.7,0.035,53.0,134.0,0.99567,3.17,0.38,10.6 +6.9,0.4,0.17,12.9,0.033,59.0,186.0,0.99754,3.08,0.49,9.4 +7.4,0.27,0.31,2.4,0.014,15.0,143.0,0.99094,3.03,0.65,12.0 +6.1,0.27,0.28,9.8,0.042,61.0,125.0,0.99532,3.14,0.42,10.2 +6.3,0.24,0.29,13.7,0.035,53.0,134.0,0.99567,3.17,0.38,10.6 +5.0,0.61,0.12,1.3,0.009,65.0,100.0,0.9874,3.26,0.37,13.5 +6.7,0.42,0.39,12.1,0.04,61.0,248.0,0.99794,3.31,0.58,9.7 +6.5,0.33,0.28,6.1,0.018,41.0,103.0,0.99122,3.24,0.32,12.2 +6.9,0.33,0.31,7.7,0.04,29.0,135.0,0.99226,3.11,0.57,12.3 +6.5,0.33,0.28,6.1,0.018,41.0,103.0,0.99122,3.24,0.32,12.2 +6.3,0.15,0.3,1.4,0.022,38.0,100.0,0.99099,3.42,0.57,11.4 +6.5,0.32,0.45,7.7,0.022,31.0,97.0,0.99134,3.2,0.7,12.7 +6.7,0.42,0.39,12.1,0.04,61.0,248.0,0.99794,3.31,0.58,9.7 +7.4,0.25,0.29,6.8,0.02,31.0,113.0,0.99338,3.13,0.29,10.8 +7.6,0.27,0.3,9.2,0.018,23.0,96.0,0.9938,3.08,0.29,11.0 +6.4,0.27,0.45,8.3,0.05,52.0,196.0,0.9955,3.18,0.48,9.5 +6.5,0.25,0.27,17.4,0.064,29.0,140.0,0.99776,3.2,0.49,10.1 +5.6,0.19,0.31,2.7,0.027,11.0,100.0,0.98964,3.46,0.4,13.2 +7.4,0.29,0.48,12.8,0.037,61.5,182.0,0.99808,3.02,0.34,8.8 +6.4,0.34,0.44,8.2,0.043,54.0,201.0,0.99551,3.18,0.48,9.5 +6.6,0.27,0.52,8.1,0.044,53.0,202.0,0.99548,3.18,0.48,9.5 
+6.6,0.26,0.52,8.2,0.047,52.0,191.0,0.99541,3.16,0.47,9.5 +6.4,0.27,0.45,8.3,0.05,52.0,196.0,0.9955,3.18,0.48,9.5 +6.5,0.26,0.5,8.0,0.051,46.0,197.0,0.99536,3.18,0.47,9.5 +6.8,0.25,0.3,11.8,0.043,53.0,133.0,0.99524,3.03,0.58,10.4 +6.3,0.32,0.26,12.0,0.049,63.0,170.0,0.9961,3.14,0.55,9.9 +5.5,0.24,0.45,1.7,0.046,22.0,113.0,0.99224,3.22,0.48,10.0 +6.5,0.25,0.27,17.4,0.064,29.0,140.0,0.99776,3.2,0.49,10.1 +6.6,0.13,0.29,13.9,0.056,33.0,95.0,0.99702,3.17,0.39,9.4 +7.0,0.39,0.21,10.7,0.098,13.0,91.0,0.99657,3.03,0.47,9.3 +7.9,0.21,0.39,2.0,0.057,21.0,138.0,0.99176,3.05,0.52,10.9 +7.0,0.3,0.28,2.2,0.042,21.0,177.0,0.99166,3.2,0.57,11.4 +8.1,0.2,0.3,1.3,0.036,7.0,49.0,0.99242,2.99,0.73,10.3 +8.3,0.18,0.3,1.1,0.033,20.0,57.0,0.99109,3.02,0.51,11.0 +7.9,0.21,0.39,2.0,0.057,21.0,138.0,0.99176,3.05,0.52,10.9 +7.2,0.17,0.34,6.4,0.042,16.0,111.0,0.99278,2.99,0.4,10.8 +8.1,0.2,0.3,1.3,0.036,7.0,49.0,0.99242,2.99,0.73,10.3 +8.3,0.18,0.3,1.1,0.033,20.0,57.0,0.99109,3.02,0.51,11.0 +7.0,0.39,0.21,10.7,0.098,13.0,91.0,0.99657,3.03,0.47,9.3 +6.8,0.21,0.62,6.4,0.041,7.0,113.0,0.99358,2.96,0.59,10.2 +6.9,0.21,0.62,6.3,0.042,7.0,109.0,0.99358,2.96,0.59,10.2 +7.2,0.17,0.34,6.4,0.042,16.0,111.0,0.99278,2.99,0.4,10.8 +6.8,0.26,0.34,15.1,0.06,42.0,162.0,0.99705,3.24,0.52,10.5 +7.2,0.28,0.38,2.0,0.052,23.0,156.0,0.9912,3.13,0.52,11.1 +7.9,0.21,0.39,2.0,0.057,21.0,138.0,0.99176,3.05,0.52,10.9 +7.0,0.3,0.28,2.2,0.042,21.0,177.0,0.99166,3.2,0.57,11.4 +7.4,0.34,0.28,12.1,0.049,31.0,149.0,0.99677,3.22,0.49,10.3 +6.3,0.43,0.32,8.8,0.042,18.0,106.0,0.99172,3.28,0.33,12.9 +6.8,0.41,0.3,8.8,0.045,28.0,131.0,0.9953,3.12,0.59,9.9 +6.3,0.4,0.24,5.1,0.036,43.0,131.0,0.99186,3.24,0.44,11.3 +5.1,0.35,0.26,6.8,0.034,36.0,120.0,0.99188,3.38,0.4,11.5 +5.1,0.35,0.26,6.8,0.034,36.0,120.0,0.99188,3.38,0.4,11.5 +6.3,0.3,0.2,3.7,0.039,34.0,132.0,0.99158,3.0,0.38,10.7 +6.9,0.28,0.28,12.2,0.042,52.0,139.0,0.99522,3.03,0.56,10.4 +7.0,0.33,0.28,5.7,0.033,39.0,204.0,0.99176,3.17,0.64,12.5 +6.7,0.26,0.49,8.1,0.052,48.0,197.0,0.99558,3.19,0.48,9.5 +7.3,0.24,0.3,2.5,0.042,31.0,104.0,0.9911,3.05,0.56,11.3 +6.7,0.46,0.21,4.0,0.034,12.0,88.0,0.99016,3.26,0.54,13.0 +5.1,0.35,0.26,6.8,0.034,36.0,120.0,0.99188,3.38,0.4,11.5 +5.1,0.23,0.18,1.0,0.053,13.0,99.0,0.98956,3.22,0.39,11.5 +6.3,0.4,0.24,5.1,0.036,43.0,131.0,0.99186,3.24,0.44,11.3 +7.1,0.44,0.23,5.8,0.035,24.0,100.0,0.99062,3.15,0.57,13.2 +4.8,0.26,0.23,10.6,0.034,23.0,111.0,0.99274,3.46,0.28,11.5 +6.8,0.31,0.19,3.5,0.086,30.0,130.0,0.993,2.83,0.44,9.6 +6.8,0.31,0.19,3.5,0.086,30.0,130.0,0.993,2.83,0.44,9.6 +7.0,0.15,0.29,16.4,0.058,45.0,110.0,0.9978,3.15,0.37,9.7 +6.5,0.41,0.22,4.8,0.052,49.0,142.0,0.9946,3.14,0.62,9.2 +6.2,0.31,0.23,3.3,0.052,34.0,113.0,0.99429,3.16,0.48,8.4 +8.0,0.27,0.33,1.2,0.05,41.0,103.0,0.99002,3.0,0.45,12.4 +8.0,0.27,0.33,1.2,0.05,41.0,103.0,0.99002,3.0,0.45,12.4 +6.5,0.41,0.22,4.8,0.052,49.0,142.0,0.9946,3.14,0.62,9.2 +6.2,0.31,0.23,3.3,0.052,34.0,113.0,0.99429,3.16,0.48,8.4 +6.7,0.37,0.25,2.5,0.028,24.0,84.0,0.9909,3.14,0.36,11.7 +6.6,0.21,0.5,8.7,0.036,41.0,191.0,0.99294,2.96,0.56,11.0 +7.5,0.26,0.31,1.6,0.032,36.0,109.0,0.99044,2.97,0.43,11.9 +7.5,0.34,0.28,4.0,0.028,46.0,100.0,0.98958,3.2,0.5,13.2 +6.7,0.37,0.25,2.5,0.028,24.0,84.0,0.9909,3.14,0.36,11.7 +6.4,0.32,0.23,16.2,0.055,36.0,176.0,0.9986,3.26,0.54,9.1 +6.7,0.24,0.32,9.0,0.023,20.0,109.0,0.99262,3.34,0.35,12.6 +6.4,0.32,0.23,16.2,0.055,36.0,176.0,0.9986,3.26,0.54,9.1 +7.1,0.39,0.79,1.4,0.194,23.0,90.0,0.99212,3.17,0.46,10.5 +8.2,0.31,0.43,7.0,0.047,18.0,87.0,0.99628,3.23,0.64,10.6 
+6.7,0.24,0.32,9.0,0.023,20.0,109.0,0.99262,3.34,0.35,12.6 +5.9,0.17,0.29,3.1,0.03,32.0,123.0,0.98913,3.41,0.33,13.7 +5.9,0.2,0.23,1.5,0.037,38.0,93.0,0.99021,3.36,0.49,12.0 +6.6,0.32,0.26,4.6,0.031,26.0,120.0,0.99198,3.4,0.73,12.5 +5.9,0.12,0.27,4.8,0.03,40.0,110.0,0.99226,3.55,0.68,12.1 +5.9,0.18,0.29,4.6,0.032,68.0,137.0,0.99159,3.21,0.38,11.3 +5.9,0.2,0.23,1.5,0.037,38.0,93.0,0.99021,3.36,0.49,12.0 +5.4,0.17,0.27,2.7,0.049,28.0,104.0,0.99224,3.46,0.55,10.3 +6.1,0.21,0.3,6.3,0.039,47.0,136.0,0.99068,3.27,0.31,12.7 +7.3,0.25,0.26,7.2,0.048,52.0,207.0,0.99587,3.12,0.37,9.2 +7.3,0.25,0.26,7.2,0.048,52.0,207.0,0.99587,3.12,0.37,9.2 +6.2,0.22,0.3,12.4,0.054,108.0,152.0,0.99728,3.1,0.47,9.5 +6.5,0.27,0.19,6.6,0.045,98.0,175.0,0.99364,3.16,0.34,10.1 +6.5,0.27,0.19,6.6,0.045,98.0,175.0,0.99364,3.16,0.34,10.1 +6.6,0.39,0.22,4.0,0.038,17.0,98.0,0.99018,3.25,0.53,13.0 +6.0,0.31,0.38,4.8,0.04,41.0,101.0,0.98968,3.24,0.56,13.1 +8.4,0.23,0.32,1.3,0.048,59.0,113.0,0.99178,3.1,0.55,11.0 +7.3,0.25,0.26,7.2,0.048,52.0,207.0,0.99587,3.12,0.37,9.2 +6.0,0.22,0.25,11.1,0.056,112.0,177.0,0.9961,3.08,0.36,9.4 +6.2,0.22,0.3,12.4,0.054,108.0,152.0,0.99728,3.1,0.47,9.5 +6.1,0.23,0.27,9.8,0.055,74.0,134.0,0.99534,3.16,0.4,10.2 +6.5,0.27,0.19,6.6,0.045,98.0,175.0,0.99364,3.16,0.34,10.1 +7.3,0.36,0.54,13.3,0.054,63.0,193.0,0.99864,3.06,0.49,8.6 +7.6,0.37,0.51,11.7,0.094,58.0,181.0,0.99776,2.91,0.51,9.0 +6.7,0.26,0.51,8.0,0.062,50.0,194.0,0.99545,3.13,0.5,9.6 +7.4,0.22,0.27,1.6,0.057,45.0,98.0,0.99299,3.29,0.44,9.9 +6.1,0.22,0.28,16.55,0.059,54.0,135.0,0.99665,3.2,0.38,10.5 +7.1,0.28,0.31,1.5,0.053,20.0,98.0,0.99069,3.15,0.5,11.4 +6.5,0.35,0.31,10.2,0.069,58.0,170.0,0.99692,3.18,0.49,9.4 +6.8,0.73,0.2,6.6,0.054,25.0,65.0,0.99324,3.12,0.28,11.1 +6.0,0.28,0.24,17.8,0.047,42.0,111.0,0.99896,3.1,0.45,8.9 +6.0,0.28,0.24,17.8,0.047,42.0,111.0,0.99896,3.1,0.45,8.9 +7.1,0.2,0.37,1.5,0.049,28.0,129.0,0.99226,3.15,0.52,10.8 +6.8,0.33,0.31,7.4,0.045,34.0,143.0,0.99226,3.06,0.55,12.2 +6.0,0.28,0.24,17.8,0.047,42.0,111.0,0.99896,3.1,0.45,8.9 +7.2,0.24,0.36,2.0,0.029,21.0,63.0,0.99076,3.13,0.63,12.5 +6.8,0.33,0.31,7.4,0.045,34.0,143.0,0.99226,3.06,0.55,12.2 +7.2,0.24,0.36,2.0,0.029,21.0,63.0,0.99076,3.13,0.63,12.5 +6.0,0.28,0.24,17.8,0.047,42.0,111.0,0.99896,3.1,0.45,8.9 +6.2,0.27,0.26,12.1,0.046,43.0,127.0,0.9951,3.16,0.37,10.8 +6.4,0.38,0.26,8.2,0.043,28.0,98.0,0.99234,2.99,0.31,11.4 +7.1,0.2,0.37,1.5,0.049,28.0,129.0,0.99226,3.15,0.52,10.8 +6.0,0.21,0.3,8.7,0.036,47.0,127.0,0.99368,3.18,0.39,10.6 +7.0,0.34,0.1,3.5,0.044,17.0,63.0,0.9937,3.01,0.39,9.2 +5.9,0.435,0.16,6.4,0.031,21.0,134.0,0.99151,3.24,0.46,12.2 +7.0,0.25,0.33,2.1,0.021,17.0,76.0,0.99021,3.26,0.45,12.3 +6.7,0.26,0.29,7.7,0.038,40.0,179.0,0.99479,3.23,0.56,10.4 +7.0,0.24,0.3,12.3,0.035,72.0,172.0,0.9954,2.99,0.57,10.4 +8.5,0.23,0.34,1.3,0.035,54.0,110.0,0.99176,3.07,0.55,11.0 +6.0,0.21,0.3,8.7,0.036,47.0,127.0,0.99368,3.18,0.39,10.6 +7.0,0.34,0.1,3.5,0.044,17.0,63.0,0.9937,3.01,0.39,9.2 +4.8,0.65,0.12,1.1,0.013,4.0,10.0,0.99246,3.32,0.36,13.5 +6.1,0.22,0.38,2.8,0.144,12.0,65.0,0.9908,2.95,0.64,11.4 +5.8,0.27,0.26,3.5,0.071,26.0,69.0,0.98994,3.1,0.38,11.5 +5.0,0.455,0.18,1.9,0.036,33.0,106.0,0.98746,3.21,0.83,14.0 +6.5,0.33,0.3,3.8,0.036,34.0,88.0,0.99028,3.25,0.63,12.5 +6.5,0.33,0.3,3.8,0.036,34.0,88.0,0.99028,3.25,0.63,12.5 +6.7,0.31,0.3,2.4,0.038,30.0,83.0,0.98867,3.09,0.36,12.8 +6.2,0.39,0.24,4.8,0.037,45.0,138.0,0.99174,3.23,0.43,11.2 +6.2,0.39,0.24,4.8,0.037,45.0,138.0,0.99174,3.23,0.43,11.2 +7.1,0.37,0.3,6.2,0.04,49.0,139.0,0.99021,3.17,0.27,13.6 
+7.2,0.23,0.82,1.3,0.149,70.0,109.0,0.99304,2.93,0.42,9.2 +6.5,0.33,0.3,3.8,0.036,34.0,88.0,0.99028,3.25,0.63,12.5 +7.2,0.25,0.32,1.5,0.054,24.0,105.0,0.99154,3.17,0.48,11.1 +6.2,0.39,0.24,4.8,0.037,45.0,138.0,0.99174,3.23,0.43,11.2 +4.7,0.455,0.18,1.9,0.036,33.0,106.0,0.98746,3.21,0.83,14.0 +7.1,0.37,0.3,6.2,0.04,49.0,139.0,0.99021,3.17,0.27,13.6 +6.2,0.28,0.51,7.9,0.056,49.0,206.0,0.9956,3.18,0.52,9.4 +6.4,0.35,0.28,1.6,0.037,31.0,113.0,0.98779,3.12,0.4,14.2 +6.6,0.31,0.28,1.4,0.035,28.0,107.0,0.98836,3.0,0.4,13.2 +7.4,0.25,0.37,2.6,0.05,24.0,132.0,0.99138,3.04,0.53,11.2 +7.3,0.36,0.34,14.8,0.057,46.0,173.0,0.99751,3.14,0.57,10.2 +6.7,0.31,0.3,2.4,0.038,30.0,83.0,0.98867,3.09,0.36,12.8 +8.6,0.31,0.3,0.9,0.045,16.0,109.0,0.99249,2.95,0.39,10.1 +8.6,0.31,0.3,0.9,0.045,16.0,109.0,0.99249,2.95,0.39,10.1 +8.6,0.22,0.33,1.2,0.031,38.0,95.0,0.99239,2.83,0.31,10.3 +6.9,0.14,0.29,9.9,0.056,30.0,91.0,0.99512,3.19,0.33,9.9 +6.5,0.22,0.31,3.9,0.046,17.0,106.0,0.99098,3.15,0.31,11.5 +6.6,0.32,0.47,15.6,0.063,27.0,173.0,0.99872,3.18,0.56,9.0 +6.6,0.32,0.47,15.6,0.063,27.0,173.0,0.99872,3.18,0.56,9.0 +6.1,0.28,0.26,1.5,0.03,25.0,101.0,0.98894,3.03,0.41,12.1 +6.2,0.3,0.28,1.6,0.036,28.0,106.0,0.988245,3.14,0.41,13.3 +6.9,0.22,0.28,7.8,0.05,43.0,116.0,0.99326,3.22,0.6,11.5 +8.7,0.31,0.21,5.6,0.039,28.0,67.0,0.99328,2.96,0.52,11.0 +7.3,0.27,0.3,1.3,0.04,26.0,84.0,0.99222,3.28,0.53,10.7 +7.0,0.46,0.2,16.7,0.046,50.0,184.0,0.99898,3.08,0.56,9.4 +5.7,0.23,0.25,7.95,0.042,16.0,108.0,0.99486,3.44,0.61,10.3 +6.5,0.36,0.36,6.7,0.185,51.5,151.0,0.99528,3.17,0.42,9.3 +8.2,0.18,0.38,1.1,0.04,41.0,92.0,0.99062,2.88,0.6,12.0 +6.2,0.27,0.32,6.3,0.048,47.0,159.0,0.99282,3.21,0.6,11.0 +6.9,0.4,0.37,8.9,0.053,36.0,148.0,0.996,3.16,0.5,9.3 +4.9,0.345,0.34,1.0,0.068,32.0,143.0,0.99138,3.24,0.4,10.1 +7.2,0.23,0.39,1.5,0.053,26.0,106.0,0.99166,3.18,0.47,11.1 +6.4,0.2,0.15,6.6,0.046,26.0,113.0,0.99408,2.99,0.58,9.9 +6.1,0.27,0.32,6.2,0.048,47.0,161.0,0.99281,3.22,0.6,11.0 +6.2,0.27,0.32,6.3,0.048,47.0,159.0,0.99282,3.21,0.6,11.0 +6.0,0.3,0.33,2.1,0.042,31.0,127.0,0.98964,3.32,0.42,12.5 +6.1,0.3,0.32,2.2,0.042,41.0,142.0,0.98952,3.31,0.44,12.7 +5.7,0.14,0.3,5.4,0.045,26.0,105.0,0.99469,3.32,0.45,9.3 +6.9,0.4,0.37,8.9,0.053,36.0,148.0,0.996,3.16,0.5,9.3 +4.9,0.345,0.34,1.0,0.068,32.0,143.0,0.99138,3.24,0.4,10.1 +6.3,0.33,0.2,17.9,0.066,36.0,161.0,0.9991,3.14,0.51,8.8 +7.0,0.16,0.3,2.6,0.043,34.0,90.0,0.99047,2.88,0.47,11.2 +8.4,0.22,0.3,1.3,0.038,45.0,122.0,0.99178,3.13,0.54,10.8 +6.3,0.33,0.2,17.9,0.066,36.0,161.0,0.9991,3.14,0.51,8.8 +7.0,0.16,0.3,2.6,0.043,34.0,90.0,0.99047,2.88,0.47,11.2 +5.4,0.24,0.18,2.3,0.05,22.0,145.0,0.99207,3.24,0.46,10.3 +7.7,0.31,0.36,4.3,0.026,15.0,87.0,0.99152,3.11,0.48,12.0 +5.6,0.185,0.19,7.1,0.048,36.0,110.0,0.99438,3.26,0.41,9.5 +5.6,0.185,0.19,7.1,0.048,36.0,110.0,0.99438,3.26,0.41,9.5 +6.6,0.43,0.24,11.9,0.04,54.0,159.0,0.99622,3.14,0.54,9.8 +7.6,0.39,0.46,11.7,0.084,55.0,170.0,0.99773,2.91,0.51,9.0 +7.2,0.58,0.27,5.8,0.032,40.0,118.0,0.99088,3.17,0.53,13.0 +6.0,0.34,0.32,3.8,0.044,13.0,116.0,0.99108,3.39,0.44,11.8 +7.5,0.35,0.48,12.4,0.056,61.0,176.5,0.99803,2.97,0.52,8.8 +7.3,0.38,0.23,6.5,0.05,18.0,102.0,0.99304,3.1,0.55,11.2 +5.4,0.185,0.19,7.1,0.048,36.0,110.0,0.99438,3.26,0.41,9.5 +6.3,0.27,0.51,7.6,0.049,35.0,200.0,0.99548,3.16,0.54,9.4 +6.5,0.29,0.52,7.9,0.049,35.0,192.0,0.99551,3.16,0.51,9.5 +6.4,0.17,0.3,2.8,0.034,33.0,125.0,0.99152,3.03,0.49,10.4 +6.7,0.18,0.31,10.6,0.035,42.0,143.0,0.99572,3.08,0.49,9.8 +6.4,0.17,0.3,2.8,0.034,33.0,125.0,0.99152,3.03,0.49,10.4 
+6.8,0.37,0.67,1.5,0.175,16.0,98.0,0.99244,3.06,0.56,10.3 +6.3,0.27,0.51,7.6,0.049,35.0,200.0,0.99548,3.16,0.54,9.4 +6.5,0.29,0.52,7.9,0.049,35.0,192.0,0.99551,3.16,0.51,9.5 +6.1,0.24,0.26,1.7,0.033,61.0,134.0,0.9903,3.19,0.81,11.9 +7.0,0.32,0.29,7.6,0.025,35.0,124.0,0.99162,3.15,0.65,12.8 +6.9,0.27,0.25,7.5,0.03,18.0,117.0,0.99116,3.09,0.38,13.0 +6.5,0.29,0.53,1.7,0.04,41.0,192.0,0.9922,3.26,0.59,10.4 +6.5,0.29,0.52,1.7,0.034,41.0,193.0,0.99223,3.25,0.59,10.4 +6.1,0.22,0.25,12.1,0.035,54.0,135.0,0.99481,3.21,0.4,10.7 +6.3,0.22,0.27,4.5,0.036,81.0,157.0,0.9928,3.05,0.76,10.7 +6.1,0.24,0.26,1.7,0.033,61.0,134.0,0.9903,3.19,0.81,11.9 +5.6,0.23,0.25,8.0,0.043,31.0,101.0,0.99429,3.19,0.42,10.4 +7.0,0.32,0.29,7.6,0.025,35.0,124.0,0.99162,3.15,0.65,12.8 +6.8,0.11,0.27,8.6,0.044,45.0,104.0,0.99454,3.2,0.37,9.9 +6.8,0.11,0.27,8.6,0.044,45.0,104.0,0.99454,3.2,0.37,9.9 +7.3,0.23,0.41,14.6,0.048,73.0,223.0,0.99863,3.16,0.71,9.4 +6.1,0.2,0.17,1.6,0.048,46.0,129.0,0.991,3.3,0.43,11.4 +6.8,0.11,0.27,8.6,0.044,45.0,104.0,0.99454,3.2,0.37,9.9 +7.3,0.23,0.41,14.6,0.048,73.0,223.0,0.99863,3.16,0.71,9.4 +6.9,0.2,0.41,1.1,0.06,36.0,104.0,0.99317,2.99,0.39,9.2 +6.7,0.19,0.32,3.7,0.041,26.0,76.0,0.99173,2.9,0.57,10.5 +6.7,0.28,0.34,8.9,0.048,32.0,111.0,0.99455,3.25,0.54,11.0 +6.7,0.28,0.34,8.9,0.048,32.0,111.0,0.99455,3.25,0.54,11.0 +8.0,0.37,0.31,4.7,0.038,3.0,127.0,0.99186,2.9,0.72,12.1 +6.7,0.28,0.34,8.9,0.048,32.0,111.0,0.99455,3.25,0.54,11.0 +6.0,0.26,0.29,3.1,0.041,37.0,144.0,0.98944,3.22,0.39,12.8 +6.4,0.24,0.49,5.8,0.053,25.0,120.0,0.9942,3.01,0.98,10.5 +6.4,0.24,0.49,5.8,0.053,25.0,120.0,0.9942,3.01,0.98,10.5 +6.4,0.24,0.49,5.8,0.053,25.0,120.0,0.9942,3.01,0.98,10.5 +6.4,0.25,0.57,1.0,0.062,21.0,122.0,0.99238,3.0,0.4,9.5 +6.1,0.25,0.48,15.8,0.052,25.0,94.0,0.99782,3.07,0.45,9.2 +6.8,0.14,0.35,1.5,0.047,40.0,117.0,0.99111,3.07,0.72,11.1 +6.5,0.38,0.26,5.2,0.042,33.0,112.0,0.99067,3.06,0.5,12.3 +6.8,0.14,0.35,1.5,0.047,40.0,117.0,0.99111,3.07,0.72,11.1 +5.4,0.15,0.32,2.5,0.037,10.0,51.0,0.98878,3.04,0.58,12.6 +6.4,0.25,0.57,1.0,0.062,21.0,122.0,0.99238,3.0,0.4,9.5 +6.1,0.25,0.48,15.8,0.052,25.0,94.0,0.99782,3.07,0.45,9.2 +6.8,0.22,0.32,5.9,0.054,40.0,152.0,0.9938,3.2,0.57,10.8 +7.2,0.21,0.29,3.1,0.044,39.0,122.0,0.99143,3.0,0.6,11.3 +6.0,0.26,0.29,3.1,0.041,37.0,144.0,0.98944,3.22,0.39,12.8 +6.4,0.24,0.49,5.8,0.053,25.0,120.0,0.9942,3.01,0.98,10.5 +6.5,0.46,0.24,11.5,0.051,56.0,171.0,0.99588,3.08,0.56,9.8 +6.5,0.18,0.48,18.0,0.054,56.0,183.0,1.00038,2.98,0.61,8.5 +6.2,0.32,0.12,4.8,0.054,6.0,97.0,0.99424,3.16,0.5,9.3 +7.2,0.4,0.24,8.5,0.055,45.0,151.0,0.99626,3.2,0.52,9.2 +5.9,0.23,0.24,1.6,0.037,32.0,115.0,0.99076,3.21,0.51,11.4 +6.4,0.18,0.48,18.0,0.054,56.0,183.0,1.00038,2.98,0.61,8.5 +6.2,0.32,0.12,4.8,0.054,6.0,97.0,0.99424,3.16,0.5,9.3 +6.4,0.37,0.12,5.9,0.056,6.0,91.0,0.99536,3.06,0.46,8.4 +7.0,0.23,0.42,1.1,0.062,35.0,100.0,0.99318,3.04,0.4,9.2 +7.2,0.4,0.24,8.5,0.055,45.0,151.0,0.99626,3.2,0.52,9.2 +7.6,0.19,0.37,13.1,0.033,52.0,151.0,0.99726,3.18,0.79,10.4 +6.0,0.28,0.27,4.1,0.046,50.0,147.0,0.99126,3.27,0.56,11.6 +6.2,0.32,0.45,2.9,0.029,37.0,94.0,0.98998,3.25,0.6,12.4 +7.6,0.19,0.37,13.1,0.033,52.0,151.0,0.99726,3.18,0.79,10.4 +6.4,0.26,0.26,1.1,0.052,22.0,176.0,0.99304,3.09,0.54,9.3 +5.9,0.25,0.27,1.5,0.029,37.0,81.0,0.9892,3.2,0.46,12.2 +6.1,0.28,0.3,7.75,0.031,33.0,139.0,0.99296,3.22,0.46,11.0 +6.9,0.19,0.38,1.15,0.023,30.0,105.0,0.99047,3.11,0.38,11.4 +6.4,0.29,0.57,1.0,0.06,15.0,120.0,0.9924,3.06,0.41,9.5 +6.8,0.27,0.22,17.8,0.034,16.0,116.0,0.9989,3.07,0.53,9.2 
+7.5,0.26,0.38,5.7,0.021,23.0,125.0,0.99338,3.13,0.62,11.1 +6.8,0.27,0.22,17.8,0.034,16.0,116.0,0.9989,3.07,0.53,9.2 +6.4,0.2,0.22,7.4,0.032,53.0,172.0,0.99404,3.24,0.58,11.0 +7.3,0.33,0.22,1.4,0.041,40.0,177.0,0.99287,3.14,0.48,9.9 +7.3,0.34,0.22,1.4,0.044,43.0,176.0,0.99286,3.14,0.46,9.9 +6.4,0.29,0.57,1.0,0.06,15.0,120.0,0.9924,3.06,0.41,9.5 +6.1,1.1,0.16,4.4,0.033,8.0,109.0,0.99058,3.35,0.47,12.4 +6.3,0.24,0.29,1.6,0.052,48.0,185.0,0.9934,3.21,0.5,9.4 +6.2,0.24,0.22,7.9,0.053,45.0,149.0,0.99545,3.23,0.52,9.3 +7.4,0.16,0.27,15.5,0.05,25.0,135.0,0.9984,2.9,0.43,8.7 +7.4,0.16,0.27,15.5,0.05,25.0,135.0,0.9984,2.9,0.43,8.7 +7.4,0.16,0.27,15.5,0.05,25.0,135.0,0.9984,2.9,0.43,8.7 +7.4,0.16,0.27,15.5,0.05,25.0,135.0,0.9984,2.9,0.43,8.7 +7.2,0.17,0.28,17.55,0.05,33.0,154.0,0.99971,2.94,0.43,9.0 +6.9,0.19,0.35,13.5,0.038,49.0,118.0,0.99546,3.0,0.63,10.7 +6.9,0.19,0.35,13.5,0.038,49.0,118.0,0.99546,3.0,0.63,10.7 +6.8,0.16,0.36,1.3,0.034,32.0,98.0,0.99058,3.02,0.58,11.3 +7.4,0.16,0.27,15.5,0.05,25.0,135.0,0.9984,2.9,0.43,8.7 +6.8,0.3,0.27,11.6,0.028,22.0,97.0,0.99314,2.96,0.38,11.7 +6.2,0.24,0.22,7.9,0.053,45.0,149.0,0.99545,3.23,0.52,9.3 +7.4,0.16,0.27,15.5,0.05,25.0,135.0,0.9984,2.9,0.43,8.7 +7.2,0.17,0.28,17.55,0.05,33.0,154.0,0.99971,2.94,0.43,9.0 +6.8,0.3,0.27,11.6,0.028,22.0,97.0,0.99314,2.96,0.38,11.7 +6.5,0.43,0.18,13.15,0.032,25.0,131.0,0.99565,3.23,0.51,10.7 +6.6,0.17,0.36,1.9,0.036,38.0,110.0,0.99056,3.05,0.54,11.4 +6.9,0.19,0.35,13.5,0.038,49.0,118.0,0.99546,3.0,0.63,10.7 +6.8,0.16,0.36,1.3,0.034,32.0,98.0,0.99058,3.02,0.58,11.3 +6.4,0.41,0.01,6.1,0.048,20.0,70.0,0.99362,3.19,0.42,10.0 +6.4,0.41,0.01,6.1,0.048,20.0,70.0,0.99362,3.19,0.42,10.0 +7.4,0.36,0.32,1.9,0.036,27.0,119.0,0.99196,3.15,0.49,11.2 +6.1,0.17,0.21,1.9,0.09,44.0,130.0,0.99255,3.07,0.41,9.7 +5.5,0.28,0.21,1.6,0.032,23.0,85.0,0.99027,3.42,0.42,12.5 +6.6,0.5,0.26,11.3,0.029,32.0,110.0,0.99302,3.27,0.78,12.9 +7.1,0.44,0.27,8.4,0.057,60.0,160.0,0.99257,3.16,0.36,11.8 +6.9,0.38,0.28,8.3,0.062,22.0,166.0,0.99506,3.16,0.72,10.6 +7.1,0.44,0.27,8.4,0.057,60.0,160.0,0.99257,3.16,0.36,11.8 +6.2,0.24,0.28,12.2,0.049,54.0,133.0,0.9952,3.19,0.37,10.7 +6.1,0.28,0.27,8.0,0.048,41.0,162.0,0.99498,3.21,0.51,9.9 +7.6,0.26,0.32,1.3,0.048,23.0,76.0,0.9903,2.96,0.46,12.0 +7.5,0.16,0.38,12.7,0.043,70.5,163.0,0.99706,3.15,0.82,10.4 +6.5,0.36,0.16,1.3,0.054,11.0,107.0,0.99398,3.19,0.39,8.5 +6.6,0.35,0.19,10.5,0.06,15.0,82.0,0.99588,3.13,0.38,9.9 +5.7,0.25,0.26,12.5,0.049,52.5,120.0,0.99691,3.08,0.45,9.4 +7.4,0.37,0.26,9.6,0.05,33.0,134.0,0.99608,3.13,0.46,10.4 +5.7,0.25,0.21,1.5,0.044,21.0,108.0,0.99142,3.3,0.59,11.0 +5.8,0.23,0.21,1.5,0.044,21.0,110.0,0.99138,3.3,0.57,11.0 +5.4,0.265,0.28,7.8,0.052,27.0,91.0,0.99432,3.19,0.38,10.4 +5.7,0.25,0.27,10.8,0.05,58.0,116.0,0.99592,3.1,0.5,9.8 +5.7,0.25,0.26,12.5,0.049,52.5,106.0,0.99691,3.08,0.45,9.4 +5.9,0.23,0.28,8.6,0.046,37.0,142.0,0.99432,3.23,0.53,10.6 +6.2,0.3,0.32,1.2,0.052,32.0,185.0,0.99266,3.28,0.44,10.1 +6.5,0.33,0.24,14.5,0.048,20.0,96.0,0.99456,3.06,0.3,11.5 +7.4,0.26,0.29,3.7,0.048,14.0,73.0,0.9915,3.06,0.45,11.4 +7.0,0.2,0.4,1.1,0.058,30.0,93.0,0.99322,3.03,0.38,9.2 +6.5,0.21,0.42,1.1,0.059,33.0,101.0,0.9927,3.12,0.38,9.7 +7.3,0.25,0.27,3.8,0.047,16.0,79.0,0.99173,3.07,0.46,11.3 +6.8,0.27,0.24,4.6,0.098,36.0,127.0,0.99412,3.15,0.49,9.6 +6.7,0.24,0.3,10.2,0.07,44.0,179.0,0.99666,2.86,0.46,8.9 +6.4,0.14,0.28,7.9,0.057,21.0,82.0,0.99425,3.26,0.36,10.0 +6.4,0.5,0.2,2.4,0.059,19.0,112.0,0.99314,3.18,0.4,9.2 +6.6,0.41,0.27,10.7,0.11,20.0,103.0,0.99672,3.08,0.41,9.0 
+6.4,0.25,0.28,4.9,0.03,29.0,98.0,0.99024,3.09,0.58,12.8 +6.6,0.41,0.27,10.7,0.11,20.0,103.0,0.99672,3.08,0.41,9.0 +8.0,0.25,0.35,1.1,0.054,13.0,136.0,0.99366,3.08,0.55,9.5 +6.4,0.14,0.28,7.9,0.057,21.0,82.0,0.99425,3.26,0.36,10.0 +6.6,0.21,0.34,5.6,0.046,30.0,140.0,0.99299,3.22,0.38,11.0 +6.4,0.5,0.2,2.4,0.059,19.0,112.0,0.99314,3.18,0.4,9.2 +6.3,0.29,0.23,14.2,0.037,24.0,99.0,0.99528,3.08,0.38,10.6 +6.9,0.37,0.23,9.5,0.057,54.0,166.0,0.99568,3.23,0.42,10.0 +6.9,0.37,0.23,9.5,0.057,54.0,166.0,0.99568,3.23,0.42,10.0 +5.7,0.31,0.28,4.1,0.03,22.0,86.0,0.99062,3.31,0.38,11.7 +6.9,0.45,0.27,4.7,0.035,17.0,80.0,0.99058,3.12,0.36,12.5 +6.9,0.3,0.45,1.4,0.039,36.0,122.0,0.99059,3.07,0.47,11.1 +5.3,0.23,0.56,0.9,0.041,46.0,141.0,0.99119,3.16,0.62,9.7 +6.8,0.3,0.26,20.3,0.037,45.0,150.0,0.99727,3.04,0.38,12.3 +6.7,0.28,0.42,3.5,0.035,43.0,105.0,0.99021,3.18,0.38,12.2 +5.0,0.255,0.22,2.7,0.043,46.0,153.0,0.99238,3.75,0.76,11.3 +7.6,0.4,0.27,1.2,0.053,23.0,193.0,0.99164,3.22,0.38,11.6 +5.5,0.21,0.25,1.2,0.04,18.0,75.0,0.99006,3.31,0.56,11.3 +6.0,0.2,0.25,2.0,0.041,30.0,95.0,0.99078,3.27,0.56,11.1 +6.1,0.17,0.29,1.1,0.041,32.0,92.0,0.99036,3.26,0.57,11.2 +7.5,0.21,0.29,1.5,0.046,35.0,107.0,0.99123,3.15,0.45,11.3 +7.3,0.26,0.32,1.2,0.041,29.0,94.0,0.98978,3.07,0.45,12.0 +6.2,0.35,0.2,18.1,0.069,33.0,158.0,0.99908,3.15,0.5,8.8 +6.2,0.35,0.2,18.1,0.069,33.0,158.0,0.99908,3.15,0.5,8.8 +6.5,0.43,0.31,3.6,0.046,19.0,143.0,0.99022,3.15,0.34,12.0 +6.5,0.4,0.31,3.5,0.046,22.0,147.0,0.99024,3.15,0.31,12.0 +7.4,0.28,0.5,12.1,0.049,48.0,122.0,0.9973,3.01,0.44,9.0 +6.3,0.23,0.22,17.45,0.054,42.0,151.0,0.99853,3.12,0.6,9.3 +6.2,0.34,0.25,12.1,0.059,33.0,171.0,0.99769,3.14,0.56,8.7 +6.6,0.44,0.32,3.0,0.095,13.0,75.0,0.98954,3.1,0.63,12.8 +6.0,0.13,0.36,1.6,0.052,23.0,72.0,0.98974,3.1,0.5,11.5 +6.3,0.17,0.23,5.7,0.048,44.0,147.0,0.99382,3.08,0.54,10.0 +6.3,0.18,0.22,5.6,0.047,45.0,147.0,0.99383,3.09,0.54,10.0 +6.7,0.31,0.34,6.8,0.059,51.0,215.0,0.99538,3.33,0.56,10.3 +6.6,0.33,0.32,15.6,0.054,62.0,227.0,0.99734,3.25,0.56,10.4 +6.3,0.34,0.31,6.0,0.02,18.0,68.0,0.98981,3.22,0.29,13.4 +6.8,0.29,0.32,1.8,0.032,18.0,130.0,0.99095,3.05,0.62,11.2 +7.4,0.31,0.26,8.6,0.048,47.0,206.0,0.9964,3.26,0.36,9.1 +7.4,0.31,0.26,8.6,0.048,47.0,206.0,0.9964,3.26,0.36,9.1 +5.7,0.25,0.27,11.5,0.04,24.0,120.0,0.99411,3.33,0.31,10.8 +6.8,0.27,0.28,7.8,0.038,26.0,89.0,0.9915,3.24,0.34,12.5 +5.9,0.26,0.24,2.4,0.046,27.0,132.0,0.99234,3.63,0.73,11.3 +5.9,0.65,0.23,5.0,0.035,20.0,128.0,0.99016,3.46,0.48,12.8 +7.4,0.31,0.26,8.6,0.048,47.0,206.0,0.9964,3.26,0.36,9.1 +6.6,0.23,0.32,1.5,0.041,8.0,72.0,0.98949,3.22,0.39,12.7 +6.8,0.18,0.35,5.4,0.054,53.0,143.0,0.99287,3.1,0.54,11.0 +6.8,0.28,0.29,11.9,0.052,51.0,149.0,0.99544,3.02,0.58,10.4 +6.8,0.28,0.29,11.9,0.052,51.0,149.0,0.99544,3.02,0.58,10.4 +5.9,0.27,0.27,9.0,0.051,43.0,136.0,0.9941,3.25,0.53,10.7 +6.1,0.25,0.28,10.0,0.055,56.0,131.0,0.994,3.22,0.35,10.9 +6.8,0.28,0.29,11.9,0.052,51.0,149.0,0.99544,3.02,0.58,10.4 +6.8,0.26,0.29,11.9,0.052,54.0,160.0,0.99546,3.03,0.58,10.4 +7.1,0.13,0.29,15.5,0.064,56.0,115.5,0.99737,3.16,0.41,9.7 +6.8,0.18,0.35,5.4,0.054,53.0,143.0,0.99287,3.1,0.54,11.0 +6.2,0.2,0.25,15.0,0.055,8.0,120.0,0.99767,3.19,0.53,9.6 +5.8,0.24,0.28,1.4,0.038,40.0,76.0,0.98711,3.1,0.29,13.9 +7.6,0.48,0.31,9.4,0.046,6.0,194.0,0.99714,3.07,0.61,9.4 +7.4,0.26,0.32,3.7,0.032,29.0,193.0,0.99134,3.1,0.67,12.5 +6.2,0.2,0.25,15.0,0.055,8.0,120.0,0.99767,3.19,0.53,9.6 +6.1,0.3,0.47,1.4,0.049,50.0,187.0,0.9927,3.19,0.45,9.5 +6.2,0.32,0.5,6.5,0.048,61.0,186.0,0.9948,3.19,0.45,9.6 
+6.1,0.3,0.47,1.4,0.049,50.0,187.0,0.9927,3.19,0.45,9.5 +6.3,0.34,0.52,6.3,0.047,63.0,186.0,0.99481,3.18,0.44,9.6 +7.4,0.16,0.3,13.7,0.056,33.0,168.0,0.99825,2.9,0.44,8.7 +7.4,0.16,0.3,13.7,0.056,33.0,168.0,0.99825,2.9,0.44,8.7 +7.4,0.16,0.3,13.7,0.056,33.0,168.0,0.99825,2.9,0.44,8.7 +7.4,0.16,0.3,13.7,0.056,33.0,168.0,0.99825,2.9,0.44,8.7 +7.4,0.16,0.3,13.7,0.056,33.0,168.0,0.99825,2.9,0.44,8.7 +7.4,0.16,0.3,13.7,0.056,33.0,168.0,0.99825,2.9,0.44,8.7 +7.2,0.26,0.38,1.5,0.061,12.0,120.0,0.99192,3.18,0.46,10.4 +7.0,0.31,0.35,1.6,0.063,13.0,119.0,0.99184,3.22,0.5,10.7 +6.6,0.22,0.35,1.4,0.05,23.0,83.0,0.99019,3.17,0.48,12.0 +5.8,0.23,0.31,3.5,0.044,35.0,158.0,0.98998,3.19,0.37,12.1 +6.3,0.17,0.32,1.0,0.04,39.0,118.0,0.98886,3.31,0.4,13.1 +6.0,0.19,0.26,1.4,0.039,30.0,104.0,0.98998,3.32,0.41,12.4 +6.7,0.21,0.34,1.5,0.035,45.0,123.0,0.98949,3.24,0.36,12.6 +7.4,0.16,0.3,13.7,0.056,33.0,168.0,0.99825,2.9,0.44,8.7 +6.6,0.22,0.37,1.6,0.04,31.0,101.0,0.99009,3.15,0.66,12.0 +6.8,0.34,0.27,5.2,0.06,14.0,169.0,0.99252,3.27,0.57,11.6 +7.1,0.34,0.86,1.4,0.174,36.0,99.0,0.99288,2.92,0.5,9.3 +6.3,0.24,0.22,11.9,0.05,65.0,179.0,0.99659,3.06,0.58,9.3 +6.9,0.35,0.39,2.4,0.048,25.0,157.0,0.99133,3.2,0.54,11.1 +6.8,0.24,0.33,3.2,0.049,68.0,161.0,0.99324,3.1,0.69,10.2 +6.4,0.25,0.33,1.7,0.037,35.0,113.0,0.99164,3.23,0.66,10.6 +5.8,0.19,0.33,4.2,0.038,49.0,133.0,0.99107,3.16,0.42,11.3 +6.9,0.24,0.4,15.4,0.052,81.0,198.0,0.9986,3.2,0.69,9.4 +6.5,0.31,0.61,13.0,0.053,31.0,123.0,0.99708,3.09,0.5,9.3 +6.6,0.25,0.32,5.6,0.039,15.0,68.0,0.99163,2.96,0.52,11.1 +7.5,0.38,0.56,9.7,0.055,15.0,170.0,0.99605,3.13,0.65,9.9 +6.2,0.3,0.3,2.5,0.041,29.0,82.0,0.99065,3.31,0.61,11.8 +6.4,0.33,0.28,4.0,0.04,24.0,81.0,0.9903,3.26,0.64,12.6 +6.9,0.24,0.4,15.4,0.052,81.0,198.0,0.9986,3.2,0.69,9.4 +7.6,0.27,0.32,1.2,0.043,23.0,72.0,0.99236,3.06,0.68,10.5 +5.9,0.24,0.34,2.0,0.037,40.0,108.0,0.98948,3.19,0.5,12.3 +5.3,0.33,0.3,1.2,0.048,25.0,119.0,0.99045,3.32,0.62,11.3 +6.4,0.21,0.21,5.1,0.097,21.0,105.0,0.9939,3.07,0.46,9.6 +7.0,0.22,0.3,1.4,0.04,14.0,63.0,0.98985,3.2,0.33,12.0 +7.8,0.27,0.35,1.2,0.05,36.0,140.0,0.99138,3.09,0.45,11.2 +6.7,0.2,0.24,6.5,0.044,28.0,100.0,0.99348,3.12,0.33,10.2 +8.1,0.27,0.33,1.3,0.045,26.0,100.0,0.99066,2.98,0.44,12.4 +6.7,0.2,0.24,6.5,0.044,28.0,100.0,0.99348,3.12,0.33,10.2 +7.1,0.45,0.24,2.7,0.04,24.0,87.0,0.98862,2.94,0.38,13.4 +5.8,0.22,0.29,1.3,0.036,25.0,68.0,0.98865,3.24,0.35,12.6 +6.3,0.3,0.48,7.4,0.053,34.0,149.0,0.99472,3.18,0.53,9.8 +7.9,0.36,0.53,12.9,0.049,63.0,139.0,0.99792,2.94,0.45,9.1 +8.1,0.27,0.33,1.3,0.045,26.0,100.0,0.99066,2.98,0.44,12.4 +8.0,0.24,0.33,1.2,0.044,28.0,101.0,0.99035,3.03,0.43,12.5 +6.7,0.41,0.27,2.6,0.033,25.0,85.0,0.99086,3.05,0.34,11.7 +6.7,0.24,0.31,2.3,0.044,37.0,113.0,0.99013,3.29,0.46,12.9 +6.2,0.3,0.32,1.3,0.054,27.0,183.0,0.99266,3.3,0.43,10.1 +6.9,0.26,0.38,10.5,0.044,33.0,139.0,0.99517,3.06,0.5,10.3 +6.7,0.41,0.27,2.6,0.033,25.0,85.0,0.99086,3.05,0.34,11.7 +5.9,0.32,0.2,14.4,0.05,29.0,144.0,0.99666,3.24,0.41,10.3 +6.1,0.25,0.3,1.2,0.036,42.0,107.0,0.991,3.34,0.56,10.8 +5.6,0.23,0.29,3.1,0.023,19.0,89.0,0.99068,3.25,0.51,11.2 +6.6,0.23,0.32,1.7,0.024,26.0,102.0,0.99084,3.29,0.6,11.8 +6.0,0.17,0.21,6.0,0.05,26.0,134.0,0.9939,3.08,0.54,9.8 +7.1,0.38,0.42,11.8,0.041,32.0,193.0,0.99624,3.04,0.49,10.0 +6.6,0.31,0.37,6.2,0.052,13.0,164.0,0.99602,3.24,0.39,8.8 +6.5,0.38,0.53,1.4,0.142,5.0,69.0,0.9926,3.14,0.52,10.1 +7.0,0.44,0.24,12.1,0.056,68.0,210.0,0.99718,3.05,0.5,9.5 +7.0,0.44,0.24,12.1,0.056,68.0,210.0,0.99718,3.05,0.5,9.5 
+7.0,0.44,0.24,12.1,0.056,68.0,210.0,0.99718,3.05,0.5,9.5 +6.1,0.38,0.14,3.9,0.06,27.0,113.0,0.99344,3.07,0.34,9.2 +8.0,0.33,0.32,4.6,0.041,31.0,180.0,0.99184,2.92,0.74,12.2 +7.0,0.44,0.24,12.1,0.056,68.0,210.0,0.99718,3.05,0.5,9.5 +6.0,0.19,0.29,1.2,0.046,29.0,92.0,0.99033,3.22,0.53,11.3 +6.3,0.28,0.34,8.1,0.038,44.0,129.0,0.99248,3.26,0.29,12.1 +6.1,0.38,0.14,3.9,0.06,27.0,113.0,0.99344,3.07,0.34,9.2 +5.3,0.43,0.11,1.1,0.029,6.0,51.0,0.99076,3.51,0.48,11.2 +5.4,0.22,0.35,6.5,0.029,26.0,87.0,0.99092,3.29,0.44,12.5 +6.2,0.345,0.27,10.1,0.056,38.0,187.0,0.99486,3.31,0.56,10.6 +5.6,0.255,0.57,10.7,0.056,66.0,171.0,0.99464,3.25,0.61,10.4 +5.2,0.2,0.27,3.2,0.047,16.0,93.0,0.99235,3.44,0.53,10.1 +6.2,0.29,0.23,12.4,0.048,33.0,201.0,0.99612,3.11,0.56,9.9 +6.3,0.26,0.25,5.2,0.046,11.0,133.0,0.99202,2.97,0.68,11.0 +6.0,0.22,0.23,5.0,0.045,10.0,122.0,0.99261,2.94,0.63,10.0 +7.5,0.35,0.37,2.5,0.066,29.0,89.0,0.98964,3.14,0.42,12.7 +6.6,0.39,0.28,9.2,0.036,10.0,92.0,0.99206,3.07,0.35,12.1 +6.3,0.23,0.33,6.9,0.052,23.0,118.0,0.9938,3.23,0.46,10.4 +6.3,0.22,0.3,2.0,0.05,23.0,120.0,0.99204,3.24,0.47,10.4 +6.4,0.29,0.18,15.0,0.04,21.0,116.0,0.99736,3.14,0.5,9.2 +6.4,0.29,0.18,15.0,0.04,21.0,116.0,0.99736,3.14,0.5,9.2 +7.5,0.23,0.3,1.2,0.03,27.0,80.0,0.99192,3.05,0.68,10.5 +6.4,0.29,0.18,15.0,0.04,21.0,116.0,0.99736,3.14,0.5,9.2 +5.7,0.28,0.36,1.8,0.041,38.0,90.0,0.99002,3.27,0.98,11.9 +6.5,0.26,0.24,10.8,0.042,47.0,130.0,0.996,3.08,0.4,10.1 +6.4,0.27,0.29,3.9,0.034,62.0,140.0,0.99237,3.1,0.59,11.1 +5.9,0.22,0.29,4.2,0.037,69.0,144.0,0.99214,3.13,0.74,10.8 +6.8,0.26,0.26,2.0,0.019,23.5,72.0,0.99041,3.16,0.47,11.8 +7.6,0.36,0.48,13.5,0.038,44.0,116.0,0.9982,3.04,0.48,9.2 +7.6,0.35,0.47,13.3,0.037,42.0,116.0,0.99822,3.04,0.5,9.2 +5.7,0.18,0.26,2.2,0.023,21.0,95.0,0.9893,3.07,0.54,12.3 +6.6,0.36,0.47,1.4,0.145,26.0,124.0,0.99274,3.09,0.56,10.1 +5.9,0.14,0.2,1.6,0.04,26.0,114.0,0.99105,3.25,0.45,11.4 +5.5,0.23,0.19,2.2,0.044,39.0,161.0,0.99209,3.19,0.43,10.4 +6.7,0.11,0.26,14.8,0.053,44.0,95.0,0.99676,3.2,0.35,9.8 +7.0,0.24,0.24,1.8,0.047,29.0,91.0,0.99251,3.3,0.43,9.9 +6.7,0.11,0.26,14.8,0.053,44.0,95.0,0.99676,3.2,0.35,9.8 +5.3,0.47,0.1,1.3,0.036,11.0,74.0,0.99082,3.48,0.54,11.2 +7.5,0.29,0.24,9.9,0.058,25.0,115.0,0.99567,3.15,0.46,10.9 +6.0,0.33,0.26,5.1,0.051,16.0,119.0,0.99416,3.15,0.41,9.2 +6.0,0.33,0.26,5.1,0.051,16.0,119.0,0.99416,3.15,0.41,9.2 +5.8,0.32,0.23,1.5,0.033,39.0,121.0,0.9887,2.96,0.35,12.0 +5.8,0.3,0.23,1.5,0.034,37.0,121.0,0.98871,2.96,0.34,12.1 +3.8,0.31,0.02,11.1,0.036,20.0,114.0,0.99248,3.75,0.44,12.4 +6.2,0.36,0.22,5.25,0.038,44.0,145.0,0.99184,3.22,0.4,11.2 +6.0,0.31,0.27,2.3,0.042,19.0,120.0,0.98952,3.32,0.41,12.7 +6.9,0.52,0.54,7.9,0.036,23.0,169.0,0.99267,3.26,0.47,12.2 +7.0,0.55,0.05,8.0,0.036,19.0,164.0,0.99269,3.26,0.46,12.2 +5.8,0.2,0.16,1.4,0.042,44.0,99.0,0.98912,3.23,0.37,12.2 +6.2,0.36,0.22,5.25,0.038,44.0,145.0,0.99184,3.22,0.4,11.2 +6.0,0.31,0.27,2.3,0.042,19.0,120.0,0.98952,3.32,0.41,12.7 +6.0,0.29,0.27,2.3,0.044,20.0,117.0,0.9895,3.31,0.41,12.7 +5.7,0.22,0.29,3.5,0.04,27.0,146.0,0.98999,3.17,0.36,12.1 +7.1,0.46,0.23,13.7,0.045,44.0,192.0,0.9981,3.11,0.53,9.4 +6.6,0.21,0.3,9.9,0.041,64.0,174.0,0.995,3.07,0.5,10.1 +6.9,0.42,0.2,15.4,0.043,57.0,201.0,0.99848,3.08,0.54,9.4 +5.7,0.22,0.2,16.0,0.044,41.0,113.0,0.99862,3.22,0.46,8.9 +5.7,0.22,0.2,16.0,0.044,41.0,113.0,0.99862,3.22,0.46,8.9 +5.7,0.22,0.2,16.0,0.044,41.0,113.0,0.99862,3.22,0.46,8.9 +5.7,0.22,0.2,16.0,0.044,41.0,113.0,0.99862,3.22,0.46,8.9 +5.2,0.31,0.2,2.4,0.027,27.0,117.0,0.98886,3.56,0.45,13.0 
+7.2,0.22,0.35,5.5,0.054,37.0,183.0,0.99474,3.08,0.5,10.3 +5.6,0.18,0.29,2.3,0.04,5.0,47.0,0.99126,3.07,0.45,10.1 +6.2,0.24,0.27,16.8,0.04,48.0,129.0,0.99691,3.23,0.38,10.5 +5.7,0.22,0.2,16.0,0.044,41.0,113.0,0.99862,3.22,0.46,8.9 +5.7,0.26,0.24,17.8,0.059,23.0,124.0,0.99773,3.3,0.5,10.1 +5.7,0.26,0.24,17.8,0.059,23.0,124.0,0.99773,3.3,0.5,10.1 +6.0,0.2,0.26,6.8,0.049,22.0,93.0,0.9928,3.15,0.42,11.0 +6.0,0.2,0.26,6.8,0.049,22.0,93.0,0.9928,3.15,0.42,11.0 +6.0,0.2,0.26,6.8,0.049,22.0,93.0,0.9928,3.15,0.42,11.0 +6.0,0.2,0.26,6.8,0.049,22.0,93.0,0.9928,3.15,0.42,11.0 +7.6,0.28,0.17,1.6,0.046,28.0,117.0,0.99288,3.08,0.43,10.0 +7.0,0.2,0.33,4.7,0.03,25.0,76.0,0.99202,2.88,0.54,10.5 +6.6,0.26,0.27,11.8,0.048,28.0,112.0,0.99606,2.87,0.49,9.7 +5.7,0.26,0.24,17.8,0.059,23.0,124.0,0.99773,3.3,0.5,10.1 +7.2,0.21,0.36,15.7,0.045,68.0,183.0,0.99922,3.25,0.76,9.4 +6.9,0.22,0.32,5.8,0.041,20.0,119.0,0.99296,3.17,0.55,11.2 +7.2,0.21,0.36,15.7,0.045,68.0,183.0,0.99922,3.25,0.76,9.4 +7.4,0.22,0.28,9.0,0.046,22.0,121.0,0.99468,3.1,0.55,10.8 +7.2,0.21,0.36,15.7,0.045,68.0,183.0,0.99922,3.25,0.76,9.4 +6.9,0.22,0.32,5.8,0.041,20.0,119.0,0.99296,3.17,0.55,11.2 +7.0,0.2,0.35,8.8,0.037,31.0,103.0,0.99388,3.13,0.49,11.0 +5.6,0.26,0.0,10.2,0.038,13.0,111.0,0.99315,3.44,0.46,12.4 +6.3,0.28,0.3,6.6,0.208,60.0,154.0,0.99478,3.1,0.4,9.4 +6.4,0.29,0.3,6.5,0.209,62.0,156.0,0.99478,3.1,0.4,9.4 +7.2,0.34,0.23,8.9,0.105,22.0,155.0,0.99692,3.01,0.58,9.5 +7.1,0.39,0.39,11.1,0.034,25.0,204.0,0.99616,3.05,0.52,10.0 +6.9,0.26,0.29,4.2,0.043,33.0,114.0,0.9902,3.16,0.31,12.5 +6.1,0.24,0.25,1.6,0.044,24.0,115.0,0.9921,3.39,0.59,10.9 +5.9,0.25,0.24,7.4,0.044,21.0,113.0,0.99462,3.38,0.58,10.5 +6.1,0.24,0.27,11.5,0.05,51.0,133.0,0.99476,3.22,0.37,10.8 +6.5,0.22,0.27,1.6,0.039,36.0,116.0,0.99178,3.38,0.57,11.0 +6.2,0.26,0.29,2.0,0.036,16.0,87.0,0.99081,3.33,0.61,11.8 +6.6,0.34,0.25,4.8,0.038,16.0,121.0,0.99198,3.36,0.71,12.6 +5.6,0.225,0.24,9.8,0.054,59.0,140.0,0.99545,3.17,0.39,10.2 +7.1,0.23,0.28,1.9,0.046,33.0,103.0,0.98997,3.12,0.31,12.0 +6.9,0.26,0.29,4.2,0.043,33.0,114.0,0.9902,3.16,0.31,12.5 +6.4,0.27,0.3,1.6,0.04,19.0,86.0,0.99089,3.32,0.65,11.5 +6.3,0.41,0.22,7.3,0.035,23.0,117.0,0.99172,3.2,0.39,11.94 +6.7,0.41,0.24,5.4,0.035,33.0,115.0,0.9901,3.12,0.44,12.89333333 +7.2,0.585,0.2,10.4,0.086,17.0,94.0,0.99681,3.13,0.4,9.4 +6.7,0.34,0.26,1.9,0.038,58.0,138.0,0.9893,3.0,0.47,12.2 +6.3,0.41,0.22,7.3,0.035,23.0,117.0,0.99172,3.2,0.39,11.94 +6.7,0.41,0.24,5.4,0.035,33.0,115.0,0.9901,3.12,0.44,12.89333333 +6.4,0.26,0.35,7.7,0.056,45.0,191.0,0.99527,3.16,0.5,9.5 +6.3,0.28,0.22,11.5,0.036,27.0,150.0,0.99445,3.0,0.33,10.6 +7.4,0.16,0.33,1.2,0.042,47.0,121.0,0.99198,3.04,0.68,10.5 +8.4,0.27,0.3,2.2,0.037,36.0,129.0,0.99085,2.89,0.3,11.46666667 +5.9,0.2,0.28,1.0,0.043,45.0,100.0,0.99033,3.4,0.41,11.4 +6.4,0.24,0.26,8.2,0.054,47.0,182.0,0.99538,3.12,0.5,9.5 +7.4,0.38,0.34,8.3,0.052,44.0,168.0,0.99627,3.11,0.52,9.2 +6.4,0.24,0.26,8.2,0.054,47.0,182.0,0.99538,3.12,0.5,9.5 +6.4,0.42,0.19,9.3,0.043,28.0,145.0,0.99433,3.23,0.53,10.98 +6.4,0.23,0.26,8.1,0.054,47.0,181.0,0.9954,3.12,0.49,9.4 +6.4,0.24,0.26,8.2,0.054,47.0,182.0,0.99538,3.12,0.5,9.5 +7.4,0.38,0.34,8.3,0.052,44.0,168.0,0.99627,3.11,0.52,9.2 +7.3,0.19,0.27,13.9,0.057,45.0,155.0,0.99807,2.94,0.41,8.8 +7.3,0.19,0.27,13.9,0.057,45.0,155.0,0.99807,2.94,0.41,8.8 +7.3,0.19,0.27,13.9,0.057,45.0,155.0,0.99807,2.94,0.41,8.8 +7.3,0.19,0.27,13.9,0.057,45.0,155.0,0.99807,2.94,0.41,8.8 +7.3,0.19,0.27,13.9,0.057,45.0,155.0,0.99807,2.94,0.41,8.8 
+7.3,0.19,0.27,13.9,0.057,45.0,155.0,0.99807,2.94,0.41,8.8 +7.3,0.19,0.27,13.9,0.057,45.0,155.0,0.99807,2.94,0.41,8.8 +6.8,0.24,0.29,2.0,0.044,15.0,96.0,0.99232,3.23,0.64,10.4 +7.3,0.19,0.27,13.9,0.057,45.0,155.0,0.99807,2.94,0.41,8.8 +7.4,0.27,0.52,15.7,0.054,36.0,139.0,0.99788,3.04,0.62,10.03333333 +5.7,0.28,0.35,1.2,0.052,39.0,141.0,0.99108,3.44,0.69,11.3 +5.8,0.22,0.25,1.5,0.024,21.0,109.0,0.99234,3.37,0.58,10.4 +6.7,0.27,0.69,1.2,0.176,36.0,106.0,0.99288,2.96,0.43,9.2 +7.1,0.2,0.35,3.2,0.034,21.0,107.0,0.99195,3.11,0.54,11.1 +6.7,0.27,0.69,1.2,0.176,36.0,106.0,0.99288,2.96,0.43,9.2 +7.1,0.23,0.3,2.6,0.034,62.0,148.0,0.99121,3.03,0.56,11.3 +7.6,0.31,0.52,13.2,0.042,61.0,148.0,0.99839,2.98,0.47,9.1 +7.2,0.34,0.28,10.4,0.108,43.0,187.0,0.99738,2.96,0.57,9.4 +7.0,0.36,0.25,5.7,0.015,14.0,73.0,0.98963,2.82,0.59,13.2 +6.4,0.31,0.28,2.5,0.039,34.0,137.0,0.98946,3.22,0.38,12.7 +7.3,0.28,0.35,1.6,0.054,31.0,148.0,0.99178,3.18,0.47,10.7 +7.4,0.16,0.3,1.4,0.064,34.0,166.0,0.99136,3.11,0.42,11.43333333 +6.4,0.31,0.27,7.4,0.049,48.0,169.0,0.99323,3.27,0.45,11.1 +6.4,0.31,0.28,2.5,0.039,34.0,137.0,0.98946,3.22,0.38,12.7 +6.2,0.29,0.29,5.6,0.046,35.0,178.0,0.99313,3.25,0.51,10.53333333 +5.9,0.28,0.34,3.6,0.04,50.0,194.0,0.9912,3.31,0.52,11.6 +6.5,0.23,0.2,7.5,0.05,44.0,179.0,0.99504,3.18,0.48,9.533333333 +7.2,0.34,0.2,5.8,0.062,52.0,203.0,0.99461,3.17,0.44,9.8 +7.3,0.28,0.35,1.6,0.054,31.0,148.0,0.99178,3.18,0.47,10.7 +6.5,0.2,0.33,1.5,0.039,36.0,110.0,0.99008,3.22,0.65,12.0 +6.2,0.24,0.27,2.9,0.039,30.0,123.0,0.98959,3.12,0.37,12.8 +7.1,0.31,0.25,11.2,0.048,32.0,136.0,0.99663,3.14,0.4,9.5 +6.4,0.29,0.21,9.65,0.041,36.0,119.0,0.99334,2.99,0.34,10.93333333 +6.3,0.19,0.33,10.1,0.063,63.0,133.0,0.99561,2.86,0.41,9.1 +5.9,0.29,0.28,3.2,0.035,16.0,117.0,0.98959,3.26,0.42,12.6 +7.1,0.31,0.25,11.2,0.048,32.0,136.0,0.99663,3.14,0.4,9.5 +6.5,0.3,0.28,11.45,0.041,29.0,109.0,0.99418,2.98,0.3,10.9 +6.4,0.29,0.21,9.65,0.041,36.0,119.0,0.99334,2.99,0.34,10.93333333 +6.5,0.22,0.19,4.5,0.096,16.0,115.0,0.9937,3.02,0.44,9.6 +7.0,0.23,0.28,2.7,0.053,16.0,92.0,0.99372,3.18,0.56,9.3 +7.1,0.23,0.23,3.5,0.038,23.0,112.0,0.99157,3.05,0.37,11.36666667 +6.1,0.26,0.28,1.7,0.043,24.0,98.0,0.98918,3.14,0.44,12.5 +6.4,0.35,0.21,2.1,0.051,46.0,171.0,0.9932,3.16,0.5,9.5 +6.0,0.32,0.32,4.8,0.041,40.0,186.0,0.99235,3.22,0.54,11.0 +6.1,0.34,0.21,5.0,0.042,17.0,133.0,0.99373,3.02,0.53,9.4 +6.5,0.13,0.27,2.6,0.035,32.0,76.0,0.9914,3.21,0.76,11.33333333 +6.5,0.315,0.2,6.6,0.041,9.0,126.0,0.99494,2.94,0.51,8.8 +6.1,0.34,0.21,5.0,0.042,17.0,133.0,0.99373,3.02,0.53,9.4 +5.7,0.31,0.29,7.3,0.05,33.0,143.0,0.99332,3.31,0.5,11.06666667 +6.4,0.3,0.27,5.0,0.058,27.0,151.0,0.99198,3.22,0.49,12.2 +7.0,0.24,0.26,1.7,0.041,31.0,110.0,0.99142,3.2,0.53,11.0 +6.5,0.13,0.27,2.6,0.035,32.0,76.0,0.9914,3.21,0.76,11.33333333 +6.4,0.26,0.21,8.2,0.05,51.0,182.0,0.99542,3.23,0.48,9.5 +6.4,0.26,0.21,8.2,0.05,51.0,182.0,0.99542,3.23,0.48,9.5 +6.0,0.27,0.31,5.0,0.043,54.0,170.0,0.9924,3.28,0.52,11.0 +7.1,0.21,0.33,1.2,0.039,34.0,97.0,0.99112,3.11,0.75,11.2 +6.7,0.26,0.29,7.1,0.036,28.0,100.0,0.99534,3.08,0.36,9.3 +6.3,0.28,0.22,9.5,0.04,30.0,111.0,0.99338,3.05,0.31,10.8 +6.2,0.25,0.44,15.8,0.057,39.0,167.0,0.99804,3.14,0.51,9.2 +7.3,0.22,0.37,15.5,0.048,70.0,203.0,0.99899,3.25,0.77,9.4 +6.2,0.25,0.44,15.8,0.057,39.0,167.0,0.99804,3.14,0.51,9.2 +6.4,0.18,0.28,17.05,0.047,53.0,139.0,0.99724,3.25,0.35,10.5 +6.3,0.2,0.26,12.7,0.046,60.0,143.0,0.99526,3.26,0.35,10.8 +6.6,0.24,0.22,12.3,0.051,35.0,146.0,0.99676,3.1,0.67,9.4 
+7.4,0.27,0.26,11.8,0.053,55.0,173.0,0.99699,3.11,0.6,9.8 +7.4,0.27,0.26,11.8,0.053,55.0,173.0,0.99699,3.11,0.6,9.8 +7.4,0.27,0.26,11.8,0.053,55.0,173.0,0.99699,3.11,0.6,9.8 +6.6,0.24,0.22,12.3,0.051,35.0,146.0,0.99676,3.1,0.67,9.4 +7.4,0.27,0.26,11.8,0.053,55.0,173.0,0.99699,3.11,0.6,9.8 +7.1,0.38,0.29,13.6,0.041,30.0,137.0,0.99461,3.02,0.96,12.1 +6.8,0.43,0.26,5.2,0.043,40.0,176.0,0.99116,3.17,0.41,12.3 +5.2,0.22,0.46,6.2,0.066,41.0,187.0,0.99362,3.19,0.42,9.733333333 +5.9,0.29,0.16,7.9,0.044,48.0,197.0,0.99512,3.21,0.36,9.4 +5.9,0.29,0.16,7.9,0.044,48.0,197.0,0.99512,3.21,0.36,9.4 +6.3,0.29,0.29,3.3,0.037,32.0,140.0,0.9895,3.17,0.36,12.8 +6.3,0.19,0.32,2.8,0.046,18.0,80.0,0.99043,2.92,0.47,11.05 +5.7,0.29,0.16,7.9,0.044,48.0,197.0,0.99512,3.21,0.36,9.4 +6.3,0.29,0.29,3.3,0.037,32.0,140.0,0.9895,3.17,0.36,12.8 +5.7,0.24,0.47,6.3,0.069,35.0,182.0,0.99391,3.11,0.46,9.733333333 +5.8,0.3,0.38,4.9,0.039,22.0,86.0,0.98963,3.23,0.58,13.1 +7.1,0.27,0.27,10.4,0.041,26.0,114.0,0.99335,3.04,0.52,11.5 +5.8,0.3,0.38,4.9,0.039,22.0,86.0,0.98963,3.23,0.58,13.1 +7.1,0.27,0.27,10.4,0.041,26.0,114.0,0.99335,3.04,0.52,11.5 +6.3,0.305,0.22,16.0,0.061,26.0,141.0,0.99824,3.08,0.5,9.1 +5.7,0.24,0.47,6.3,0.069,35.0,182.0,0.99391,3.11,0.46,9.75 +6.2,0.22,0.28,2.2,0.04,24.0,125.0,0.9917,3.19,0.48,10.5 +6.6,0.22,0.23,17.3,0.047,37.0,118.0,0.99906,3.08,0.46,8.8 +6.6,0.22,0.23,17.3,0.047,37.0,118.0,0.99906,3.08,0.46,8.8 +6.6,0.22,0.23,17.3,0.047,37.0,118.0,0.99906,3.08,0.46,8.8 +6.6,0.22,0.23,17.3,0.047,37.0,118.0,0.99906,3.08,0.46,8.8 +6.2,0.22,0.28,2.2,0.04,24.0,125.0,0.9917,3.19,0.48,10.5 +6.2,0.22,0.28,2.2,0.04,24.0,125.0,0.9917,3.19,0.48,10.5 +6.6,0.22,0.23,17.3,0.047,37.0,118.0,0.99906,3.08,0.46,8.8 +6.1,0.22,0.5,6.6,0.045,30.0,122.0,0.99415,3.22,0.49,9.9 +6.2,0.21,0.52,6.5,0.047,28.0,123.0,0.99418,3.22,0.49,9.9 +6.3,0.32,0.26,12.3,0.044,24.0,205.0,0.99611,3.11,0.58,9.9 +6.9,0.44,0.27,5.0,0.038,33.0,166.0,0.99124,3.2,0.42,12.2 +6.1,0.31,0.34,2.8,0.042,59.5,162.0,0.99179,3.27,0.47,10.8 +8.1,0.36,0.59,13.6,0.051,60.0,134.0,0.99886,2.96,0.39,8.7 +6.6,0.38,0.28,2.8,0.043,17.0,67.0,0.98924,3.21,0.47,13.2 +6.7,0.24,0.26,5.4,0.03,15.0,94.0,0.99045,3.15,0.38,12.7 +6.9,0.56,0.26,10.9,0.06,55.0,193.0,0.9969,3.21,0.44,9.4 +7.2,0.24,0.24,1.7,0.045,18.0,161.0,0.99196,3.25,0.53,11.2 +6.5,0.29,0.3,9.15,0.051,25.0,166.0,0.99339,3.24,0.56,11.33333333 +6.7,0.28,0.28,4.5,0.051,14.0,92.0,0.99224,3.36,0.58,11.9 +6.5,0.29,0.3,9.15,0.051,25.0,166.0,0.99339,3.24,0.56,11.35 +6.1,0.21,0.19,1.4,0.046,51.0,131.0,0.99184,3.22,0.39,10.5 +7.2,0.47,0.16,5.9,0.048,14.0,125.0,0.99428,3.09,0.49,9.8 +6.7,0.34,0.31,16.4,0.051,20.0,146.0,0.99834,3.06,0.54,9.1 +6.6,0.27,0.25,1.2,0.033,36.0,111.0,0.98918,3.16,0.37,12.4 +6.7,0.34,0.31,16.4,0.051,20.0,146.0,0.99834,3.06,0.54,9.1 +7.2,0.47,0.16,5.9,0.048,14.0,125.0,0.99428,3.09,0.49,9.8 +5.0,0.35,0.25,7.8,0.031,24.0,116.0,0.99241,3.39,0.4,11.3 +5.0,0.35,0.25,7.8,0.031,24.0,116.0,0.99241,3.39,0.4,11.3 +4.4,0.46,0.1,2.8,0.024,31.0,111.0,0.98816,3.48,0.34,13.1 +6.6,0.38,0.29,2.9,0.035,15.0,101.0,0.98916,3.04,0.37,12.5 +7.3,0.3,0.25,2.5,0.045,32.0,122.0,0.99329,3.18,0.54,10.3 +6.4,0.28,0.22,12.8,0.039,51.0,150.0,0.99535,3.23,0.43,10.7 +6.9,0.29,0.25,12.2,0.04,29.0,136.0,0.99552,3.05,0.65,10.4 +6.3,0.3,0.19,7.7,0.049,47.0,184.0,0.99514,3.22,0.48,9.5 +6.3,0.39,0.22,2.8,0.048,53.0,173.0,0.99304,3.24,0.45,9.8 +6.6,0.38,0.29,2.9,0.035,15.0,101.0,0.98916,3.04,0.37,12.5 +6.6,0.18,0.26,17.3,0.051,17.0,149.0,0.9984,3.0,0.43,9.4 +6.0,0.28,0.29,19.3,0.051,36.0,174.0,0.99911,3.14,0.5,9.0 
+6.0,0.28,0.29,19.3,0.051,36.0,174.0,0.99911,3.14,0.5,9.0 +6.0,0.28,0.29,19.3,0.051,36.0,174.0,0.99911,3.14,0.5,9.0 +6.6,0.35,0.26,2.7,0.045,19.0,129.0,0.98952,3.24,0.48,13.0 +5.9,0.22,0.18,6.4,0.041,28.0,120.0,0.99403,3.27,0.5,9.9 +6.6,0.18,0.26,17.3,0.051,17.0,149.0,0.9984,3.0,0.43,9.4 +7.7,0.28,0.24,2.4,0.044,29.0,157.0,0.99312,3.27,0.56,10.6 +7.1,0.42,0.2,2.8,0.038,28.0,109.0,0.98968,3.23,0.47,13.4 +6.7,0.32,0.32,1.7,0.031,31.0,114.0,0.98946,3.12,0.35,12.5 +6.6,0.26,0.56,15.4,0.053,32.0,141.0,0.9981,3.11,0.49,9.3 +6.6,0.26,0.56,15.4,0.053,32.0,141.0,0.9981,3.11,0.49,9.3 +6.2,0.32,0.24,4.1,0.051,34.0,149.0,0.99306,3.36,0.52,11.0 +6.3,0.25,0.27,6.6,0.054,40.0,158.0,0.99378,3.2,0.48,10.3 +6.2,0.21,0.24,1.2,0.051,31.0,95.0,0.99036,3.24,0.57,11.3 +6.4,0.23,0.27,2.1,0.042,35.0,100.0,0.99094,3.03,0.63,10.9 +4.7,0.145,0.29,1.0,0.042,35.0,90.0,0.9908,3.76,0.49,11.3 +6.2,0.2,0.28,1.1,0.039,24.0,78.0,0.9899,3.36,0.47,12.1 +7.0,0.28,0.28,1.4,0.039,12.0,83.0,0.99173,3.18,0.65,11.1 +7.1,0.36,0.2,1.6,0.271,24.0,140.0,0.99356,3.11,0.63,9.8 +5.7,0.25,0.22,9.8,0.049,50.0,125.0,0.99571,3.2,0.45,10.1 +5.7,0.22,0.33,1.9,0.036,37.0,110.0,0.98945,3.26,0.58,12.4 +6.0,0.2,0.38,1.3,0.034,37.0,104.0,0.98865,3.11,0.52,12.7 +6.4,0.32,0.26,7.9,0.05,53.0,180.0,0.99514,3.14,0.5,9.6 +6.4,0.32,0.26,7.9,0.05,53.0,180.0,0.99514,3.14,0.5,9.6 +6.0,0.555,0.26,4.5,0.053,17.0,126.0,0.9943,3.24,0.46,9.1 +5.9,0.22,0.45,22.6,0.12,55.0,122.0,0.99636,3.1,0.35,12.8 +6.4,0.32,0.26,7.9,0.05,53.0,180.0,0.99514,3.14,0.5,9.6 +6.2,0.3,0.33,3.5,0.037,37.0,155.0,0.98987,3.18,0.37,12.4 +5.8,0.28,0.18,1.2,0.058,7.0,108.0,0.99288,3.23,0.58,9.55 +5.8,0.555,0.26,4.5,0.053,17.0,126.0,0.9943,3.24,0.46,9.1 +6.7,0.31,0.33,2.0,0.033,12.0,74.0,0.99064,3.29,0.65,12.5 +6.4,0.15,0.25,7.8,0.05,13.0,68.0,0.99394,3.16,0.4,9.9 +6.4,0.13,0.28,0.9,0.045,32.0,87.0,0.99175,3.47,0.52,11.2 +6.7,0.48,0.49,2.9,0.03,28.0,122.0,0.98926,3.13,0.4,13.0 +6.7,0.48,0.49,2.9,0.03,28.0,122.0,0.98926,3.13,0.4,13.0 +5.8,0.3,0.33,3.5,0.033,25.0,116.0,0.99057,3.2,0.44,11.7 +6.1,0.28,0.23,4.2,0.038,13.0,95.0,0.98898,2.97,0.7,13.1 +6.0,0.19,0.37,9.7,0.032,17.0,50.0,0.9932,3.08,0.66,12.0 +6.8,0.31,0.25,10.5,0.043,30.0,165.0,0.9972,3.36,0.55,10.55 +7.5,0.24,0.29,1.1,0.046,34.0,84.0,0.9902,3.04,0.39,11.45 +6.8,0.23,0.39,16.1,0.053,71.0,194.0,0.9988,3.18,0.64,10.2 +7.5,0.24,0.29,1.1,0.046,34.0,84.0,0.9902,3.04,0.39,11.45 +6.3,0.29,0.3,8.1,0.212,60.0,150.0,0.9958,3.1,0.4,9.3 +6.8,0.2,0.25,6.2,0.052,22.0,106.0,0.9935,3.09,0.54,10.8 +5.2,0.38,0.26,7.7,0.053,20.0,103.0,0.9925,3.27,0.45,12.2 +7.8,0.27,0.33,2.4,0.053,36.0,175.0,0.992,3.2,0.55,11.0 +6.6,0.54,0.21,16.3,0.055,41.0,182.0,0.9986,3.35,0.54,10.4 +7.1,0.25,0.31,2.3,0.05,32.0,156.0,0.9914,3.14,0.51,11.4 +5.8,0.61,0.01,8.4,0.041,31.0,104.0,0.9909,3.26,0.72,14.05 +6.5,0.32,0.23,8.5,0.051,20.0,138.0,0.9943,3.03,0.42,10.7 +6.4,0.28,0.23,6.0,0.051,50.0,162.0,0.994,3.15,0.52,10.2 +6.6,0.19,0.28,1.1,0.044,38.0,100.0,0.9904,3.22,0.69,11.2 +5.1,0.305,0.13,1.75,0.036,17.0,73.0,0.99,3.4,0.51,12.33333333 +5.8,0.26,0.3,2.6,0.034,75.0,129.0,0.9902,3.2,0.38,11.5 +6.7,0.23,0.17,1.3,0.061,14.0,100.0,0.9925,3.07,0.55,9.5 +6.8,0.33,0.3,2.1,0.047,35.0,147.0,0.9886,3.24,0.56,13.4 +6.1,0.27,0.32,1.1,0.034,24.0,110.0,0.9898,3.36,0.4,12.5 +6.1,0.27,0.32,1.1,0.034,24.0,110.0,0.9898,3.36,0.4,12.5 +6.8,0.4,0.29,2.8,0.044,27.0,97.0,0.9904,3.12,0.42,11.2 +6.1,0.4,0.18,9.0,0.051,28.5,259.0,0.9964,3.19,0.5,8.8 +7.1,0.28,0.26,2.8,0.039,50.0,118.0,0.9908,3.06,0.59,11.2 +6.2,0.32,0.32,2.2,0.036,15.0,70.0,0.9899,3.16,0.48,12.7 
+6.8,0.17,0.17,5.1,0.049,26.0,82.0,0.993,3.0,0.38,9.8 +9.0,0.2,0.33,3.5,0.049,10.0,40.0,0.9944,3.14,0.36,9.8 +5.8,0.13,0.22,12.7,0.058,24.0,183.0,0.9956,3.32,0.42,11.7 +5.8,0.31,0.31,7.5,0.052,55.0,230.0,0.9949,3.19,0.46,9.8 +6.3,0.36,0.2,2.0,0.048,48.0,191.0,0.9929,3.17,0.51,9.6 +9.0,0.2,0.33,3.5,0.049,10.0,40.0,0.9944,3.14,0.36,9.8 +6.7,0.18,0.25,14.3,0.048,79.0,149.0,0.9975,3.12,0.37,9.7 +6.6,0.16,0.25,9.8,0.049,59.5,137.0,0.995,3.16,0.38,10.0 +5.8,0.13,0.22,12.7,0.058,24.0,183.0,0.9956,3.32,0.42,11.7 +5.8,0.27,0.22,12.7,0.058,42.0,206.0,0.9946,3.32,0.38,12.3 +6.8,0.17,0.17,5.1,0.049,26.0,82.0,0.993,3.0,0.38,9.8 +6.4,0.37,0.19,3.5,0.068,18.0,101.0,0.9934,3.03,0.38,9.0 +7.3,0.26,0.53,12.7,0.047,60.5,164.5,0.9984,3.06,0.45,9.1 +7.3,0.28,0.54,12.9,0.049,62.0,162.5,0.9984,3.06,0.45,9.1 +7.3,0.28,0.54,12.9,0.049,62.0,162.5,0.9984,3.06,0.45,9.1 +5.8,0.12,0.21,1.3,0.056,35.0,121.0,0.9908,3.32,0.33,11.4 +6.1,0.25,0.18,10.5,0.049,41.0,124.0,0.9963,3.14,0.35,10.5 +6.4,0.24,0.27,1.5,0.04,35.0,105.0,0.98914,3.13,0.3,12.4 +7.3,0.26,0.53,12.7,0.047,60.5,156.0,0.9984,3.06,0.45,9.1 +7.3,0.28,0.54,12.9,0.049,62.0,152.0,0.9984,3.06,0.45,9.1 +8.3,0.18,0.37,1.2,0.049,6.0,94.0,0.9937,3.18,0.52,10.1 +7.1,0.09,0.3,6.2,0.032,24.0,134.0,0.993,2.99,0.39,10.9 +8.3,0.14,0.36,8.8,0.026,13.0,60.0,0.9956,3.13,0.35,11.05 +5.8,0.28,0.3,3.9,0.026,36.0,105.0,0.98963,3.26,0.58,12.75 +6.0,0.23,0.34,1.3,0.025,23.0,111.0,0.98961,3.36,0.37,12.7 +6.9,0.28,0.37,9.1,0.037,16.0,76.0,0.9948,3.05,0.54,11.1 +6.9,0.28,0.37,9.1,0.037,16.0,76.0,0.9948,3.05,0.54,11.1 +5.8,0.28,0.3,3.9,0.026,36.0,105.0,0.98963,3.26,0.58,12.75 +6.3,0.25,0.53,1.8,0.021,41.0,101.0,0.989315,3.19,0.31,13.0 +6.5,0.2,0.31,2.1,0.033,32.0,95.0,0.989435,2.96,0.61,12.0 +5.9,0.29,0.32,1.4,0.022,17.0,47.0,0.9899,3.35,0.35,11.5 +6.4,0.46,0.22,14.7,0.047,51.0,183.0,0.998275,3.39,0.6,10.5 +6.9,0.28,0.37,9.1,0.037,16.0,76.0,0.9948,3.05,0.54,11.1 +6.8,0.23,0.33,1.9,0.047,20.0,101.0,0.9914,3.1,0.46,11.1 +7.0,0.23,0.32,1.8,0.048,25.0,113.0,0.9915,3.11,0.47,11.1 +6.4,0.55,0.26,9.6,0.027,20.0,104.0,0.9924,3.22,0.73,13.1 +5.7,0.28,0.3,3.9,0.026,36.0,105.0,0.98963,3.26,0.58,12.75 +6.0,0.23,0.34,1.3,0.025,23.0,111.0,0.98961,3.36,0.37,12.7 +6.8,0.45,0.3,11.8,0.094,23.0,97.0,0.997,3.09,0.44,9.6 +6.1,0.2,0.4,1.9,0.028,32.0,138.0,0.9914,3.26,0.72,11.7 +6.1,0.37,0.46,12.0,0.042,61.0,210.0,0.997,3.17,0.59,9.7 +5.9,0.21,0.23,7.9,0.033,22.0,130.0,0.9944,3.38,0.59,10.9 +6.9,0.22,0.32,9.3,0.04,22.0,110.0,0.9958,3.34,0.54,10.7 +5.4,0.27,0.22,4.6,0.022,29.0,107.0,0.98889,3.33,0.54,13.8 +6.0,0.26,0.26,2.2,0.035,10.0,72.0,0.989465,3.11,0.48,12.15 +5.6,0.18,0.3,10.2,0.028,28.0,131.0,0.9954,3.49,0.42,10.8 +5.6,0.26,0.27,10.6,0.03,27.0,119.0,0.9947,3.4,0.34,10.7 +7.0,0.23,0.35,1.4,0.036,31.0,113.0,0.9912,3.16,0.48,10.8 +5.8,0.28,0.66,9.1,0.039,26.0,159.0,0.9965,3.66,0.55,10.8 +8.6,0.36,0.26,11.1,0.03,43.5,171.0,0.9948,3.03,0.49,12.0 +5.8,0.28,0.66,9.1,0.039,26.0,159.0,0.9965,3.66,0.55,10.8 +6.4,0.3,0.27,4.4,0.055,17.0,135.0,0.9925,3.23,0.44,12.2 +6.2,0.2,0.32,2.8,0.05,17.0,126.0,0.9936,3.18,0.55,9.4 +5.8,0.29,0.15,1.1,0.029,12.0,83.0,0.9898,3.3,0.4,11.4 +5.7,0.22,0.28,1.3,0.027,26.0,101.0,0.98948,3.35,0.38,12.5 +5.6,0.22,0.32,1.2,0.024,29.0,97.0,0.98823,3.2,0.46,13.05 +6.8,0.32,0.23,3.3,0.026,31.0,99.0,0.9896,3.1,0.4,12.4 +6.2,0.2,0.26,9.7,0.03,39.0,102.0,0.9908,3.08,0.56,12.9 +6.1,0.35,0.24,2.3,0.034,25.0,133.0,0.9906,3.34,0.59,12.0 +5.9,0.3,0.29,1.1,0.036,23.0,56.0,0.9904,3.19,0.38,11.3 +6.3,0.15,0.34,11.4,0.05,25.0,96.0,0.99754,3.21,0.49,10.0 
+4.8,0.13,0.32,1.2,0.042,40.0,98.0,0.9898,3.42,0.64,11.8 +6.0,0.2,0.26,14.7,0.045,53.0,125.0,0.998365,2.99,0.69,9.4 +5.7,0.2,0.24,13.8,0.047,44.0,112.0,0.99837,2.97,0.66,8.8 +6.0,0.27,0.26,1.3,0.038,32.0,138.0,0.99125,3.46,0.43,11.1 +6.3,0.37,0.51,6.3,0.048,35.0,146.0,0.9943,3.1,1.01,10.5 +6.4,0.23,0.37,7.9,0.05,60.0,150.0,0.99488,2.86,0.49,9.3 +5.9,0.34,0.25,2.0,0.042,12.0,110.0,0.99034,3.02,0.54,11.4 +5.0,0.33,0.23,11.8,0.03,23.0,158.0,0.99322,3.41,0.64,11.8 +5.4,0.29,0.38,1.2,0.029,31.0,132.0,0.98895,3.28,0.36,12.4 +8.0,0.33,0.35,10.0,0.035,22.0,108.0,0.99457,3.12,0.36,11.6 +6.4,0.3,0.33,5.2,0.05,30.0,137.0,0.99304,3.26,0.58,11.1 +5.4,0.29,0.38,1.2,0.029,31.0,132.0,0.98895,3.28,0.36,12.4 +6.4,0.33,0.3,7.2,0.041,42.0,168.0,0.99331,3.22,0.49,11.1 +7.0,0.33,0.78,9.9,0.042,21.0,251.0,0.99435,3.01,0.55,11.0 +6.7,0.45,0.3,5.3,0.036,27.0,165.0,0.99122,3.12,0.46,12.2 +6.5,0.36,0.31,13.55,0.053,20.0,113.0,0.99544,3.2,0.56,11.0 +5.8,0.42,0.3,2.2,0.035,26.0,129.0,0.989,3.32,0.47,12.9 +7.1,0.39,0.3,9.9,0.037,29.0,124.0,0.99414,3.07,0.42,10.9 +6.7,0.53,0.29,4.3,0.069,20.0,114.0,0.99014,3.22,0.59,13.4 +6.7,0.66,0.0,13.0,0.033,32.0,75.0,0.99551,3.15,0.5,10.7 +6.5,0.36,0.31,13.55,0.053,20.0,113.0,0.99544,3.2,0.56,11.0 +6.5,0.16,0.33,1.0,0.027,23.0,75.0,0.9908,3.3,0.39,11.4 +8.3,0.22,0.34,1.1,0.043,20.0,116.0,0.9927,3.0,0.47,10.2 +6.9,0.23,0.35,6.9,0.03,45.0,116.0,0.99244,2.8,0.54,11.0 +6.4,0.17,0.34,13.4,0.044,45.0,139.0,0.99752,3.06,0.43,9.1 +5.0,0.33,0.18,4.6,0.032,40.0,124.0,0.99114,3.18,0.4,11.0 +6.8,0.38,0.29,9.9,0.037,40.0,146.0,0.99326,3.11,0.37,11.5 +6.5,0.29,0.32,3.0,0.036,38.0,93.0,0.9906,3.16,0.59,12.0 +6.9,0.29,0.32,5.8,0.04,16.0,112.0,0.993,3.04,0.58,11.2 +6.6,0.28,0.3,12.9,0.033,31.0,177.0,0.99479,3.12,0.39,11.2 +6.2,0.36,0.27,3.2,0.032,13.0,73.0,0.98942,2.9,0.69,12.6 +6.0,0.615,0.04,0.8,0.032,8.0,50.0,0.99036,3.14,0.4,11.0 +5.9,0.44,0.36,2.5,0.03,12.0,73.0,0.99201,3.22,0.48,10.8 +5.9,0.42,0.36,2.4,0.034,19.0,77.0,0.99184,3.25,0.48,10.9 +5.8,0.34,0.21,7.2,0.041,48.0,146.0,0.99441,3.16,0.49,9.8 +5.8,0.27,0.2,7.3,0.04,42.0,145.0,0.99442,3.15,0.48,9.8 +7.1,0.33,0.18,6.3,0.094,27.0,166.0,0.99474,2.9,0.49,9.5 +6.1,0.44,0.28,4.25,0.032,43.0,132.0,0.9916,3.26,0.47,11.26666667 +7.3,0.28,0.37,1.2,0.039,26.0,99.0,0.99198,3.01,0.62,10.8 +5.2,0.5,0.18,2.0,0.036,23.0,129.0,0.98949,3.36,0.77,13.4 +6.1,0.44,0.28,4.25,0.032,43.0,132.0,0.9916,3.26,0.47,11.3 +6.4,0.62,0.12,4.7,0.06,33.0,196.0,0.99556,3.22,0.48,8.9 +6.4,0.38,0.19,4.5,0.038,36.0,119.0,0.99151,3.07,0.42,11.2 +7.5,0.305,0.38,1.4,0.047,30.0,95.0,0.99158,3.22,0.52,11.5 +6.5,0.5,0.22,4.1,0.036,35.0,131.0,0.9902,3.26,0.55,13.0 +6.6,0.4,0.3,5.3,0.038,20.0,125.0,0.99204,3.36,0.73,12.6 +6.4,0.4,0.25,4.2,0.032,15.0,91.0,0.98988,3.26,0.52,13.1 +8.3,0.49,0.23,6.65,0.034,6.0,158.0,0.99344,3.05,0.48,11.2 +6.3,0.3,0.91,8.2,0.034,50.0,199.0,0.99394,3.39,0.49,11.7 +6.1,0.19,0.37,2.6,0.041,24.0,99.0,0.99153,3.18,0.5,10.9 +6.1,0.19,0.37,2.6,0.041,24.0,99.0,0.99153,3.18,0.5,10.9 +5.6,0.24,0.34,2.0,0.041,14.0,73.0,0.98981,3.04,0.45,11.6 +5.7,0.25,0.32,12.2,0.041,43.0,127.0,0.99524,3.23,0.53,10.4 +6.6,0.21,0.39,2.3,0.041,31.0,102.0,0.99221,3.22,0.58,10.9 +6.3,0.3,0.91,8.2,0.034,50.0,199.0,0.99394,3.39,0.49,11.7 +6.2,0.28,0.41,5.0,0.043,50.0,188.0,0.99318,3.23,0.64,10.8 +5.8,0.29,0.38,10.7,0.038,49.0,136.0,0.99366,3.11,0.59,11.2 +5.8,0.345,0.15,10.8,0.033,26.0,120.0,0.99494,3.25,0.49,10.0 +6.5,0.51,0.25,1.7,0.048,39.0,177.0,0.99212,3.28,0.57,10.56666667 +6.0,0.24,0.41,1.3,0.036,42.0,118.0,0.99018,3.04,0.64,11.73333333 
+6.5,0.51,0.25,1.7,0.048,39.0,177.0,0.99212,3.28,0.57,10.6 +6.9,0.54,0.26,12.7,0.049,59.0,195.0,0.99596,3.26,0.54,10.5 +6.0,0.24,0.41,1.3,0.036,42.0,118.0,0.99018,3.04,0.64,11.75 +6.6,0.26,0.36,1.2,0.035,43.0,126.0,0.99094,3.01,0.63,11.4 +5.7,0.24,0.3,1.3,0.03,25.0,98.0,0.98968,3.37,0.43,12.4 +6.5,0.21,0.35,5.7,0.043,47.0,197.0,0.99392,3.24,0.5,10.1 +6.8,0.29,0.22,3.4,0.035,40.0,122.0,0.99024,3.09,0.47,12.3 +5.0,0.24,0.34,1.1,0.034,49.0,158.0,0.98774,3.32,0.32,13.1 +5.9,0.18,0.28,1.0,0.037,24.0,88.0,0.99094,3.29,0.55,10.65 +5.8,0.26,0.29,1.0,0.042,35.0,101.0,0.99044,3.36,0.48,11.4 +6.7,0.61,0.21,1.65,0.117,40.0,240.0,0.9938,3.11,0.57,9.3 +5.7,0.695,0.06,6.8,0.042,9.0,84.0,0.99432,3.44,0.44,10.2 +5.6,0.695,0.06,6.8,0.042,9.0,84.0,0.99432,3.44,0.44,10.2 +5.7,0.39,0.25,4.9,0.033,49.0,113.0,0.98966,3.26,0.58,13.1 +6.1,0.38,0.47,1.4,0.051,59.0,210.0,0.99309,3.24,0.5,9.6 +6.3,0.36,0.28,2.5,0.035,18.0,73.0,0.98868,3.1,0.47,12.8 +6.0,0.29,0.41,10.8,0.048,55.0,149.0,0.9937,3.09,0.59,10.96666667 +6.0,0.29,0.41,10.8,0.048,55.0,149.0,0.9937,3.09,0.59,10.96666667 +6.0,0.29,0.41,10.8,0.048,55.0,149.0,0.9937,3.09,0.59,10.96666667 +6.0,0.29,0.41,10.8,0.048,55.0,149.0,0.9937,3.09,0.59,11.0 +7.1,0.43,0.25,2.8,0.036,43.0,132.0,0.98975,3.21,0.47,13.4 +6.6,0.25,0.25,1.3,0.04,28.0,85.0,0.98984,2.87,0.48,11.2 +6.6,0.33,0.41,2.0,0.027,14.0,79.0,0.99063,3.27,0.63,12.4 +8.0,0.23,0.41,1.1,0.048,35.0,150.0,0.99168,3.09,0.47,11.2 +7.3,0.17,0.36,8.2,0.028,44.0,111.0,0.99272,3.14,0.41,12.4 +6.0,0.17,0.33,6.0,0.036,30.0,111.0,0.99362,3.32,0.58,10.13333333 +6.1,0.16,0.34,6.1,0.034,31.0,114.0,0.99365,3.32,0.58,10.13333333 +7.3,0.17,0.36,8.2,0.028,44.0,111.0,0.99272,3.14,0.41,12.4 +6.4,0.31,0.53,8.8,0.057,36.0,221.0,0.99642,3.17,0.44,9.1 +6.1,0.16,0.34,6.1,0.034,31.0,114.0,0.99365,3.32,0.58,10.15 +6.0,0.17,0.33,6.0,0.036,30.0,111.0,0.99362,3.32,0.58,10.15 +5.9,0.44,0.33,1.2,0.049,12.0,117.0,0.99134,3.46,0.44,11.5 +6.6,0.285,0.49,11.4,0.035,57.0,137.0,0.99732,3.08,0.54,8.9 +4.9,0.335,0.14,1.3,0.036,69.0,168.0,0.99212,3.47,0.46,10.46666667 +4.9,0.335,0.14,1.3,0.036,69.0,168.0,0.99212,3.47,0.46,10.46666667 +6.0,0.28,0.52,6.2,0.028,37.0,104.0,0.99161,3.28,0.51,11.8 +5.8,0.35,0.29,3.2,0.034,41.0,151.0,0.9912,3.35,0.58,11.63333333 +5.7,0.21,0.37,4.5,0.04,58.0,140.0,0.99332,3.29,0.62,10.6 +6.5,0.25,0.32,9.9,0.045,41.0,128.0,0.99636,3.18,0.52,9.6 +6.0,0.28,0.52,6.2,0.028,37.0,104.0,0.99161,3.28,0.51,11.8 +6.6,0.285,0.49,11.4,0.035,57.0,137.0,0.99732,3.08,0.54,8.9 +4.7,0.335,0.14,1.3,0.036,69.0,168.0,0.99212,3.47,0.46,10.5 +6.8,0.63,0.04,1.3,0.058,25.0,133.0,0.99271,3.17,0.39,10.2 +5.6,0.27,0.37,0.9,0.025,11.0,49.0,0.98845,3.29,0.33,13.1 +6.8,0.32,0.33,0.7,0.027,15.0,66.0,0.9899,3.11,0.31,11.8 +6.5,0.33,0.32,1.0,0.041,39.0,120.0,0.99004,3.06,0.37,12.2 +6.0,0.24,0.34,1.0,0.036,52.0,184.0,0.99097,3.44,0.44,11.45 +7.2,0.26,0.32,10.4,0.062,23.0,114.0,0.9966,3.23,0.49,10.5 +6.8,0.63,0.04,1.3,0.058,25.0,133.0,0.99271,3.17,0.39,10.2 +6.7,0.16,0.32,12.5,0.035,18.0,156.0,0.99666,2.88,0.36,9.0 +6.7,0.16,0.32,12.5,0.035,18.0,156.0,0.99666,2.88,0.36,9.0 +6.7,0.16,0.32,12.5,0.035,18.0,156.0,0.99666,2.88,0.36,9.0 +6.7,0.16,0.32,12.5,0.035,18.0,156.0,0.99666,2.88,0.36,9.0 +6.9,0.19,0.31,19.25,0.043,38.0,167.0,0.99954,2.93,0.52,9.1 +6.0,0.36,0.32,1.1,0.053,26.0,173.0,0.99414,3.38,0.54,8.8 +6.7,0.16,0.32,12.5,0.035,18.0,156.0,0.99666,2.88,0.36,9.0 +6.9,0.19,0.31,19.25,0.043,38.0,167.0,0.99954,2.93,0.52,9.1 +6.7,0.35,0.32,9.0,0.032,29.0,113.0,0.99188,3.13,0.65,12.9 +6.1,0.15,0.4,1.2,0.03,19.0,84.0,0.98926,3.19,0.96,13.0 
+6.7,0.35,0.32,9.0,0.032,29.0,113.0,0.99188,3.13,0.65,12.9 +7.0,0.27,0.74,1.3,0.173,34.0,121.0,0.99334,3.04,0.46,9.2 +6.8,0.3,0.33,12.8,0.041,60.0,168.0,0.99659,3.1,0.56,9.8 +6.8,0.3,0.33,12.8,0.041,60.0,168.0,0.99659,3.1,0.56,9.8 +6.4,0.69,0.09,7.6,0.044,34.0,144.0,0.9948,3.26,0.38,10.1 +6.4,0.69,0.09,7.6,0.044,34.0,144.0,0.9948,3.26,0.38,10.1 +5.9,0.12,0.28,1.4,0.037,36.0,83.0,0.99074,3.33,0.42,10.9 +6.3,0.36,0.5,8.3,0.053,51.0,202.0,0.9955,3.2,0.51,9.6 +5.7,0.27,0.16,9.0,0.053,32.0,111.0,0.99474,3.36,0.37,10.4 +6.1,0.22,0.4,1.85,0.031,25.0,111.0,0.98966,3.03,0.3,11.8 +5.6,0.205,0.16,12.55,0.051,31.0,115.0,0.99564,3.4,0.38,10.8 +7.2,0.33,0.28,1.4,0.034,26.0,109.0,0.99246,3.28,0.57,10.6 +5.9,0.21,0.31,1.8,0.033,45.0,142.0,0.98984,3.35,0.5,12.7 +5.4,0.33,0.31,4.0,0.03,27.0,108.0,0.99031,3.3,0.43,12.2 +5.4,0.205,0.16,12.55,0.051,31.0,115.0,0.99564,3.4,0.38,10.8 +5.7,0.27,0.16,9.0,0.053,32.0,111.0,0.99474,3.36,0.37,10.4 +6.4,0.28,0.28,3.0,0.04,19.0,98.0,0.99216,3.25,0.47,11.1 +6.1,0.22,0.4,1.85,0.031,25.0,111.0,0.98966,3.03,0.3,11.8 +6.7,0.15,0.32,7.9,0.034,17.0,81.0,0.99512,3.29,0.31,10.0 +5.5,0.315,0.38,2.6,0.033,10.0,69.0,0.9909,3.12,0.59,10.8 +4.8,0.225,0.38,1.2,0.074,47.0,130.0,0.99132,3.31,0.4,10.3 +5.2,0.24,0.15,7.1,0.043,32.0,134.0,0.99378,3.24,0.48,9.9 +6.7,0.15,0.32,7.9,0.034,17.0,81.0,0.99512,3.29,0.31,10.0 +6.6,0.27,0.32,1.3,0.044,18.0,93.0,0.99044,3.11,0.56,12.25 +6.1,0.32,0.33,10.7,0.036,27.0,98.0,0.99521,3.34,0.52,10.2 +6.0,0.25,0.28,7.7,0.053,37.0,132.0,0.99489,3.06,0.5,9.4 +6.4,0.42,0.46,8.4,0.05,58.0,180.0,0.99495,3.18,0.46,9.7 +6.1,0.32,0.33,10.7,0.036,27.0,98.0,0.99521,3.34,0.52,10.2 +6.9,0.31,0.33,12.7,0.038,33.0,116.0,0.9954,3.04,0.65,10.4 +6.3,0.48,0.48,1.8,0.035,35.0,96.0,0.99121,3.49,0.74,12.2 +6.0,0.25,0.28,7.7,0.053,37.0,132.0,0.99489,3.06,0.5,9.4 +7.2,0.21,0.31,10.5,0.035,36.0,122.0,0.99478,3.12,0.4,10.6 +6.8,0.32,0.43,1.6,0.05,4.0,65.0,0.99346,3.27,0.47,10.7 +7.9,0.3,0.6,1.85,0.048,13.0,106.0,0.99331,3.24,0.49,11.85 +5.3,0.31,0.38,10.5,0.031,53.0,140.0,0.99321,3.34,0.46,11.7 +5.3,0.31,0.38,10.5,0.031,53.0,140.0,0.99321,3.34,0.46,11.7 +5.2,0.185,0.22,1.0,0.03,47.0,123.0,0.99218,3.55,0.44,10.15 +5.5,0.16,0.31,1.2,0.026,31.0,68.0,0.9898,3.33,0.44,11.63333333 +6.0,0.17,0.36,1.7,0.042,14.0,61.0,0.99144,3.22,0.54,10.8 +6.0,0.16,0.36,1.6,0.042,13.0,61.0,0.99143,3.22,0.54,10.8 +6.1,0.24,0.32,9.0,0.031,41.0,134.0,0.99234,3.25,0.26,12.3 +5.5,0.3,0.25,1.9,0.029,33.0,118.0,0.98972,3.36,0.66,12.5 +5.5,0.16,0.31,1.2,0.026,31.0,68.0,0.9898,3.33,0.44,11.65 +6.0,0.32,0.46,1.5,0.05,56.0,189.0,0.99308,3.24,0.49,9.6 +6.1,0.27,0.31,3.9,0.034,42.0,137.0,0.99218,3.24,0.46,10.9 +6.0,0.27,0.32,3.6,0.035,36.0,133.0,0.99215,3.23,0.46,10.8 +6.0,0.14,0.37,1.2,0.032,63.0,148.0,0.99185,3.32,0.44,11.2 +5.0,0.24,0.19,5.0,0.043,17.0,101.0,0.99438,3.67,0.57,10.0 +6.1,0.26,0.25,2.9,0.047,289.0,440.0,0.99314,3.44,0.64,10.5 +6.3,0.23,0.5,10.4,0.043,61.0,132.0,0.99542,2.86,0.46,9.1 +5.6,0.26,0.5,11.4,0.029,25.0,93.0,0.99428,3.23,0.49,10.5 +6.1,0.34,0.24,18.35,0.05,33.0,184.0,0.99943,3.12,0.61,9.3 +6.2,0.35,0.25,18.4,0.051,28.0,182.0,0.99946,3.13,0.62,9.3 +6.0,0.14,0.37,1.2,0.032,63.0,148.0,0.99185,3.32,0.44,11.2 +7.3,0.36,0.62,7.1,0.033,48.0,185.0,0.99472,3.14,0.62,10.6 +5.1,0.25,0.36,1.3,0.035,40.0,78.0,0.9891,3.23,0.64,12.1 +5.5,0.16,0.26,1.5,0.032,35.0,100.0,0.99076,3.43,0.77,12.0 +6.4,0.19,0.35,10.2,0.043,40.0,106.0,0.99632,3.16,0.5,9.7 +6.6,0.29,0.73,2.2,0.027,21.0,92.0,0.99,3.12,0.48,12.4 +6.0,0.38,0.26,3.5,0.035,38.0,111.0,0.98872,3.18,0.47,13.6 
+6.0,0.38,0.26,3.5,0.035,38.0,111.0,0.98872,3.18,0.47,13.6 +6.5,0.2,0.35,3.9,0.04,27.0,140.0,0.99102,2.98,0.53,11.8 +6.6,0.17,0.26,7.4,0.052,45.0,128.0,0.99388,3.16,0.37,10.0 +6.6,0.17,0.26,7.4,0.052,45.0,128.0,0.99388,3.16,0.37,10.0 +6.2,0.15,0.27,11.0,0.035,46.0,116.0,0.99602,3.12,0.38,9.1 +5.9,0.48,0.3,1.5,0.037,19.0,78.0,0.99057,3.47,0.42,11.9 +5.3,0.4,0.25,3.9,0.031,45.0,130.0,0.99072,3.31,0.58,11.75 +5.9,0.26,0.29,5.4,0.046,34.0,116.0,0.99224,3.24,0.41,11.4 +5.2,0.3,0.34,1.5,0.038,18.0,96.0,0.98942,3.56,0.48,13.0 +6.4,0.32,0.25,5.0,0.055,28.0,138.0,0.99171,3.27,0.5,12.4 +6.6,0.19,0.25,1.2,0.052,34.0,181.0,0.99352,3.3,0.42,9.4 +6.8,0.27,0.3,13.0,0.047,69.0,160.0,0.99705,3.16,0.5,9.6 +6.8,0.27,0.3,13.0,0.047,69.0,160.0,0.99705,3.16,0.5,9.6 +6.8,0.27,0.3,13.0,0.047,69.0,160.0,0.99705,3.16,0.5,9.6 +6.8,0.27,0.3,13.0,0.047,69.0,160.0,0.99705,3.16,0.5,9.6 +6.4,0.28,0.45,8.6,0.057,47.0,223.0,0.99654,3.16,0.51,9.1 +5.2,0.21,0.31,1.7,0.048,17.0,61.0,0.98953,3.24,0.37,12.0 +7.1,0.24,0.34,1.2,0.045,6.0,132.0,0.99132,3.16,0.46,11.2 +5.0,0.27,0.4,1.2,0.076,42.0,124.0,0.99204,3.32,0.47,10.1 +5.8,0.27,0.4,1.2,0.076,47.0,130.0,0.99185,3.13,0.45,10.3 +5.9,0.27,0.32,2.0,0.034,31.0,102.0,0.98952,3.16,0.56,12.3 +5.8,0.315,0.19,19.4,0.031,28.0,106.0,0.99704,2.97,0.4,10.55 +6.0,0.59,0.0,0.8,0.037,30.0,95.0,0.99032,3.1,0.4,10.9 +5.8,0.3,0.09,6.3,0.042,36.0,138.0,0.99382,3.15,0.48,9.7 +5.6,0.3,0.1,6.4,0.043,34.0,142.0,0.99382,3.14,0.48,9.8 +6.7,0.3,0.5,12.1,0.045,38.0,127.0,0.9974,3.04,0.53,8.9 +6.7,0.3,0.5,12.1,0.045,38.0,127.0,0.9974,3.04,0.53,8.9 +6.4,0.31,0.31,12.9,0.045,55.0,161.0,0.99546,3.02,0.59,10.2 +6.9,0.25,0.29,2.4,0.038,28.0,76.0,0.99088,3.01,0.36,11.7 +4.4,0.32,0.39,4.3,0.03,31.0,127.0,0.98904,3.46,0.36,12.8 +3.9,0.225,0.4,4.2,0.03,29.0,118.0,0.989,3.57,0.36,12.8 +6.4,0.31,0.31,12.9,0.045,55.0,161.0,0.99546,3.02,0.59,10.2 +5.5,0.62,0.33,1.7,0.037,24.0,118.0,0.98758,3.15,0.39,13.55 +6.2,0.3,0.42,2.2,0.036,28.0,128.0,0.9901,3.13,0.38,11.6 +6.7,0.3,0.5,12.1,0.045,38.0,127.0,0.9974,3.04,0.53,8.9 +4.7,0.785,0.0,3.4,0.036,23.0,134.0,0.98981,3.53,0.92,13.8 +6.0,0.31,0.32,7.4,0.175,47.0,159.0,0.9952,3.19,0.5,9.4 +6.0,0.32,0.3,7.3,0.174,46.0,159.0,0.99519,3.18,0.49,9.4 +6.4,0.105,0.29,1.1,0.035,44.0,140.0,0.99142,3.17,0.55,10.7 +6.4,0.105,0.29,1.1,0.035,44.0,140.0,0.99142,3.17,0.55,10.7 +5.7,0.33,0.32,1.4,0.043,28.0,93.0,0.9897,3.31,0.5,12.3 +5.9,0.32,0.19,14.5,0.042,37.0,115.0,0.99684,3.16,0.43,10.3 +6.2,0.26,0.2,8.0,0.047,35.0,111.0,0.99445,3.11,0.42,10.4 +6.0,0.2,0.33,1.1,0.039,45.0,126.0,0.99051,3.31,0.45,11.6 +6.4,0.105,0.29,1.1,0.035,44.0,140.0,0.99142,3.17,0.55,10.7 +5.8,0.28,0.34,2.2,0.037,24.0,125.0,0.98986,3.36,0.33,12.8 +6.4,0.31,0.5,5.8,0.038,42.0,111.0,0.99189,3.18,0.53,11.9 +6.0,0.35,0.46,0.9,0.033,9.0,65.0,0.98934,3.24,0.35,12.1 +5.1,0.26,0.34,6.4,0.034,26.0,99.0,0.99449,3.23,0.41,9.2 +6.6,0.28,0.09,10.9,0.051,37.0,131.0,0.99566,2.93,0.62,9.5 +6.0,0.17,0.3,7.3,0.039,39.0,104.0,0.99252,2.91,0.57,11.0 +7.3,0.35,0.67,8.3,0.053,10.0,100.0,0.9959,3.19,0.5,10.9 +6.0,0.26,0.24,1.3,0.053,66.0,150.0,0.9924,3.21,0.62,10.4 +5.4,0.375,0.4,3.3,0.054,29.0,147.0,0.99482,3.42,0.52,9.1 +7.0,0.17,0.42,1.0,0.075,19.0,71.0,0.99103,3.32,0.62,11.4 +5.1,0.26,0.33,1.1,0.027,46.0,113.0,0.98946,3.35,0.43,11.4 +5.8,0.36,0.5,1.0,0.127,63.0,178.0,0.99212,3.1,0.45,9.7 +5.7,0.4,0.35,5.1,0.026,17.0,113.0,0.99052,3.18,0.67,12.4 +6.2,0.76,0.01,3.2,0.041,18.0,120.0,0.99026,3.2,0.94,13.7 +6.1,0.41,0.2,12.6,0.032,54.0,136.0,0.99516,2.91,0.43,10.6 +5.8,0.385,0.25,3.7,0.031,38.0,122.0,0.99128,3.2,0.63,11.2 
+6.0,0.27,0.4,1.7,0.021,18.0,82.0,0.9891,3.24,0.95,13.13333333 +5.7,0.4,0.35,5.1,0.026,17.0,113.0,0.99052,3.18,0.67,12.4 +5.8,0.36,0.5,1.0,0.127,63.0,178.0,0.99212,3.1,0.45,9.7 +7.0,0.24,0.47,1.3,0.043,18.0,131.0,0.99176,3.19,0.45,11.0 +6.8,0.23,0.48,1.5,0.036,35.0,165.0,0.99162,3.18,0.45,11.3 +6.5,0.28,0.34,4.6,0.054,22.0,130.0,0.99193,3.2,0.46,12.0 +6.4,0.23,0.35,10.3,0.042,54.0,140.0,0.9967,3.23,0.47,9.2 +6.0,0.34,0.29,6.1,0.046,29.0,134.0,0.99462,3.48,0.57,10.7 +6.0,0.34,0.29,6.1,0.046,29.0,134.0,0.99462,3.48,0.57,10.7 +6.7,0.22,0.33,1.2,0.036,36.0,86.0,0.99058,3.1,0.76,11.4 +6.4,0.23,0.35,10.3,0.042,54.0,140.0,0.9967,3.23,0.47,9.2 +6.0,0.32,0.33,9.9,0.032,22.0,90.0,0.99258,3.1,0.43,12.1 +5.8,0.29,0.27,1.6,0.062,17.0,140.0,0.99138,3.23,0.35,11.1 +5.8,0.38,0.26,1.1,0.058,20.0,140.0,0.99271,3.27,0.43,9.7 +5.9,0.32,0.26,1.5,0.057,17.0,141.0,0.9917,3.24,0.36,10.7 +5.6,0.33,0.28,1.2,0.031,33.0,97.0,0.99126,3.49,0.58,10.9 +5.9,0.37,0.3,1.5,0.033,35.0,95.0,0.98986,3.36,0.56,12.0 +5.6,0.295,0.26,1.1,0.035,40.0,102.0,0.99154,3.47,0.56,10.6 +6.7,0.5,0.36,11.5,0.096,18.0,92.0,0.99642,3.11,0.49,9.6 +6.5,0.28,0.38,7.8,0.031,54.0,216.0,0.99154,3.03,0.42,13.1 +5.3,0.275,0.24,7.4,0.038,28.0,114.0,0.99313,3.38,0.51,11.0 +5.2,0.405,0.15,1.45,0.038,10.0,44.0,0.99125,3.52,0.4,11.6 +6.8,0.34,0.36,8.9,0.029,44.0,128.0,0.99318,3.28,0.35,11.95 +5.7,0.22,0.25,1.1,0.05,97.0,175.0,0.99099,3.44,0.62,11.1 +6.2,0.28,0.57,1.0,0.043,50.0,92.0,0.99004,3.17,0.36,11.5 +5.6,0.34,0.25,2.5,0.046,47.0,182.0,0.99093,3.21,0.4,11.3 +4.8,0.29,0.23,1.1,0.044,38.0,180.0,0.98924,3.28,0.34,11.9 +6.6,0.38,0.29,2.4,0.136,15.0,93.0,0.99336,3.18,0.6,9.5 +5.1,0.3,0.3,2.3,0.048,40.0,150.0,0.98944,3.29,0.46,12.2 +4.4,0.54,0.09,5.1,0.038,52.0,97.0,0.99022,3.41,0.4,12.2 +7.0,0.36,0.35,2.5,0.048,67.0,161.0,0.99146,3.05,0.56,11.1 +6.4,0.33,0.44,8.9,0.055,52.0,164.0,0.99488,3.1,0.48,9.6 +7.0,0.36,0.35,2.5,0.048,67.0,161.0,0.99146,3.05,0.56,11.1 +6.4,0.33,0.44,8.9,0.055,52.0,164.0,0.99488,3.1,0.48,9.6 +6.2,0.23,0.38,1.6,0.044,12.0,113.0,0.99176,3.3,0.73,11.4 +5.2,0.25,0.23,1.4,0.047,20.0,77.0,0.99001,3.32,0.62,11.4 +6.2,0.35,0.29,3.9,0.041,22.0,79.0,0.99005,3.1,0.59,12.06666667 +7.1,0.23,0.39,13.7,0.058,26.0,172.0,0.99755,2.9,0.46,9.0 +7.1,0.23,0.39,13.7,0.058,26.0,172.0,0.99755,2.9,0.46,9.0 +7.5,0.38,0.33,9.2,0.043,19.0,116.0,0.99444,3.08,0.42,11.4 +6.4,0.35,0.51,7.8,0.055,53.0,177.0,0.99502,3.12,0.45,9.6 +6.0,0.43,0.34,7.6,0.045,25.0,118.0,0.99222,3.03,0.37,11.0 +6.0,0.52,0.33,7.7,0.046,24.0,119.0,0.99224,3.04,0.38,11.0 +5.5,0.31,0.29,3.0,0.027,16.0,102.0,0.99067,3.23,0.56,11.2 +5.9,0.22,0.3,1.3,0.052,42.0,86.0,0.99069,3.31,0.47,11.55 +6.2,0.36,0.32,4.0,0.036,44.0,92.0,0.98936,3.2,0.5,13.3 +6.0,0.41,0.23,1.1,0.066,22.0,148.0,0.99266,3.3,0.47,9.633333333 +6.2,0.355,0.35,2.0,0.046,31.0,95.0,0.98822,3.06,0.46,13.6 +5.7,0.41,0.21,1.9,0.048,30.0,112.0,0.99138,3.29,0.55,11.2 +5.3,0.6,0.34,1.4,0.031,3.0,60.0,0.98854,3.27,0.38,13.0 +5.8,0.23,0.31,4.5,0.046,42.0,124.0,0.99324,3.31,0.64,10.8 +6.6,0.24,0.33,10.1,0.032,8.0,81.0,0.99626,3.19,0.51,9.8 +6.1,0.32,0.28,6.6,0.021,29.0,132.0,0.99188,3.15,0.36,11.45 +5.0,0.2,0.4,1.9,0.015,20.0,98.0,0.9897,3.37,0.55,12.05 +6.0,0.42,0.41,12.4,0.032,50.0,179.0,0.99622,3.14,0.6,9.7 +5.7,0.21,0.32,1.6,0.03,33.0,122.0,0.99044,3.33,0.52,11.9 +5.6,0.2,0.36,2.5,0.048,16.0,125.0,0.99282,3.49,0.49,10.0 +7.4,0.22,0.26,1.2,0.035,18.0,97.0,0.99245,3.12,0.41,9.7 +6.2,0.38,0.42,2.5,0.038,34.0,117.0,0.99132,3.36,0.59,11.6 +5.9,0.54,0.0,0.8,0.032,12.0,82.0,0.99286,3.25,0.36,8.8 
+6.2,0.53,0.02,0.9,0.035,6.0,81.0,0.99234,3.24,0.35,9.5 +6.6,0.34,0.4,8.1,0.046,68.0,170.0,0.99494,3.15,0.5,9.533333333 +6.6,0.34,0.4,8.1,0.046,68.0,170.0,0.99494,3.15,0.5,9.533333333 +5.0,0.235,0.27,11.75,0.03,34.0,118.0,0.9954,3.07,0.5,9.4 +5.5,0.32,0.13,1.3,0.037,45.0,156.0,0.99184,3.26,0.38,10.7 +4.9,0.47,0.17,1.9,0.035,60.0,148.0,0.98964,3.27,0.35,11.5 +6.5,0.33,0.38,8.3,0.048,68.0,174.0,0.99492,3.14,0.5,9.6 +6.6,0.34,0.4,8.1,0.046,68.0,170.0,0.99494,3.15,0.5,9.55 +6.2,0.21,0.28,5.7,0.028,45.0,121.0,0.99168,3.21,1.08,12.15 +6.2,0.41,0.22,1.9,0.023,5.0,56.0,0.98928,3.04,0.79,13.0 +6.8,0.22,0.36,1.2,0.052,38.0,127.0,0.9933,3.04,0.54,9.2 +4.9,0.235,0.27,11.75,0.03,34.0,118.0,0.9954,3.07,0.5,9.4 +6.1,0.34,0.29,2.2,0.036,25.0,100.0,0.98938,3.06,0.44,11.8 +5.7,0.21,0.32,0.9,0.038,38.0,121.0,0.99074,3.24,0.46,10.6 +6.5,0.23,0.38,1.3,0.032,29.0,112.0,0.99298,3.29,0.54,9.7 +6.2,0.21,0.29,1.6,0.039,24.0,92.0,0.99114,3.27,0.5,11.2 +6.6,0.32,0.36,8.0,0.047,57.0,168.0,0.9949,3.15,0.46,9.6 +6.5,0.24,0.19,1.2,0.041,30.0,111.0,0.99254,2.99,0.46,9.4 +5.5,0.29,0.3,1.1,0.022,20.0,110.0,0.98869,3.34,0.38,12.8 +6.0,0.21,0.38,0.8,0.02,22.0,98.0,0.98941,3.26,0.32,11.8 diff --git a/data/Wine Quality/resnet_model.pth b/data/Wine Quality/resnet_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..d9bce75d03ac9cabbd84f7e928f2c70ab826dbd5 GIT binary patch literal 149454 zcmbrl2UrwK*Y67mD2RXpf(aB55EKw3&2&|RA`(=B62w4e1{7q-K@cPd1(7I<2q-}i zvoh1GjF?3*=YSbAU=G|?_xpW!pR><*?tR~Ro_^5juHRZ!y?RyuSJk*U4UrTR8#YYr zfBUG2jS!0riV6>k_6b`V7_`DD&@bB0Fk<-%yO}a#-v9cfN(2O~T(Kf3AUb4ac$BPj zWMEKaP@ru{K(wqtY>JyeT+d69MF)lX2L%R(g!2sJf!fj-ee;ZZ9iePT^y1w-`oyoR!Equ49`ctHZmKTTxqR;=`kUhwBjS%H+7 zID3t;K-y~v`!^sWMlf_xSOEP;&Ea3}FBfov}SmqWv&1#)hJ5nl3t z^U6QOFG?WKXHD`Y`G1hp1q%G3PN~rX#owe%!AK|ZOu?u>6s2f^vYSA~Ycxw?2g%2O z#ma!?QHI8b{^9@X;po57Oa!WbXlj4a#{8m<{X-iUEg0`6nBb-S547K&ssD{-D$w}D zn)nxM(l3_gAC^|MK-*0)*=zhiup(o^!`XY_6BgtbF3|ZKbA~|o4^!_i=9FJd{Xa~D zXu(uB!8EUN{{u7DFH&ImH>8=s=nvBPFXZ%JNRvNE(`dmAH-VX#);}P_W5Rs={h|Z- z?7&7x`UNZx3KW?CLH`v3b61A*{wo43en)`0z>>8l9xbq9txcKo#|kG}z;zQ)UaJ4A zmEeHs|GH1;Z%$hXFn{PgG3#gn{o8A&)GUED8%Jya+5BP7j26st6WDsG{1fx{-J1P3 zqNTv@&oIpSYZ&Z*4THlUm}9iS$xSf#pByy#mxJ^EhO-jP|I0z=|8mgfkAtp%m~PR6 z1#W_c|Ky;F;co}s|BEQWc4E4*z~hgHo_~4h^~=LWe`wy(g2irvC0;V@mH!>wOaD%o zE?D-5;PV&3_ZPwM55YfL5a1>V{QWGmgx?+n{k^@3Aox#v-e2wczuJfVX&)LbSneiR z;id4$gTDoF*xxy(g780_m49&}esKhUIFZqUC^td$?Y8Lme&}0)!nwwzF?{N(d z_}9Ho|GOjRf{b4`n{Ar;%M*!T(U|q;W@rELCMQ~u>n6ziJ-969_u%IL9bzFUV9iTJ z3)cQ-WD3^(>P_LF_C?Wx^=^XVm}J3*->$H^A=>X(rWcg_PM4*6UhJ@o3CfZM<-d)@ z21Q1(sa{awBp&s<#eb!CL1nUFBmejJvTT$8PK$!7WWlCCC*=Ni!q8Z-IayHs=hUy; z``>MTr&7U|WI+vkRwC;6B=Wbj>=P{5nk=aO)A-jM^S_)n5!59M>i?Yn-x6zJX8}lu-hicV!?8iU1wjIFGEy{?%y^bgW>x8s z72ToOc*bKCad{Z#ZSbLvbtsYnpGj!CQwAxz*JS_(6l+nojX70M`;E0a-B!b-G8 zj6uAG`V=1xAvw}Nkk$PRqT8%PK7Wgb;p>Ny9Pe&;60n$j&~=6qopPjPYfhFgb!L<^ z?J;lKTjpiP9I8V88WU=?92GpZ#4{ge;BOD6<1ZGg@U?Xtnf{ɬydOgwFiZ+|~8 zQZRf2+!1c%u|_sAy|;)wOi+Uvo)j+58I6-nvq`kT5c_s{lcUQ+ApYzl2w1rc9%%Z4 z`n?3QE`ViaO$O_v3hn`wkMQ003i|XVflR-)l_`4|gXd-R!t!TvIKl2b+Ws_#gv~|R zs^vQ~P}2u*g`W6XRuB>QxkssPsDT_eS6n@FFM7)TfX>-BBJGVwU@{#BJ9dPU99?Bx zuHcEaZX!G!FO^)q_cL&E<#e2x(rpdX) zvBC+5j#xp&qD9=hy2c=GI+{4l>!XwxFT!$*w?OrNZIah|8N?125b^Uv@U;*HB6pHY zuEna6PE`$jl2XK}KWy{sBjkPAg3R9gV5teE$oBjoT+omt)oU_H^Lh{D zTdG4wwZxNmb+Nery%{<2`4h87zZUJz)T82ea>>sHzNE|22Y>5((h3XAz|9rG{JJ zeXS$O<``?y#mCo?RM;V^&8~y0i!I1^73o9r| zRe*n%DdMNS26*f13($9aD4uq-iz<4U0cR@4kQjdrvctohOi!AICwR=nZ`?04m-7wC z;W3Nw!MA2OLhB@T;f^>lZOp{Jqu-$%D-W{gMJ!g|^p- z=YY>nXOg+_5jgaoK~p!+AySvR5f8$!Lt!pu&}T(7T8@J-Asqb_^B`7phU4DTNyJz^ 
zdV;3wqRq5Qx|~{4_Jjj9*4vBuJTJ)Fx-@K8jlke3d{FO_iVJVnz;gQfN-alKE*#DJ zD=#Mfl7I|F zo=cc1NzyR7Sc`m+n?_H@I>UizVR*bd9}{pHY)5A&*#ig7&4`79`Uex539`A19aJJ_{Vk}z<+2K^&+QR3?W zt==q-ZIQ}sfI|?syQGxlY%9Z;M@wPjv1CXrze=*!&4q6sV`N!YI!ybo9`9$a!uJc5 zXim*8`niOgH@#}Hi}PV$8MB~Pc{%WY&>dpQak7rXmk-_H$MYWJ4F0UZN%4bhUU&u> zp44oIJpJ9n%?*PbS%WJoR?K&Yd&m#^npGZ zbftbAZXx|h7x(URCz^*>LY210=F=)a(^NhoIZ+Wnuw}=$- zh%iy_e9k{4Q>tLqVFx50p}D|qZK z09mK|q;}^7nPr1SeaIhXxG#c>3vQ4{b;ESIrwYavmeV!!l_5iW2^!#DqEcE2ZgVrC zA~giZCpyUHBX01nvIw1~yTD>od)%}m6bJST;zd0k?A|a9*C$^fybK@MIs79heK!L; z+aAx4RpE!QWIW+*jQrn3al4^3)HDX;O)i7&`{l(T>+Fa*r##5Bhmkn&^*ymTz7m{$ zGjUVN8&Wi^#%V29!#P?C4^MO2$H>=Y!e$-Z-B$t6wuHk||8%l)Q9JYE$rj@2Zh>)2 zxVPZp5G9U{MZZ^-G-Rsm{T5np+j_dK5A#CbFuA#hhX`|soqJZvEe zeLkhQHM{_h*4e#ro7bti><)NsTG zy(m^l%LxZgC6g(9q0}K`KB?QM0P-_!aWd&CRnKz8@0|YVtJxE}s^Je=7+p#XIE}(8NRS!g$nBpEy5vUs3 z28FkWXm_duO#iuEvQg@RIL=ThL3jBp zm6b9&V+NsZb$;6y^ zR8-y!JIwXzL#KKsSkjL8@hzlRR+j=V$7SAlnNJpQ*o5c$!<@DyiqneRrSs+0!D*rz z2lvl0J*f1H?lzhaSMwcl?szh><971+tEYpAhax@y(jN`iU8QHLyg~5gKQiNXF)3Yc z55mg4pxmrWwUc!)+p!jd4=KYCPa^o1f1vgWVi^C$hw$2~(4JNCFq1jl^CsMwC5{ys zM9z`XU-HaHri9owSwisGbO`m!Fl{_n!Orei!V~rR@FYJS-nllLY%*?zd8>+{>5?TB zMr*?Gtv_U&!WO!(kB>a6N`&&UwbZcX3b|Y|L|0laB^MO{R=uzQNn8M`lCztBbV=bA{!%w0W$v8Cwwr-VI`CqrLc zQO4ab*I>RIhc#Nyrw2d9(K}V`)LlrLPE+Zmjr;X6f>(_5eUA4$G%diz%364aXFDYo z-)PP35)#r}22rV8=3rBK&()K8AfZpNAY>1Ah_n+)lgXaNx<7ixmpDLmO#v<+S&AFa z`T#ul#=)m@G*MzUe33Urec2c^2(ZG}HNn`BmWPIZ5%kGguE#AiN28;CjAlhS47{oZ zpYxeu!6QSO16P2P{s6s^>5PMJiqK&Fi}a>^C$}1B!qpG;^dg2p>uX)eD-*|rhr3{A zEXRL^0JCw`r0J2@TwccbLZ&$M9k=i&611$BFjMVH;ZxpH{Pl4jHQMlu`OtfjAskL{ z^di?Yo?DFRLEEA6+%$Bk8Yk|{O;A@*46TCN$iN~W@b`SiI#*1R#y$GTNBW7S+ z6ZenSkXO3~*`eddDCkGx>fwB-Q#eOj9SMoaonS5}>%#OtbNqOfkNtKr2P9Hmsd_87 zquSRBFT~kEeD-3IuCsu3$T64DW*y)mHrN`gxnlSv|X?UkGV_*(LZmg z%*He9lKCyresm?+%-W0ICbJp6m5EHcWF35|$wj+t3vgf1jBUv^_&`YsWk@Y%oGD@} z+OnYd2utt33Bz0qj)$4#YSjWSC~D9pBG(JxhyXXYsXidJeh&$-74S338O0x4;Wmxk z&>-@j3@oSyZOv>V3H{_>*K){u-~zG2hVZB{5pR`tk%hWmXh`%(hO;3gJiX6(NHbu) zSP7)f>m+U~Ls4Z%R?qtUC@{OQo+J+Cfx_G&rZIgE24^@!cGGn-!u7BH_Z*QgKNLGA zHh}S!VUlPQ0`*g~acSuc{8P4xRsE`jw^~+0rA;E}Y2-l1i<#u#%tFk+I85IxtRf+~ z2j~qd3bmcxl>cBl7}sZmkaHc}a=%Hs1l@4S=X&biu?XkJ$iO9+zwCcY|B=8IXNZ8z zWvb}3n7D3PL+##rlh->uv4Uzdo47qCzXFuOtbPs7(ocn+i+rePT?`qE5^2GSItW?3 z6f8p?vw@%8KqD}hJG1v-lv*Z%BPaUrI-LQm{~hkf4k4a8pbKS}OA~KBgGl9P@BR$Y08DXAI>sauC|T z2qd=E(yw;;cb ze{$oYHHUSEb6GFT!A+_F?>lC*aSswv_GSgM=WQ+Ob#p<0A}dU-{<`DAfxmPRrgFUO z42*-lbkEgjQlx2y&zBt}OKdqTG`|u?e^{Vh@KlJswh`MpH)7+k5w&PCLKmTZ)Hh)k z-ZYyicU}&NnS5>+xTP31mI4#qEnDe&Q9gl?q(TzA~Lk-OJ7K1Cjf62$as&rOV zG)WuXN9v+wFmx~&ZlAD&fvqgla_ST@j-Eriy+)~}LJUg$`c3PC>M$$vBE9Ub0UI9F zaa?;j-o2TH+x0VG`@~D?H2buv6psVQsuTg^H^J?4{ErlWoWZ6pxM6CT^o^WrDnr>_ zb=Xs84@Mtr@l25>h(C$|pXttE>2ibgoR%hM++@K$ZzUM+eZ)k)-i+g7t3c$SJXywP z1=B{|u_rx;Udi)7e#Hi6&(%^mem$2Y8Ww=%*6om!>4|S#YT)Z|9>Y9K1MwM~h}u^V zdcrdgc0WqMR-bm$vOn^)k!fT;_yy+W16VB_i7hG)%@pi`+ygSB8G_xXL;tVA|*HuiE zeblJ%m@qL;U5K3p&ETQe1hGeXaCOW&(85|ejopMtxqUBI7gMOh`5bteTf*!qiKafY z6FBeiTG;o;3u1LNNdEO<)_gWspSc`lr{*e9tlvt1PdUvDx;Nt2lPyGt5k<4vd9*iA z4}-6i15x(?<*x!LFr}K#?z}~||C|CwNn%*?RRESpZ6nW{ZQ#&CE$mcZhq{q-At)z^ zeZO8DUDM)0d#MGj%XP(I!}ZkWtqi*Sxl6Z{CSyczA|&mpf{;HsIA_cn-nkti3cf#R z#`TNzniWIxZ{?G)@f;ZWQ43k`{g|iv)v&PH0;?L=o67a2;U?_`a7n0uG^g9*BMAs!Y97i-t-la?<;Z9Rg;>;wmpVCKNqIRL2 z^9NG#^C|uIqJtW}3xM9na9Hi#g4XlXASy?%O2|Buhvo5^Q14p}hawcv zp`{p8&%7jgz8q$`pa>H>SKvRPtq}Uw4krl@T5O$)X&-lkhvy%%xYq!vbppND$e_%d z=VaRfZl94%26*e2W6+{V6pwd>=FB=0HERO|IkeMorWJ_TZ(@98oC-_HAsjkLp1F

&JR{-{~pwX;p^!sw%j`FNo$>mSV4iBOd#?9`AItKx~veR-Khc^_9V(X_CZo z*(I22Ax4J_&ysC(97qJ`ml2*!!{o34{ABc!6g`l`=ckJ>Nxume4@!VhOgFV!Z9^vK zjnUEH>u{fOB^tG^1zzrd=J`k9p;KwFdeR=l%5TvM1rp;(tT7=GZDY$dMjjFPr z=t=JoSasMQETkJCitDjTUQ1!&jg9cy;|Y1q;b<*~!@+1OH!q*40R__(T)@@T9@9it z%f%#x=RB3LDyJ`2HPfTXGoUT)9ebVnfNtkB_~l^N!=+=wv$i4X z>6VCtGdRAi*c+|Kl5t%%A&ysg=wG{HDN)3JCH|| z2lKg1x4CHjR~mHQE73Hr*AF|A&OWTm1U+v9_z|%X-Cq^K%oE%WV&!Zo*-;C_*H~(9 zkOb2*me8v_PNYw2BO~~@#q@w(7yG@r6n_bvWeQ76;l=nZlE0=L6C{Gj_d8y6ZunUu zc)1K4Gi=EZtw0<*U`5)g6{x-Oh24L)(ENFY5Oubgz2-$wZifuae!qet*@^g}YYp%} zuY>vWdhof@fu2pz#xA>DDEqC8>#i*$p%caEUAqeYn@l9#T< zU!5V3DettQFGdIBeOt-ROME0UwVU~)rHA7>zL41g>D4Qr&vTx0-`91$FKW?tpKP><0&&S) zGIpo}q?ZhL6kjNZbPapbFjJd1Yd=XY>#(zYMj~8fo)y`_Y9^)!*|dJCyQX$1}j{?$%0s!A@Mg*#(PJO@^1@6>5dtZFjXK3{_S7EOXzA~ zTPKIom&NfyKnBb{90!|L#}l)4#ps%Ui)e=j&;eOJ=naEME`)oxG*C3blPqJU7;^fWU?jH`BOdOeC#^L2vpT={ z=s#&_eqIWUFGpaGP9t?Fx<-=NEv)vT8!d@jfacTWag$(Jr^W{GKZhO{5G` z>{H-ZO&m1~7z=T?XA`etCAcafk71k2V9Dxn#u8KPz^`UI{3*_%5(~G%o`5DglWBh6 zo-d|+K`cIe)JzBV{UTN7tp(q zjg)D{hvFCy&2bA_H0R!dNLuC0azS*dV9 zSszct6vJaBG5i#;9pxtn!#3u%_)ZGJ*RB>0zF&p=|2cz;&mJmu>L4}!=1f8cM#%fm zUTAr`0%jcE2$K{!l&hNp24_7;M*Tu)ymGyBPRf0f0NVnLZAT_AH0~jR5ET3~9z=S2)|_ z2jh+$BSSvXc=zT;_^(w3Z5Fcq7TV+exEj8&Nf=c}7Gma^UK)O66{J6SK_v5J=u!7% zEHE*^-wzi-Y>PNvoW*niWfOebH$d+fi=xew`CuKouk*tymIvs|#fZP^I6V6%>2FT7Bx4mB8sTnTEs*?D#j70(MfbzWe1KFs6u9pvFUQ1DWouvhf$`zKGhg#c z<9_2W^VBgf`30B0p@HJ4C8P_Kx*JHH%UAW+I2GU zyh9hcc{&}Rc!`76?%#CDvI5%mCJE$U6hhAIeN=e!63kLv0I7Lm_%&uGetH>>JALZm zMw}p7rk+Rch?}Bd?=C!0zkK_@SmMQcZbmPzF_u@EO?7sS(jH$YTr6S&Uzkr_(W9GQQmPA6;s; zDi;0thy35DY*6eC2ET>hX!cKST&W(9gW5CkOympVYq0OYt>J7i*{d1C;mrq^`Ok`O^ zA5bZ6CyfG+=mTrQp@13@9%BC0U%lk!JXe&ATMi=<`EYU10@6ZLVMJ;wmwj_NJiV?0 z)jBEoW^Fuo0M^YTHMV&;__rSG zrj=rnN*3-LvlBx7bNHS?A7srr#4kZW|3>~&JnT}2>}E{{eg+wbsl1_0w#Jw+y_J4> zl?6{$#i3-&Zz@xGll3SS!tpnyTsK+I=IH^x-Cz|}*D{7n4MWs78lax-t>WkZVi}{A z5c?+y6RL_q;gch-erE;a#ny|7m~aiAIJ6vh-%Y@fz;15IR}D0M<-*?yVQlE|Y&@YLLkrq9Np)cu zocr%9RqQJV+eOisx^;w1`kaC_2|3Vj`k9UxnZxI*0%NOBgY^69Bh;U9~nkh6m|c=~$@9Q!hSa)lS;QXi>{(N{y1$?%V&@V3h zjy*PDfK#x0Z!rYkK1uUBr@#l@o$zx;8P0xXK}0XO;PBpAkj8Xp=lQ+V=8_-A?w*Gc z`q5y}Y{rVCJaNkR6nJI03t;c~ zJ~Mjthdt~y3k5mpIxLwq2XFqdBsaqDlUYr^s4zYge}AcjqKGNj9A1Vd?A&~HUK@q; zU(@>OA84{>E*u&CNU9GN!+hQ6bWA`fgfY$dus-vb);4ix{>MQBjii~g%!P-a^;#3V@KVaGb0Q?wC2&$vOmQ+68HIj#T&(JLfH zz6kSOVoAuSLKyWIqKcC}z+`6<%k%#vk?(WSjqRi&z9+)!=3DISY~};E_#oAeCxNd2 zsPqu)h?F~ol`AR;D!B6+X=^zCfEGR5B>`QGVY$L!51FjB8n1riiR9QL{J75!q++89 z$V?uOuptI7D$3v}V|$CPxGNga&N9epqmub}^Bl2q53=}^agm-OppyO5y z>^xeH?swu~PlOGI*;f&_j(mtIwZ;E3C9pEHjU@bVL%EF-s9DDHe}=AP>#xPIDX)#3 zT&h5?&MSfyhH-E^+5{tlmO=C1dO9+6mn0-f6Seg5pm)m%d+q07!z%;K5m<%h-^B24 zKpvS?#aOVu(d2_=FnH;@faQc}7&FBZ6tb>$nog=C$A2iJh>jZPnD~I3b07{QN8=#u zY7NyGO@MiV%FyDn7`1(yh--s93Rlg=af6@8fVvVK{KDamkSQnmQG}Dodc0DSV``l-%g$wR-nv*8kAI@jDk(4I-8HGwyc*rW=LOGtVM%-me5w zc_p35#EWC#wky;ww~NGVG(h1*MfiDmJKh`Cg5O&;@LX>)eo@jOFP&x+mDcU}aZM(y zyY`X3&s_nRzg19`6+W;jS{CKY?D6hIrswUSitEPeVKmdbOm3{gJG(t-?Hob8{pcaB zb(E#;HjLS^O$N=S6Y#zJD)9Yki=(sbVQl|peznU2TCJV|r#}G^FG$066RPlRVL6WS zVQAC$gp8=kK$xi_?5WPimaPtOjO`_t92p`R%*)u5RfD?6>%k^?A+fj}PMj_r;C^ba z0HM4_95S8@t25luyrd9TPGGaw(rPLb`L(9+pAcuc9L~MCo19;|4xKo4-rTO1 z{ESF|`)?C5zI+19bva?K&U6$SYl>cTfbLPdM;e0NDTA$%WA!CeV|Fdxi4!2jCKenA zP8f2k65Znu5WTa9=(LC|a?Bxx9uq60LvbT?{>egkyE`3&L~7|8i&nbL={{`>)q~Gk zX>j&PA{6km@J5#uHdISOW%OJqUdnpL|FStnOd7kN%2TgbFX-Ca!T4zU4*I#Xm91HR za{adB@$fix>|9?6C*2$A^wo@=^w$9s*-Z1Pbqrh!_8_;iw`1dzNYLunvR{d?%aN&V-vs`W$KEvT-L} znX(>U)gGRAP)KhmRl_`pF=h|b^e4`4c<#}OIG$B2Am@epI zhkaG-?(F*hnQBzjzyi06 zH1Fad5y{#?ex9rV!=y$dg$4THAj3S+c|oXuNfY9K*+RFV8s>_}gYM8HV%y5$jDyqQ 
zoNz6ybZnrP&hLVwb=Rrc`x11sj>eslkLlDarBI+X9!5PffSE#};#g~E@KqPI4K2XG z)&2C%7a7bxo(sNmmGrTr3e~(6M2<{L1gCxf_!kB9z+BoJQ)|mGG1rNf`%Q-M{N?aS z<`cXD4UOqwm^OB z0_ZvcyvEc<+O52gyBDnq66e$~=qr0a9j>Ia_0JN2zYlC&Jwj_+RY~HlEBv_4yCIm( z8cn4Y;QYSSIQiFlmJb-q*k)VV4q`Ld-F-@L=Bz=}brV42UL;9qQ$^RuQ?T-02Gp^x zohxrmiLRPBCsP`WmHLwK=T|OHvX!TCv>3hIilAntIh0P1;S>VO z!CPw#N=+LN7Z_`^d2%#pPtb!Zxz}WxVLpGT*Ms@2_1Ohc9b`TZ)AFglT=k1a`YO{D z?>D8BzbsR~zsv$;?Rlzw+!R*F*YHmIrO@)T8lzi}kgkjj+!`vPO&xDOb0$8cy1GiG=pzeYsd}7v3Pac~8u6K&`lBlPGvX0VgO%tn zVZBfW>qxuH6M6-)>W4Ls#1hSjg#SC)GVBKTbqN#gKXbvNG{grlU-a1c{b2Q2HvQF zg@7UsNF~7HE0?Gp(_4h!tpinCTl5{SLydF+I#;*~A{9b$o@pxCR33~*nbNT1xD(ia zTE=#rCCsb31uLx=V~M6HZd8;;J0Df1GZ&F6wlkGjB1rUv*sjbn9R6w)@w1=jLDI3U zARo=);=_p`W{{0*MvKwYV*~hP&c-oe0r>Qw8!ogf+B2j7O`mbBs?r z8B5uB^Lp(R^pA0Y`M3lXR{`FBEdZy5N=V&KS$uB06)rAIBx6RV!}s?lnA?7k=$IWM zkIq=Y47CE#g*BjEUJZhg6Y%A}46N8ak+fHx@5o;0NBe&y;ce@1{;673=h023Fz@*s zP`qJBoce+wXmlQR@r?tydBwQphNh9yCLuhz)(hTUXrXr+Jkh7oj#?!78sEOQ3UrU^ z!{1%^{AM+2;T?Q&~X{XM$wcWu}nsz4;^4E-}tt2@O+eqCkG{n)Wt;f z4O)v^+$wRAtU3gKilz2NZ%C@g6!QAeMN&LI5p}M=q#f59>4YC!@NK;W^ko~s$J2l5 z;f1SFg`17u{w(h!?10xr_tS~qW+ZC5GCoO8L5qFL@GB+`5+@XJ%^TFI{(%YDTl<8} znRu1U_n3<3&Uu2V2EZcgU}z3z+yGyK*V_u9tVyWzgi;~?_uC$#i(Rq1PzfWQ!%4z3 zKbX^GPqY)6FJo=0X23OfVPfYuvLzOiF+>? zU%WpPo+mN}z0_lFdq@vScAgEje?F1fwyS}+Eac)FE)s3$dYo`^8os?K1?67P$Uw8E?t1nJ*B0OGA<;@Q+<&A$_7c z4PpEBNKHWq*7iiht?V9}u@2h1-1q?>7ns(~JPLNPbZqQ6P#z7&jZXXN>v!w$@y}fsLOU#@G2O5$wr3rAhE(bMFrqbuj2dUD<3iLXa3@x*Z zaoaT^w7fkN`t3>~+2Sa5D2Rv7#Kg|sdU>cbwg@HLp?Z;hXj!g38*C-s5g32|IJ9Eu;8UZKm1-;20|79FRbx@n^{U7+Y96+Yih&K6fU(s(}}Kx1hbDG-%~z!MEHV?qtAX zuw!2SJy)XfVdq+qQ&WdFrUkV5&V@?BrO=UbgFHE|2~$^<;keZeSdQu-JDcTCeFmuB zn+OoOJ%`$+%!6?YKJY>YjGdM$g&U(2a7vdJq!|CABCGbo#s}Mo-UD&GSz88~LshV~ zdM5T6O@xETu8_HhZ1GjC5HX0!!RDZ7?sif#j%i{!te0w_@i`Wz=_V0xOGzAfVFq0b zrD2BUT*hOZNpEi}1M&AZxWA8e2zc!$v-f4fpu02k-FDC!50f$5o)uFV)}mbA1az@g zhi&UD@o}va{@tL3+uBrNb*&`c>MNr!rzO!F?Ek#1I}se;IdNrXhJ0}TVcw``JjB;{ z!qxX#q%m?87|6HNbJkA0>GN#-tUV2)4yxkw0Uz4^qZYdcA9G>`J8Z3n*E+hHm)M zXl(Ca0ry_K>3nd}587f%Kzws9Wc13xkG+hCU|S3aGCBI_Aj@O!7GoV^2WXZ01Xv|w zhHqrY;)D(E#*v-sq(<>GcQeO^3QBw+6UH^dQRC;dI)6SanVw8sj@rV_eMw+p_nkiz zyor9h%kETr%IW&CGRR$Pr?W%NVXxqMvNvZYmOjyj)j0FB4(AIRo6aRGwCD z-va6Do!I?#0cfAJragQvH<-2zqOZpj$tANHFG(Fny~o3lum;?2v_kKRjDzdNvN`_} z;Dzlx{A9wuzj|gQXHy!9|obKiXi*uD$zN*5FWhpz_U{$p*t#tNK(Bp*!KKcO3bd7)~7}#>2|r=#`(vUW)gjuA@& z>wSx$ZEGDo7?r|%r$W&Eo+YTel|c6Sr8w}w9MkWW^X{tYU?p!%%j(L}?3fUKII{>p zzlZ`$?`>?h+X_ZYKav0Lo#788HPO@tW4L<41SOg8pLXSf_21t#JV}I1XY=a^rbmW; zvm+|bdB!-|K4Tr$c_ra`Z*Q0$uo6Bf zMdRditWRW>6x68BhZ%XMV6Ze0bazfep`ArUI6o7P3uWMoISz1rOeGO;E#g9s)WbdD zF<4VoO&5O%K}D8heijpqpYCTvgYq^EWtk^e5jBjo&W51ttI^EP9sQO5qa72Nzr(Qt zp7(ws4}a_9nQ`oAtsYRP&*sEmOcZUOT0w01EHt=to$RmL4Cl<)`Q#tX@?n*D@J9ss z7r6&UY*%6X9Zfpgvka`H_rMG91@P<2Rbu5EPHsgW;!LVD;c8+gcg#f)0!+&Jzf*UC z(nupZ$X%sFIc4}XtD3AjnL?k^r$Ic{P>ha~Us%^)PuQLs8gr=xS#9@xUx1d~h=D&Q%AwbL(N%CBT17?^}0%D_)zr z6NLD5^ts`Ri5dCuvq}$!{%!j7p8NY|8Zu)O+v{^S0`O(d*eT z)z1buyXN7l))6Xu>=JqCR{}$lqA+v0Et|3Kl5p1&YF+3_U6@vGGnh(l*eA0;vxYfV z>p{b@hYa|Sp(Af+pzKOh{C!sz&)LMmCpIIIEF~07PJn}E9rT$lPc7zMBj?7&qtH}> z#~zQTf+lzAlsr|)39qFWAF=Kx>r8CTtbm_vR=<|rggL2pRvLhMq zd~C$GDhuhIVnu297SWfE0^X^7FY0RC?ILZOtA| z?1>}s&;3D1I6wZd<6MB!%e?xAyY%vr24eNlk@quy&K2*^qCy3##`b zo5FYE@QGMDbmS@*+_DN9UmPRi+s5%rZkFSVuTycsP!oCH)JN58mSIyw3;Di02o5e) zBeD;-0l%4bN$z&y1gh&HI{Pu>X)u4i6!Q&|5a|yLQHxS}cyi zo-k!{;++r->HH#Tr}E&3k|n;|o&`4+XVPlxlk`B$OiYnlg#ioV3GGd%Hp@N8q>EK} zxBd<_=+^>yX~v9|E&}6&t8s=P!B@N8K)W*)44NiE(hY66{A>djE!j-k7k%L0&S$&a z%_+oc-58L1mj~670tgcghK0d%-&v#d!re9pxyxk#_6Dlp*fDqbkLa4>CiBw 
z3ei78u!*tgjvq@v9f=g26Md3ee(2#{N0p&;H=FIQSCgXyp^$l8g2rYH@Z~3};elZ? z=kVPRDC4Ml7Tdzx{Qy>hG4LdX#l%)sAP5JMt8%LJS6&HJiuKZtesNrJY$XxVh{K%d zWDMWr3~wZI2x=#jf5mg?h0nUs%m1Q&s#nRyh!Hb5O zc(Ztziufu}@0E;|UOf+I3a>z=X)ScRH-X6vUMgHs#5kan!6Vln>{h%q9)H6Mr$$^S zhLKtD*W(dYQmF4d^)Lse6m~(eM?B_tPQcIW-Wk=go{g8M<2shj*Tqe2PB4G57;}$1 zkfI-@`1zAQaY%9mEBEE>`PxRMuiEjR?t1(cqj;he6ay9OPtfCx(|kWO4lhUnj^Dop zUw$Yfd*^tOBI`0Za%z|)@?ZJsLE~_I)--aocRHp?E0bFWX*AE>0S6`?;BIo?iG52P zoxYzpu4nqfBB9sxgFz)$?QDRe#zgpbF&$f+<58k56}2y#K+|16QW2Dg(rb>;^e5Y4 z)98tltD}o>Q0f;+KGOu5>k8p7p5uB(&roYBL}<(&$h2cSg~Q3%7`_l@GPa{L>o;N! zCk$Nf&GH?yK$&ICj%hTLLhohJav_EER5EY&`tu}8ob|SzT>>8N@2Fz*LFbX#$)&pYwAsgl@At@rNxg5$c*An|dZw9AdHa~If>ikZ#RZ@2W7<+^Cj8i;jl7pX zJvPGF%J=T`>SJ^8wyOa<-zyOrQ~2LTBRDldkNypDr594gNmpAGZ)cZ8KThbOHd*6H z-`q@oZM#18{#!-mXSI-jC%#d+ep7zW{UuZ>cO#sO8Vl2w%YyDxr;}~pm*cF3uAI0{ zB7TVl&SzUCT-&pp9tbUkAGs?razX^0>-fhyh2`)asm57rCo|5+6i^Z@1*vt>IAkG) zK{y87=GM`Wml=rP`e@jL+hlD;4cZ?2K(GC0%FZ5Ro;_^c)kuNizHrsd~pHrs{U#?B=w`K4O~L=t>#({%kDl0{ ziB3-S_~(x%OtBP3?sGw>$HP<_n80`-MwjS|z;*c9K9%gSDgle2EyGwD{z)hfRSIR#KbX{dz^5V+J9UQdwYZ6 z;)!TDrJavQPY!Ud9~PsY<#J4pj>nb!SPY)S{QS!~IC)?lz8DitS8ox8!cki=Yz~8U zc1rlySOks6&4#Tkn+*zdjz>Q!`>hqhC)fCb1k-e4j*_GIE|IjNmp2H~?O6M6O= zJd#@gA=!B(HK7nUt~LRqzT32aY#PmMl?Tz^?Ly^2d;+?N zy5LuS_}X6BAe)56IU8Z5>^fgMZ5;RY;0N+~_b*z%+TYmZ=~ggN`b)GK*D`E48UD_3 zW__ROwCYR>1l0A@ZqI|nNbLtPWjlY7lyMRLIQ&?3skSEA2x=5;-9OQOFqhDZB4 zbW2Euz*ky$*4Y=o&i8@X9}%D#!}=5N)PcmN5UzCmc4)J<#h$52*s=RL_tL-zi!Gas z&S&-T&Rv0|#%n*d+SS4L++p75m@&A}XBzzOV%=V=a^Q^DcCveD2eq8E7aF1*Ipcx4 z&W-)5++s^B+^0>kY==7WJbdZ4OkY_Ofvl34>;@6g;={8F|7w-EX~VC0m+! zdPBdDOFTkeZsQz@8O0@V$4KpM32C5nHhh zu6TPu_p>?7w!-*(UN1?^_d_I0xd!!Q!|0``E;sG(Q<`mr0;%Ss7j% zKNfFaC?s<(y`a-pv%ZX!KH@iFlstJ}0(-~Q;|9+}DlPJZ8b7&D--L^B8?O~XLA@Nk zC7cDqQ|6Q4`I?ww_?-V2tOmE&nu6=-b-MPM5q(mSgp%&cMTgWHjLxR?ObF|$)h@wT2|5s={M&e)y(9hg zc_*}bmZHASJtFC|9lTre$w^Ti_&DwbeWO-}wkwMuYg!%4@{8ist^&v#Qox0EJE+X5 znV7tI2N6H;hNdrmPx>Tk;dNI8ZkW9aBKbyY^Q;C>&6a}I(r#EPX#p*Z{Atl8bKE?E z-7AEXh|K0HoPSsg4r-4Qty38+Z=?x-4#gmO{F)o9)lC<_@`moODX25=Ivwt3dPe3O zKKs*WuG88YRWq!Ji+wwX66&}@zLd1qhQO+e8nCLSpB!>zo`;9QXqjaWhuTfZd3v5J zSJr@G)@iy!pcXIa__1fG81q8y;ftPw%Ja`?;D$-sK0^JTW?Amp2dUBjCA-Y z@R8^U#^SWVQut3I4;89wap?n3k~Z@?74~D={`M%q7#Y&1wgAijJ|`Pe1K_>#3qI)O zRuGK0g0W8mNZutqbTbJ<4=<%on-jHE;Z`p8ttvwPv=crJduS}fJh|hRq+z<+UbjgM3?B6E-|)a|8>QuFY8avq)*eox-rmM6Za z*^br85Ov-2h~60i6i7No!X_8P`M0SU^xroUkmQK=J8zPCb|ti=OALM&ZDpQ8AMouq zgx%--aEI1L6v#4wIL8<~ul9!&hRniC^Kwu-zn2{RG7k5t+vBJHt7NCRGCaK!2VM7S z;o{w4($&{SWn5O#B9ll6DcOU&%K~BBD@6$S{+T3hzeDB=C1VfUxi@@GV;S>##`_-2 z;c3OK5IMS)zP&x3yTh1L+P>Rh;9MbE3OPXimGRJBwTK+vR)bm+l6d3EW4=c*7;orY zq|GL!G_U+BwaZ)&(Q}GG!9)hPe^Y~~jwa5~GZ?-b=U|49I`01#Lp=sd;n=Y_XepwM zRd0opUMZsBMHzy=HsQ;A^$et*K{}mge#~}|QX5Pq^MH#`h&81)$ zB7nnuG3@r42x{ud;QsL(`JuLwP8Cxl=;qHw9tp>NUXQ5E-|gV^UnDu?UO}uArV_!T zT=KKT56u>YQTK{+y7qcM{rP@5ZvWcPF9_Vre^R+iM~ACuFFvGd8_g-5xChNm6XAlS z1}=`#2E&=TD0DB8f9R?}AHL4Q4U9M9eJ}*$Uc4u7)$CxbwHZxV=t=CIX5&54x1{@Z z8Q#>c1}pPU-h2$>*?Fgcgl9ZvAJN3{u#KeuO&#Wkq!X=tF=~(CXLa;Qkge95|0A&_rPDhLj3r)k5=kQ;qsAn#+Oeh;RVeN&_0re zI$9c-Z8i}d7?xf$g-7a{c=nwl{%T5w`T=RM zNh-q6T@Q?B&fJ9t>vm)0RArpBt{!kFV>U#u1BbiJ)9~mV|6L>xu4!h{o|Jp^sGAg8 zP27a5re7z!x(8>jB{gMu+nmDn zjhv+m_sxT!ug1ZBJx?^az`^Y^rZCe@hZgY}IJj2=RtGMDJvVDGlon&^Hw|h`voIl8 z1&0}%<4%Mh@f5Lw7S}?e8C(PLC&xnA^ovCEaRl@`2x6Z=8W__boX4jkc;;`82aUXN z!Ot)V+-{67?e~!Ko2Br!eJlACxgNcbF2!wZ^V$2%1E&P&z(~C<$v^vUu=&SymP5BV?DpSN8irCnXzOHLE~HFblW0AE8p0?^5|LC)oER z7XSFDppse@Zj-q}yW<4mtzsECK2L}7GxyOKqZ+s`GlPf^uZBx*Ec?+D02bL6{L{w? 
z=wY@9rFL4v^k1oXZej>l?q7y4)a&S(Q7Jg*z77(Dx5Kx4l5l6%E|RO(Laluq(DuF( z&bb-G&$LN{g0{Iu_tV?k3&@9*dwjg7 zDrV_Fpp{1%L+bum_~uqj>dT+d?7NYS#XZD5U7!d3(y!<;qYTVo%qtVeVVW^^3A}A2 zAf`V+njT&yAv$aE=7ny0?$j`+&CcL;DutLmD;TNJS>pLjk|unNr+O0RV6v6XbI(n1 zdR+tc5_gA{Uv`s8skxZ*#|LU9Suca=ZrJNRLZYg8a(6NF{~8Pv=kf|1l=wng>WV=~ zSrE5+Z-aLB8TUI^)8OW##>IU~R5~mdgigzV-zqN%Fk?MOLE?Dbei#0CFPZqLi=s-@ zemSrs;dxyTA+)s}ry`ut4)Nqn$64pIULE(=%G+H_d zngE@#3r9!%xb1;@XDu~aD#IPs%6Ms#*hip-0jED>Ms9Ly>7n;#ZT>E3- z(aw2bd3i0ck(pf8$>%ed8KdVrd6e9Li#UoU(xu}I!D@^XHY{HU-b-B|aDp4mUBNo( zW&=u(ECflb8kAAdMvw1-G&wZ^K4;X@bBxJ}`+n@{2K0K%Rcp{E@t_4GZ zKIk#79!|2H)GRv%&aB6Xj7*!0UU`Ki?Q9s@xiTH=@g6w8wSzb$20_)rRUk6I9FBcI z%9zW+I8U<(O_{zZViODRpVfjvrx(h0l|YTiTdFx{A2Ge_2?;m%;?2Pd)c&{@+^3YF z$Yw3rl3)dHw?n{AZ~-?YS4@jz=9A5liz&RWLe1f7vYzEL|HTKw!k?2db-D|6pAm-w zG>^^E#-RKr6GGoGzo^PG%spQPFZ&JXL9;xt*{Mk4ZEPW!lf}21ru@tbI>{cf>=xm@cg>EW?3vrl}o|=acRk(DofcuzzX{49>O2 zac?eA%ZGbV+4dmm$=Co-Rj<=h)u-HAA)fznGaVh$Yd~v!A$fJ77zDzzF!&nMkHX#5 zp!ODVSlLL|iURh;C&J_EXmD|DguZZRh?7|fi^Bcz$+JFARO~Vxs+>sgtyLl>b~$99 z(IQMt^`dfuayX=$O(z^&j*aKEFrp&_XAy67*Luc(wok)^FNJ8<9SZHt4-hlgi{*Q| zIc3jztn(@oHoxDEi>A(iWhYldb$L8^h3BHKasel&ZbV~tY{QI{wIG(7L6s-3hfPoC z;oh|qF}(RLSs1pE_^&aAjqc3?`DI=N(JhF zS`3}frGeT3Q(`+-nf!Q2P`#pqUKdNmHMM4pAIvz>QSSJAKL?iP?AfccgHIomVcSp| zm^{+~^$%8j^uRHi&UW7KMpU5vcmaK87C_|OvT4JhJM7uo&;1(R2&3mVL+rC##Aw<_ z9){#$@+A}eE8+`9vU{L<;S|!=@`IoMuZ#>Ze%)Q;LXxhjjD}7XbSSzS+X4+be;mk1 zPoZ358aan>CyKzt=M1%(AI_VVi1TJ9UulftJ6dpm9Gb6?z;COklZnUwlGK-RM58DM z?2j*?mKjIM3YmBex$O)$-Q!^V^AW;La=~})tT*jgA|7SA{td%B82?EDq9299zI0d6 z=iKPyCOMFrt%TdSWIP$KPp>qay+gV=xAGe-fD|99-#30_cS_7 z6w2OLQ1jo4#6rLpvRYEP)JR{r^i~RwM~s2Z=?}O`8M{zpK$Qyp6#x;zrFigl8Wg|P z!#(o4Ft$aKq_Vxlh}t&Dtk0vC0uD4nzm>keS&va21kA36(tO(xQ2tV`8iiHWF&n1z4xBHTSbB2crh_QI$`|8BaGt{a!n`urC{B=VoBU z+ca<}9Sie4Y;c990`1MTg%uy>!Ih({`M-B1apsrXbl+N}XN{e)V9Q#TiwlH2eP=@B zZjc}EgRsLpn7F*og(Pilxaa<}Gg@jAif5F;1;q(C=j;i(U)$NZY(7u_q|5Nt3-+;) zlL5wyMsync3P9svCF-AlPktZNA!-Be7*!VlWr;U>*B_XF903HnjB(z)-#cwK*o+x$naMPn(3Rj`3UK`2GRfTD1&eMJmE%-0a z8?+zh;Sm*Q)H3MiPS(%Ee|~OkmdJ*`S%52>3`tjL0~h%_2^!n>;?+BSByF`XW(22T z?V=o5a>WoHl{KJBtr@s2_Q%?wY_R<4N`_1NX}s`#&iS_o9r-td4^8*R>mzgF{F@CZ zc~GC5q`et)-x=b7{8P^Fy9lwFk&829yU48P7f9yDNfFeU(=ZBzUM-_4gPFKn@G4pHG9F!{9N>>^EV=xvi`X5?z>Z7D$nfb_e$CT# zx@F8YVk@4Hva`bRv`RMWc#hEWE@_CPO!IwHL_%#x>FVxQWAB5%$&kYo{PwvL-Dbwo z{)`{=?NSf$bQvO7i?qmwvvROpEeDUbtDp|!fsAzIPzT2icqY!d!+Cfjnm_3y0io&G z`Ds4xW$zOEstwTcONfY!orVeN>FBvI2(~UfZEP04jV>nwv~jS8TKDOKY?uo!9pAwh zeQ^idn}xK{Oc%r|s>!8mX_(SGm5g87&U?LZ#DH(|_)Ihj=y^-fmMdbN^5sy^&hwAt z3C%ZDfSs;7c;M1b;BVwZu5SXAiLQa=S|hY?t_TKbzoChac_@}y4!u|Aqe^#Zr=j6d zn&feSG%2K@fr1tn=9&a9dT;5(xJa`6-&mOJoCv=*S7V^(B>Za@hG*SEu>Pw#>Kqs# z^Rjlr;`ygJwYcw`M9m}K*Q$*1t(mX(^H1_EHxsM(R}=S24`_^l!o_&ACu#()AaAIN)WxX2CkOn!eq@@zvowBKyfM#rDdYR z);MS=)kJ-T)$nqrAP8Kk0=sK7aPhpYBr`4re-6f>>RiSkIlTnt`5mSAEStF_dNB|> zQwLr-Tp|lxv~j{zD;T$<6qfrHqT}6U8mk+LMvYbU$iHkNFu#Pcnm3@K^=Is%ZIeINV1P(ok?}UmJ9No4KRnOYMTe~T+d$R&Yh=+oTz7ojHl?16VEu6t|MKBJ} zqOghbYJpG)>i#@EVT5gFN=e&_cGc;(gebl>M(*XQ$o z$6Z*$>z~>~m(0(DKH*3-`ydH2rXo;}B{0Q9o+>EF;p)f+*g0+^W}mYbWIl7mE-QBD z_x6MwgK20wa*p>;ci>|aDzJKX4ZQA`fOSIYg0YV>p?O#n{yR1f>{-54U+4*~u}Q*? zftN&A;|;&1Y&s;@MWX(&EUEk$37ai^$hkpZT)F%P`DxJzdV1&ROc_t=m^2UWu-T9M z+&`o`fMtwt1%l_88aQcFh*pD3VE%LVZ`r*;eOMS)CO;=f?{A0i4|NECtq}ZoY@u=E z4B*`^#*LRs;XD<#0Y_b7LdXx|eD)ocdCD>@e&2~k00D<1<+$v4ApiZc68GGyhpHuLL)lWW$DMPADf*14&WE zqHY3#D=-SFc@YD2g71$?p`bSGAx8j z`cFw*W+Og$t&XOXvT00SJb5}W8M?Xx$^2PUP^T~vZZ2I47w2<0S$8+LT&oOR2Cvec z8O<2EFc=nUiolubX`p*nohZ-qp_B4U=`y475Tsc{pN&;TBG$ow6uU)glv`Nt$IIN? 
zemtloFU0f19x>^w6#Ras|D@IUJErIO^{W0j4 zBlg^%0s~M_RhJdOv!z+|wNoROx0|BHl(l?!p(*;Vn2M_;RKTWj3|MJ*2@a2o$B%px zF0e}By<)2H_=OrWyL*%%-)<=>aQ?yTzH@-s*qP{V%VsPdN9e|KN!aC+hyS(ABG0#M zgN-RRAhCHI^z~k)j(1zhj4RottVqc9O?05Z+x=v zA~EtdAtr}>;J>hWaBo^BN`N8F*^`DB`ZFNHR)##@=7~Wk3ZTzz72~~TQJd+ypcHUc zuzGho|7ny2Y}0?lTRj!S&-y0_$?c~Zl?#Bi#X+V}1xM)|BlE`u6L6aX(yPolp=*n| z{r>fAKa!2rQM1XwmS)^5WkkI;Si*#tN|-)mM$LQEV9lS0v=YX3t>o3Tn?U4r1KyF(VvY}OQhi_o&X929p4VMCmbPaDl<%8Pf332j z3W{dzEc=tT?^+4VuVq7o@9E+g33DHU^!?7edItrTF({4UC`H#5eg&$DGZ%#N@v;IJwatX5utd zX=@{~sUqMx(nu3usgt*7s^O38K6*7S1`hY-Vo~E)nwnBUy#M~8$9{z4v$ZaC<`yOF zR{Tpg2B|?t;buHv#k^{93&7XBngBz;WhY+Rbu8?EjIz zUx)>{kP zUmh(IRP_{M-~7=~=4OM>ey0hFj?IQS$@AdsQ9F3-Rc&sNVoXakGX=LY>d@Eb8%=Kw zhnp-j+`gw6=L_#3&#qR$jb+<0c;+~&Utx-mGCcVzXIUIl&w*5*8L)p}2CZNnua`fj zqM_LY`cu7_q$`Qy932O|s4YPqMAEQRYddkMC`7lWd9ul^;e2uD!_VJhtstA(rlE#Mccx#C-)0zCLR9kUbbpyA|n$UJeL9&rXxH{jcx6CX`&Upu>ssZiE&YTGH}?+izZu6 zb8AJdG2yj7q%nu=?vEMF9ae=I&Uv78MFrNcSdRt5yXf!gIFPKpOmI#m+&;XRp1w1h znlpBqymKml`%MmhcPOIGiV&F1sB*tI1LA{$vQ; zbeeFnby$8EuxDX1^c36B{$(|=Jh>j@_Zpz6W;DD}t^lpe<>>w59^36U;)P2=xM;Ql z?A)Y@!wY+Phf!Z?z&pm~wk|`9W=ZJQTLQD+Mp2Q_bU1rpE50&nr<-r)VbP3o2)_|d z+&D|_G&^&Ci7&xumIIbstpM!{H_-T-`?!nb2x4E|#- zv#l0zNn9GYdT&Rm0;I)x!YEN)0ZPA=$S6A>3@(kp=U=Db?DMVx<;YwDTgP41W+=Pvj@CaD9{V25H79g?Y~lCv-K53O@> z=8iHFe2FoXkEcLl_g*--yOoR_>7g~|Pw2Si8DLjnLeHOO=Z&^lT7GhgxcE2^ejj0bf4BeW|9_YJ(hOl*N+Ufw z`x{+Vm%{ERuV_VsFq{l2L}eWfFzd}lx1G&!;N%8eSHc*@FTV5DT6(bmsTsBYHyI6T z^XYEu7v%Z}Ly$XM0o4^(xLr}7iMzB1@ZG}bC;x;_98iGwlP}Tym>%MqI}yY;I#E*7 zLIrEs{pb8d@N3?S#~Fi^JCH=P95_7uAQe)#C4#5dayYsuAAb78!QEi?Ui^N8>vzfm z?aoo`o~Q+3ae6qU`Gow@szABFke*k7j;ET?j*+L= zYOTjpSC`?xD-QT%MFHG;WXjq=!R)i!48Dx9uFCpE$GlB(yTKXa+F1Y}D;%-LDH5$e zy&+evSL3~nDzG5U8IG&WYD!1hwFti~V$~L^@M2qt9H{&8F8UN%wHVEO5E4K7Z59_wsD}uazJx-}pg78l@*qhDv zpIPhSXzF5GUb7Bz-y}k8Yd<~xWRLmYOKc9e?gABCI6|V!DD60y0>j<2;aZ#nbO$BC z#=nQT=cnxW@WTnz=~g8)#%_S+V-wK&J5TQFcC&L)7VIByfm74BqiR$MA)nV`fn)(F zOs|A5cK&Eu;6quKgYJp1z{*jvm|IZ+-=~DZ6O|$AJo6{H8J9;NeX=1Xt)0By_1DBr z$Pk5o=E3)Z%@i{lSr=|DUb(P|MmPt8#_39wS1KYO#!ZC&zGVI~^Q^zkU4e_6)ju~8Wz^@lqvRwi5_QW=#oUSB_49_GH79x0DR}I}6dq3u9CbUh>ggH;vU`K%$q<^f3 zQ%^==UE@D8J(2CvZPTGMXF68O7UG*y8$1wDkJY{fkkMO$G$acTob4fgYG$x!_9Wc% zSrw!PWay-;>+yz|GMqNogCnMu7&5jHL(fj2o(Gaq33c!;gD&$)70Z72PcgpSm8+uL-%wqF`x?*e|CI}xspa#2+O=#sj*WRr$863Z z$G2-@^nwyNFL8-nWpj;PF`4*%j0sx1i}Ss+FVQW7-gxzuFb4KzqOoZqG?mSPv#cXJ z;nf=aKCuuUi6w%v+)kpZERMMmiXduvn)nC{Ve~)-nXon)!h@1QzdHxdjT$0-w>x?5 zIdbr@K?hqphN;`Tc(|kL4fdWxJA5f5Y1d+Sm z(9b$AB?hHf{g^S*KmTJLiG!rNiSah4)Z(o10njE}kCT^OAv?}*gYpNgUo~Qk*~7hD zyzOSlnN*H?ADw99$21)0QjXc%weW790^Yvc42M@$^8;`G(g${vxo@*4z>Cinu*+%` zxndLtvzni9KS#U5;P*mwwn+qw&H<`_eGFqvc;KYTVi4F@04Jk#Ks{NXWg0Y5(k&K$ zWE7cS3qDCTHgn(+&`nOP9%fFb5WE&M33!VU`ux8ZZgo~Wwbmty+210U36MkPIiI&4P zaLFzJd>~V*5v2vQ~z6J{H)Ny@hDX6T}g1PtGXmupgis{XanKlXrB0aHs ziWAi5v)Ss*qk^6OU+7}pIuz1QgU#Wo@RQ|~79}OqU&?>zuhoyJhOsltw`N0VaT?r} z%AvtSPsn*aXDViYk=&{9A-|+E@knnXTm(0`OiE9$&f5sr(U6r6^{h7Ma~FnStoBb z>&1Q!dd^+CAp~bO9VPBE_2`v4gAB0#guHGh`Sqg|%o?|n(LLiqKS2r#jOWwwMMKBv zTNx6vHy)OJP=MX{vti0BbJW^ujGlj77(X%|jE;>)tCZysdaju6n3;`p4mQ%%9Cd#B zn%_iii20BHWn-N%<9>wMQYvVoOZ?f~X1WP1s>=rXhkf(}b0j35D8<{}PFTs9V_e%3 z?3w|4)c*s#h{x)RD#`g zyc0^{%CfICkhz+5mmVKIow@&Fw#+OS=yz5wYO0 zkhe61uHA!bbmE2)H+P!PZwb9b7T4q8KH6^vbMA`K1#idG zrsUnkSw#%u(%oq49F`eND26Q;Oi&GrU={0JIPHl;Wv93FSL_myc49M{kx8UiSdVns zi}B~VNL+uekJd0q@$nav7^|ulo=**;Pg^VLH78TlHk}5Viq%kbybvI$7}xuK;9JXb z>4&$w=*xgo_HNk)n+($64h_WV+wEc3rL%O5Q8~DS2cPX{0JfxoFOfV!rzw`>M2JMC zmI@G`Sq(Si>p-Y}69j6P;i;D^uo=ooS4tV0wd(UVZ*LQu$Gvoxu|3(m;w*RZ(P%W% zok)a!=aHOsS(vexaZK8pAuJ*dQxw{%D08!^Zl5hMp8S>$Y^&hD`1cV1YrQ0~k8#9& 
zL&yoW0piqYOgE)=(+4@q;Cw3)u7nL9b17%OlJhso9c9Mc-hPFY4c5Z8tTi~fb{-wO zq#71%5e47twJ>I?D)TZkHq+a+EHh$=65{#fwPrGNbr->rfcNxcR5p8-`s3ZzRXEYl zfc({}fRUrIFm**3N-1a49<@SPWHAe-Z_?w+V#i}tfeR#!$$x1> za8-91q<^m?&*th9SD#NLAmlSme;A1G2N>&eK!bi#XPMfeRO&ruA@z71fK_s-=yIfn zN{q|IM~!-LazQPAsu_zC8~kx}=NsniZN-8sNCh&>@!dr$m?V{jy)iY=zqJ~!x$h-G zY}dCx-4ZtjDPYBW=J~p{ko=aZh2gXJ>3Ky-tPw9Ht1eezUE(9!`@sii=mp`;>nG{z z=y(|WTM{~K50G{2+1-470p#tD71(Mvkn6Q`>3@3a(8``yA7e!!V|Nu!duB?1kDCRj zetsoivv-o7MWU$D>;}e7+4O_mY?Ob`*xy$@%(a&!qVsM$IDBFbQF@g{%bIf`D5Xnq z|Dr1S>*NkONBl_F^)9nir84lJ5Q~RKrhu+>E9uI1q>qyNiP^3@T%_(&TBJlg*gLon%lU7Vi6X(MXaiAlHq>U6f98+$M~^+KlW!N{lRxvoqc$Bx z?O0yIK^LzsR|k=Y1?1z{cvP^8q2Im2NvNwN5!DWZQ9oxwX#F29eCbBmbfpTLZuyX~ z2daG38E33}zlFZI#C(F2>QG+z26v&b6>?vBq3Yc6R6?wbhE~r-DSt(HoX$MV9aYdO z7l)49X40~nZKzgfgPu_-uzcondQwXg{@#C0AJ%r7bB{Ff;C9CB)F?r(ck%+k8}@fz zxB-9t@TYuT2xuCa5vSQ6+8dIzrFX+Tu~|I$U5=#-9opAQdOpaHow5;p^HO zD0_RA#Lm>hx>QZ59jgSE@zL~@{&pPlDPfuFY>=A}kJ+0m;N9~~OxGTR!xCP+MoB(A zx78;H7dWD-+6LHA5JNWqazokgEpTOP9vrbr=BI}T@p)G{xL$FDJKyF?yA~fMHr5Rw zvfY}#@aN!`nFz|yDa6sD_T*Ui8G*{xM`YB`YDBRRe0XpN_o??9>6l!FewzYO@3uQm z+9HKAPf}pS2_TO>1HY-|gV%8r5I42QBe^@#%y|ory->lNw%fpevK6kpH6PB0?526= zjZu7pF_~7?PA5(-yTF6!W8-ENUNHJecE z0gFps>m&1i9N=^ZBWb-|I^D=KzHD0+i1f;%+{1Vm z9Ai{~6F)kT*xq3>qng7NG4sg9;Jw^Zfh}&A-Asg6Lj7!1~*f@O@$?1TVA% zjSw%)y|xM7z1229HNO_Ccd{(Wx=LuC!+08vtW($C&KNZNcz?FT_B@r1Hy-;j2G?Ye zp7ezL9qy;{e>2eak|-4$v4+Dli$L>cserAd(Cu;uk&0z~>Wh}BCZ9oeAD#ib$C}fs zsf@edGaY?`8_^)_sUU4FW4$h+yoy``yi!d@%fA(1ntz=BV|lWKT`6EZrjj0hbdEoy zDFa2)GwJh(;=IE;ZFu0qdSrVP(Dh@N`S+0hbnfL!Zc(ozL`+-{E=O`{ki{H~7^4F1 zhK&0p)Pm8IWH8k>9QWN`kMS*H&=QzS)crJIp?fX)(!(4;7Xo0xm3i>U)E~nY%K#of zCdCVjAaisMXfj@FqbfTaiXk~uRfOBvMC){25sGxCkh`OmNVmo&h}=|-{rk7VR}mv< zdR0N!Utz9rk6a!!UQj%vhc^{IQp@#8bb|{|^D7eg`RhV({h2d29`wUvM-WB;o=4Xw|7Q~JPJo&=|=k6LJ+%L0ZzKiXVAuhRbjwPtJ zFa)Ap>QUi{87>{)Of9r`;MJ*l=#C+HXEQ-b<=f=U^SN+Zd@ZVsQG(y!6tN}M1Gd)H z(H$!%!V0%NBvtw%m1vnuR=JwM10Peky*!K9FOvqny>|(DoC5zWm*a&AlDMzUft%Ru z3jFJ8T&kx5!7f!0ljjdhY6_r3_Y=K0Aj`d45)bQVTjB-MLen-V<9};5f`*PG@o}j` zl~+liSAJKD-J!6 z<1pG#1Z8%m;np5;+;3@#RsUs!SlJ{zBYvG&3&x`7Pfys_^juI`V}=5iOLW7nHRu`p zfsFS`1>-%RiRRnb`drZ@y%z6xn2|S1UF5-lBJ}1v z!}u6~G!J=A&i2c&Pi#KzCSH0=NOLn|qs$P-fkloOwR4!PkZ&YWqo<>@wJFTn zo58P9sE3=`894LuFb%tA09*0^N54GB|CueIn%}&jTu_3J;@&tU*p3(WyAjJ?2lF>q zl#qw*DEnYKY}fI|DSy_Y#oTnNTT+2juV0~R-mc_QvN2q|6=>caQU=ov3h4X7m&Du1 zi|wCQ(X8|&NLUvL7mlr?1D91$-{lrf-5HEOOE@rR&$h~#Tl~tkX82024a#2MqK9WM zqIY5g@vBTK$Z}K37`Fz@Gbx84-W`JS&yW=*r7*SMlq?xmN3|0Yn5(A)C3f@i(kw3) z-!emkAm)L|3Z-BBlHt*19n`s3NZJ;y1>wkOVzE|{?)&M4dwP`dwclkT8kz<+S<^9o z%p@$VU5X!k_i>tI#$fxyr$p}12dd-Fe1Xhg_;Q9eW8$*hUegTneN_w&UcNzzN-9=1 z2Vz3iD^BUfcyP*Q7FfM{{GpMG8Lrj5U#JG0guQg`hy>IwPh*{qe3*Gb9we_UB1u{^ z&};im-nbwO!uGu;;s=*u$E{@OSTzeGcbnl*Mh9oG_%ZpQAqi>Z20mSF2%PU@+cPUrKX-vHk9XP;3ghK>pduQ7MP{d zVQp^>jk5}+QPd7k8Lfx*>KJ%8q=LKhbD?vd3uBoh8Ld4DOI5vb^3N;Y1Jp3((!F@ zoaJ0vzpx&q%v&1Q|Bw09v`mTFUPIe`8yz2-0`)VTp_M9Cws5F2rbkVoRW8vW8A`rBh zfc1gtV6`P5_2x;Ti0T;ZO{;;^^4SFTl%lwLD;i%fz__rjIIYD5=PaL_1_);8u z%&mqolN=QOu4&3T&gop#PaIsGwk-wi#-wLd8fqgbSuuq2dYj&FF z9SDa~v3|mCmjL(0pmzOqfHYCCTRstP@6G{@^Y@6*hE@1n=N%FLvI}lR=YiY7@Vmb<_EU;D6{<@Bz5KNxX=#fhaT0Xq>^LZiZsB(uo}t&>bQmil zo*w&nfre%JlMinBuY(5OVDhHf64zcmVLQd#e0Q4$s(--;KOrQ`4B2|yjA z(e&$N`2BAgI9Kg8)08P8D?;obAIq^rKL>`EHNh>G@mYCS1oi9fV2|`Dw8-`)UUzcg zvTZgoh<`;6>UPmnqAK`CEfek)mZHFKJ=s|Pj#qwpg6_EAfOSHPam;oTl;YKiw%BMO zScW+QA#`r9!bh*x!>Y0hJiS+wHitM-aT8OtBq7kZQ3^)C)CCt;U+61Q$D4VRU~_v4 zjI}7D{Y#(lj#F=QV<*;QWwah`Nakow(q{bA69p!p@^JoSF?L5bg|C-9iNtCVIF*sg z-Ak`P38O8TCVG!<-t2`p{1%~a$_48EvS#hiC}u`y z;7E5my<<5R7Vkbl4X(SSt|^luB{i(#oN%#Rl4^4$;HYw_=w_135Te10=h` 
zsrveC{3~4#yFFM=XKfSe%fxVDPK9)Xauqy;nUH136QO^n=(XZtY*9W-s$YpQ*QFj; z9F`ATTv$ijBNM#>Ecl0Ja`^c}6+heLDHmzCfUFr{9t74)nf`1am#y@J{(hr_V^d~9 zi`f%W+?mVxusMwV8H*01mf-w93h;e$BCPAb$saw;&Mbkxo6;?8m!}jsg;{KF8^|rf<8}&Ra?MyWiTB7 zE@3t=pbk2uLIusa3dDa*5@}yJ$d@Waql97~nHnKN=~3o*eLf3ZPlUniIUU4!Z!TBc zE6Y4+HSpHEnC+Cb(e|SWsWO=aN3YI@-p&$qoA8jxRpemcsWtGGIgHe%E+wPZx{_0} zMRf4JETr(>Fq-vH?1f6n-WyGz+*S$hb!(CLDTN2@d>$LzWga{@NM>aT;TpH^birje z68%vMZyniB^amv1*`)bkB^`)w`lsXNx*+=9O#%W6M`8KSiKrO1gZeu^C!a!=aK?QV z^vT&a;(Rj-BiJrsNXrBsH5J3s$meYLHy!G?_&}El^TRtH;_ldwqt5=dIB{|Zf7g!f z4hIwX^D;FsTz-}ApPGRi+4p4Gw|vg<`UK1IIdmV;cJDRvjm#~q`u z^A$RCL41BIT%WNN6YVGAo4S2`m{J!0b?f7szQw_4_VXp9Hbd!zNVZ2CQRKnm-6?*Y^a=@7&X6~2GKPeVX9I&`g|%PK__%jW%4Gf%UZ^FUhCq*S;jd1 zaSQ65`%ZIKrPE1lA5nY%4EdSG=C-0Isj)C~$a>X7wt*_VNeafs=MnJjrz9$H zGv*BV`dpmu83`u!`9}EoP9eS%{?50)UkCTr=7ZI_1TdP>!kNyD0KS^}0xcMS%GiZe z491|syJz(C>jKcc^q%IOXho5A^>FRqMRM?V1u@Z*!3N(FI8~7XCs(GU19Nu1KTrWj zw6-I?dx(60AccCO!C*9I1H4PpXF0bX%3a8SO)6#7e{dOmGMET?qUm_q#1+$X2l@B) z$ryT%@g4pcL9BW){499ES2HG8WPT_HE;&!u=FcT%Ve&AiI}dY%1GtX3aTtGo7G{cQ zKz=m42Wcs+`FHf=S>_^?-^Q1ZvB7fv4pP0+7j9?8V7T-el>NGq z{$69r&8y4-n4JuV_RmJUb~l*nJ|Dap598}K8|b@N!+u^a^_`Lrqcmz^_pL^ zNi+iz64yiUxGLCP;EZR)Zje1IaD$PlGYzwwK|3%`DjV zaxr+YPHS~e2n3o}%}PXw z^6yh5cPmBwMjb_3YIx1S#`UBby0rs8y?S7dGLZ*p$wBkG$i zk9aj1T$(qtoyk=EzVQKZ))<3kZB@AD<2*hrnHPnjE2JRt_&xe4`5)CxPJ+OA6a4CA0b}2)bC-`~&~0a2F)gSO zhBk?khu&j}!}|nG$_RwA!Gq=}cZk8O{XWppbD9J;&jE#abu6y_L}pdVL5_tsd~fKb z<$H`E_`rvalC|L}gtSuRzZ%|1K^NK=bBtm>8~%iJ5_pK%_X zEm@6%TcJ2=C=IXfh@#iq=EA~z!gy_EC-s}JgSl&-k~MrWQBKRlmPTz1OWH^SH7mjP zMJY}!FT(HyVfamdkjsLFbW3e4ifuB)^{sv|hdC_Ee{e)Fd#1cypu$1zqDT_P zy63~|3E5Eksf$XV3x>vB=gH10VVWn;<^?L*=v)|rPUca9-m1+YopFR*{j`C87hMd8 z?n%Ib*Q@b-%rx>RHkmt7B?WW(u5d5LAED2yCgB6SX!z-PlYBeu3JyzH7C>GYmTXGJ zD!b{V<(ef#tlUq-&%{AmzJK^1JCup)9_=(8gx8cR+XJgnd9jH2)f*D_W$&~TQ+}h9*Zu9r`D93i_ zCGLl4faG}Gn#%G;M`n_$<=VJZUlSDHwA6AeS6nGXu=lvis&+8{k5-s4Y zkSiLrSK!yZl`wcl4fMcr+%8TshD7jw+w%DbH_QrtNBy*%^kHE*C&hV>z1S=(0OH^XB~6jEWb=L21h_ zdg#0e2Az|nC#Sun7k<`2kVOTSh(4v_RugE(r^7Vpgd++ESdzCrPt0e$b^ybVy<}(H zayYle59La~*6ubHL~eUU_9muLdD9kJ$1)l7&4Zv=c_m7}v;^0)%iv~*3}aM1 zBg&uWfo6L(jAmSxCeDOg*y(|P7yI*}eP(!oG4KuD&(If>TDb(qrq*qIPh_i8v8Krp zkDPKNI|Bnz{e=*W91;-M^UV8xgE=om77_1hW_W319n21Khb7f6;Kz>#Y5#x3EGrUY zAJ;*9cNV4k0Jb;F;fCf$;`k*1#cVnLSlxVd`H+T@NkNc$<}=+zBiNwj)TtEK78IoM7~v3!Ep{K!TkaBWwH*vi6QAM#?!tTh}ta<#iUc=IY|A z6HPGbi3|L9J`FuoPSU}VJb1NdfMl?JMg4(Xn*L1^eOgz*k=3WD#`IEHMizozOe-iu z1f=3gYU9EHg8_+*Io|*UPgMj8)#K6eFC{iB7|Z!j2$#XUlKQ6o{8pVNbicV0q`eZ% zZR$sUM>XGU*smT9$IyyF@8ugg=RVvo*!rKgc|dzpR;*bM7CW`fbhQd~2t7Q)CxH*e4`(3YryRaRrvdUEGeI1!9ZXbII9_XZTyw4YBpQB3{Uh#92Kt zWSY!Ic;w8wCHaLg}aF^_pnT0BEr=W22HGx@b5-L}4uyE^5>NTzm zyxW7}X>kH1?=QiFK00t9+8PUrCt`lMJp^VIW5lNW^mw5xEbCCi-<`}Azr`DlE%U{c zf6?U3)<%qKWInQ0)pW|JI?`G4kxKQ)Ld~U_BsVY!W25U(>z6F1ypp7+j|r&3_+>2D ztA(n!90a1uGxbCy0R|_tgP1YJ+p4mFJBkJf!btp+gTV68fu{1un5Y2 zn*whR&4>Q^y`-<=7|ERMVwS0wiOGMKlXCeca@O4xMv0n}#t6oE(pIGLLw2z7Z6VGr zSWf+~x#Eh{3FdM?i_v0%3OX!htogALaPsOl0RPbt>9rJsP8;I(2`|X|4^XAcGV?RVP<+4*rde0Q)ZrXFqq+h&G;@%0#}!uVM8D`9l&X6`fVu;k9u#Ouz5_{hAMxI53o zsZX0A?VK1S&(c9*|7dWq*iTRQEkTjDp|~h*BA%7LPey_hNngMbYB#loI`!V9dqY=a zaZd|GM&)ATpJ_B%-H*KYX*Cxu)1p(fgE4^33xa&K@pM`lJUdiDRjiUQ=#L*hsSLns zi8N{#d5xxS+luY3kLaSi{_u@tVMx(LI&{>()<>=(p0^q4+)_gAuO2o3q_hM)9A1)F8!yqL zjh)~ZWdkpB-Vn(VN!T)G9h6GUqK9Hi$ptL|tto1vFu|NtnqY&Hk!3L9;%1aPGaub| zI@8Pd?ZNRj^Uu0R(RJn~FxXa!1&mXnNHsuC;TTcqT*nWrS_S^3n`XUKBwL%k;U;5B z`c?aLwowUSyI~cHveCc?N{lK0&W8?P%Ea*IelGms6!WtRt6;y?dOmu5D2AS7Oiwnq z&W(ttb&?Ly(U1tnWF2@M%7UUp;%L664f6dQz)v(73t%9f>&>hVQjET#nk6Mmatx!+5+6}{%tbZh7~xiH*oI~mufjYh}P 
z1vvS6E$nm4#SU0;Yi-kKoj+a;?V2s5Wjt| zGH#cDLgs0-pt?{L>&w@|)SzL`;^Ar>&$>)W*Hj^~_dc-e1GM%x~v$zqBiJv1F1emR4|j6f{&+e3tkJ>bsKlXPT+IiX&EBFb3>xax&ICI-sm zSnnD5_y)V5sz;!xb}ainF2k28NidxFgiib8OzRhKV*KtYWKO*pPWxSn+iqyUK>l4K zJbeXLg;qn$-M>7s_5eZ1Gj>2|#*TCqG**v6iH9W+cPtHrHIk^;3Fgf@v=pX2yh~1` zJ3-mlZOof92I||}dG1^TeDjT=*Q_Rhh-?!|EM7oG2F8;9yi{mdWCZC$_3-MB0U34b z8toM>;pD&M2zD!;CHCLLh=Y|9E?X)Gr)9oyDVP25m;6*NLU;qlxwQ&js~sW6>|L-m z&l@_1WijY-0_`|cL^aPwgZ=cy*yPlVSGJX6$u0#5E_g{j?l=$^ITcuIqQJcxXo7wB zh2h*t6;3{%gi3YgxcY=A9$ZpI)&KmYnG>q?j>k>g58W zWYBliOgKF$9yrw?^5A?TGz#^TTbgic|8xr{O5vISOQI6e~5etZvUVV|vcD%)q9u0^ev5lG2%dQf|i?)}$ECDvx( z`-d}drFJSk*P#dZmlu*U^KBqrE004$;WX#{T=?(rJ$k;o2G96ZU|fMa-rAEz_KB3U zOm`vm_|AF}J~!#2+OIS}q7n!0*kr+ z9qlnsaVS8`RT0opGeCqi@=#NqbuSJkL7t!(|KNyyOADF}5CGd)VU2 zmUH~b@fO%~!UEm}R>8T%T<*&HR2&l5gUPI?trNb8NbhTgo_WlLCc|-ttdDSi?n5g6 z$AjJ$Y#>KFQlZ+a8k{{-p}pe+_xy)G<%zy9R*A7= z)BkWrhl()dE`hu_Hj|aAAoYE<;IQf<=`QpGT{0a^GF$lR7N>aM$~1gml7L4&CcvP| z4&r{}4EMl0oplQuu;orLWZs(u`Ru(aZV*I#MlOF1{A??gb#PYN*{$BKS`5=F8bsjohwSfV2x+SMSKT&6=La>C(+N+gb5XHEAt8DfzBM>2VH6P^)1Pv5Falh5VNKvbo1&G&^^ z@!1g<)M{bBry31ik_2N9O-R(|<%hzE=g?}n7FC6f18l$CzY{Vf=dmvDT-Y1Ghs?dQ z5t|>dywrq7>c1)n-0I~}R(w1C+MWP&JB%^HuZnpLGU2(*C~|PzCo;_2 ztcRc^*mokG`%Mz*sE#I#8>a~WbVc#elQLLwuns247Q*wliEw0K8+6Y}F^_Fg0_l;n zwD|50f$HA?HaDM7yS#UEzdp~P4b^4X^UIaJU)AWpYGL%36vHnsC9%H57E%%lh;)%M z4PXA2K8Tskna8`LtK$yPo*sthRmMT_%3KV3#8 ztd=|~)SAPk6Ta9mF9+4;rC_3SHhwPrL+WkAAuI7M`J^|@Ej^YAi`JP@3E4UDt7kH9 zNQomhGhgz@X$FiwE`e%47eV-&BmDTWYtgSy7XKbE!zmG~p|j>SabLvFA-S{gf25sf zIM;9Z_eoY686jEONtBfFz0L^9Xdw}`gVkk56tRT`SAAEiNCJ4HqJ z^}iq9&+hwn93FV)_Hj$R` zUnTpe8N=ch2b_C9npbfk1?t~_Arh?f_{EYrE2XxgsCh0{%RAClThh?+{3x;X_6O&U ztD(=i22$Gp(MNhQ+=As9-##Mz*J{vO{2N!|*GY~?u0ZR`bjaK;MXOuQ5mSeX=z4>|LU-^`*n~=g!F1NoR z)f40CzKLVBUE7zMdiY?WZ#gP1QKom#y29~#A>h}qfe)(s^n|<%Ddh7Yn(y@>&NdfD zhQl%3pr1xBk)UF)chdpJg_$eSfc>MMxS??^8rM$&*8{upaH|H%kx-}ad3ofX?Ig}I zIhxnGI0TGqgUGO#9GukLg!}_V^xl{{p19zL^0xsU#`e>5=N^%yw`?yi7L9l8HsY)J zJkp_^$n%UVMo0GyV&~II4mlLTGnE>ueb5VhS4lB;X*O6VB;j(+Dys7)8%6HR;wSo? 
z^eV`Lysj%aXJo*?uzU3AZ#z8oBa%o>j>m?7)3BzyiWZwzVg2z2S|wpb8YFL0No7QNk^MgKJ(87B=q=6q}Snj(o1m%ubVNt#(q01ua!t15vo7X4$ zKK(FRxp_OBzA~T8WHV=t$61inZ^%m@*aFRp2f4l*CHQhj7;PwG{qkX3I2Oq=5cdy~ zUgPQby~UDO{{ALUsHcXAAG3qb_ny>ROAv}3)=m>Mi1r&!h}1;bg&oD(IOGb z4{WDT$paGH^M}mQFNI#`cygWjQ?w5P{5T^`2kdr&?cZ8x(Tt?`QWv0e*FG9mp9f*- zc{p!lJ6SH&N7Onsz(*?)<=-)8m<)5cG6$`ZSq=)OOd*fIr^5UOZ$d@byTr>c4=65^*HHyw46TqX@n6)P2BN(#>Dv@gT=p% zVMEYXbn~hq3)ELb!@YyNGrA@`!P(D=wbMQt@gbgQwq?M$R5@vV6oN;@%kk z6o1o3R2gSv0pBOGsila`5%Zz_?scNOXagRq=0U~r?v8IN*=S7HlGR1bG5dTydh_&9 zX<0pOzq^kYPztYx zL#Ph&icZiHxsX&GtS73hFa0~L5MQfG!&=i^%%7`5X2t58<+_zZ@RU;2p8ka<{w{#y z`neby?n7F<3ovcu6&1d{lycp(c>PZ-kOi9AS%fGIhtEPqmirVwx1E0YW(aZxf!L(^ zi3Uq0qRF#TynWdd`hK3L9bT?*k*$%u-F}m!r<1WZO@P)~WrKIFG3ot20Y6U3f&3O% z;`hY|#geOd9~2aDLqI9XVHq9kYq`Lm$0JqE)7uZvl7r%-R%q^D3q`C)JRq)(f(!pp zuy7^IKg}k!0U1=LSrvD6F{j)UQ|7m0z00kPE5Emat{!?#d%RLnQL3EHg4e)P)?4$k z8>fN}_Rw!q3uiSoZ-b^m0(!V2dJbfc1EYTo4Ga9J04nGjqpI1xBBF7*0 z@upZ0aQRP{L4}wpJ+(LsInOjmpNG6fGD#ik;^XA^{cZ3p;sSZLiSeApOQ9p^AMp(= zgwkevB5#re+x!zri0%aZ!j9Kx?F%s59uCBizXCF|)p4blCVH!}`}?v85o~3%7_o_P zp7}_RKda{!C+wxuUWdX2KYyY>J&smgos0Wkm_p9YHYz-JnitD_q}TVpC#qi!XvJVW zJe-maZ)E-GmpR+WkH=XU+E@xxc?B3C!(8tBobguuUN~CTPbH2mMT1Tie5zZADR%Ps z?E6~G?R-URX4liMi!#vK^_@md7s6?J0p#wSN@6d^*tA_4xNL16Y&@Gm58H{t-LW+E z$)1QKk5e#N{VvHT(olCw7luy_5fNp^&A6x!5nltL?64%RS)Pnbd=AlwwmevUzy)6M zeW4-?qUpMaqHtK_Gfg_LO*>9GLo{6EDGUWuy`=+WrD-wQw4@xnXO^JA9Z%e2XOD(` zH#yp<2if}-f%nD=B-or;ENBW{tNMlVRn~OGcU1s)*c#0iC5FFrL}mFUBMh$tuk33g;RWZ-9@|CWk$W>4$G;RcUv!Wty(pYmcbr!5 z(#2C@W$?HE2Cpev4v#&zAYfF1CwQJzR;!n0#lGNVwmM+)E(xeIIe%syI}50;SO}Jx zx{#foh1-Gv>!#PEyHqmo%wACtxVH?nzT4o{>W93V?VI7Yz?Gc1pA~Gf!SR_ z+!z%B!AtZ=R(UX#d9n=fl{l)(SAsi}w85OQ&epN)t>zp)s=qJ-uC#f=hQvxxTceMw zHx`m^<+DV3a5L~djv*#q#i;!#9X9W=BY{wWLIh03sA$Z=93Ux|f=SZ|y43DQ1-AiidiNsSr6`FBZYrquF^O!xSxT!% z5+Fv{2Dj!VLcGCYUZwLT&Luk&F6+&P$eo)pMtT^NPJ!}E;D#v3!^ajHuoh-Aj1h}LYF+TjZyPn@RD{wSjJzZ-P<_b4b7 zGlQ1E9Ojix!+|+6pfQc*F|O^Sqh2Lc;CLqNw%(1G&#t2T`I14k#Rr0Z)}ZR6NSw;A z0-ce6Xt~`MSbCC@j{ZQXZkmK1H)awyfiR3cnT67U4{7)IQdnEhaxO9fP-Y(vi#p0t z>b5^P{E7pmA4h4ISti;~Kx$r}1TC^UI3Yn6XO3B*xS=+_U(rO*d#ez>>;+JcUZ&+5 zET?p6CwTiQ^DgucQ=8~0wC%Paa@|8@`dA{qpBsl;)#gLo&ATM)Y9PINPJvz;`9(=k z1+nI{BLDgil8g2i$iI^Aa1(@gUZajeSx_QdM${ZlaQNL^JY=w+>!*i#PZyu3!I3Ips9XoXwazm3eK4pL zsgQpDVQTw03_cH+KxwxFa%-kx37b>bH%Wr)^Ky8k=ET@TRq*Mw4=1kI%p3gfi&t)^ z5N|JAct5s^Mk-2^;RYM5ni>xuEer9Z$4q*mrvN_OzDQ@)>)}47<#dMWEDSt07f%ke z`CNN9S(u>?cWd)-apWoTolL?n@{6&&P8HQ(FrM%;#%X0tZN;ukTz||L*3C_Yhac_} z$>r&AL;M8qE1xLcr&bCpeNrKGA@dPTPJ#K45}{UhE^K}Kk|u=CMJLH#o`1L?THSKP zpWnq{@s~=_Aq7}IaSw{=+d%uxD*VN=WZDX!$&r_J_$c}}NuCpm!F--Xx3mmRSWij% z`BU!nqXJl_e}EH~zCt!uNW%Jnh3K;F3mrHXe&$$qHmthx9~U6r2%n;u=TcoACa*3- z{=`md-I#&>XR0xF?;`G2Weq(3unD~0XApI69ZIF`#%~*?@%CvUus^YwgBU-=GG(X~ z%EoUKXA_Ik6ZAoHJ&0+Pm- z%PU@u!}Al>@V$E+9yoA{Jcwpp^2Pb+F{6Y&Z27_6x@d`I`*z{evgOb;-bZhhnqsxP zEj>0f72h)k`7@JJvV-+~r^)w`?!5Kj@N@xnI3f`6XxaFY7YRajwEo;h`|PBcALViorJv8GBt^8Q&Mi!|z%R z+Iv=-7Jm=JZh_f&J2My@rszY#4H-OSEI>lGl+&v^@z~k-o&0AYfxf~{a8_1?UUusu zo!Z_sVb?=mzC;4NS(k+tMr^imvXdlT+m4Mo#dt`M<@lye$3u z>f@hqX?T4qo%;Vg-r-Zjp6PA5a3s2HFzxcJLl+9PGa1>!N0!_HpE_`V+M)j&MHA%(^ZZ2 z++LQCiv+WMugEKz?;RRVDOm2Ig3*I#N#m3%NSm9=Nv^TLkK@vyocx=YX(I=^xhix; ziv^&74v5xyqU+moQnWXm#CYXEW@{Rn9M}QMAqRNpOqY|xMz=@`%VAZEy5X}9whq}G z0>|o-!HG{7XTD^t&mU{xN>^yj#f-J?@Gkw9xyJ<_Snk+E6nACN7H>gVd&vcFsx2iE-+vK-+e=U~EFN}d zwR5+ex8uz3m&hxT1$g?|a@>DZ$6VpP92}9Yfr9clp7=|azfmrPq=*yLY;r1BoluQx zA3Eu(w~i2$@RjSCVnFrzI4aJs4;g2ZVDYKtDDg)X^bHNsklz-XCK;UZc96o_E=O2; 
zGzX21^cY*Cg&UGzid#aqV2Xk<%E=m-XSRyKnJtSr>DXiRRag&A^e!Y`_8I3kh0`Q_o4i^*+k=FP_9M_!;L3gIJeX=`73Zkc8kbIq>`W>+eDM?eo`o1>+!$W?1EFA>Fi_qq$|9D*1oX;CD}&aL)LLyb4&vRT#DenVihPm z@}O(pbIyjXKk6g%LH|P&U2!lB4+)mw66qCOlF#WD z!i2R`U}O6znZnlaLb-yx(X;ITeXySd*vUfsjcQ2Hy~wK`@1%||>tSL*DY|GJ<7Ui` zCWj`>CVofbaCosiY6Rwi>k%!^-~vZ(cMOx$eC%^>TnSw*l&IAiQmuCpbY#E+cUmq4 zo9-sqIFbiBQ6{)mQV?R)ZSlHe8M>CH!O?Pga2U+R*cDYoGP4Nsr7gLolG()goj7=E z$w9fKKCxlh$I^B+oEs$xDb`7-mo^PHYUJXwiUOLyY%VCDGC=uqq#J(>(Dzx2SW>Zw z?vgXcw51;SN6?u%UOz`eFK!@CvyI7a-8|l(3qs^!aX;^ZgF5h?QXs#tkCH8a&Xb=p zndIq&S~4cT6QnPP(!1AscH3#?pk?}utWowP29J$#<*iLPMR7eD-yRMBO|?cJ zP7QvI3DXIDnot`YM8Yhek&WNxLclg@kgq00Xx?5(AS=;j_IX}}Q3=*u48@0=D^R{z zjXPb-I3DSpT!+0ZHve9X!7-)0zWqvAmvYtA=}H3R-dheM9fwJNW-m=KD8{?HRnX4S zi0)&(=#;@xdh&rZ#=k3qXxppY%Isu#JIxOz=V?Lwm111|t(RKd6vpOW_8s1j#ECuY z@neez^*eNfK4x>9#TBc`u@@3>X8JnfBD5RqygzW~yyKzKy_h?v` zBt$W3#1kIB^MXlt1n}v|FfDqu1r#35#uj6H=4EW=3e>m5VC1cmKcJA{{PiWvP}#q51(riU6?BjGmAXUSVIAZd&w!{s+n7f< z*<8==A6b_Ake=Jl_%bOOWWot2`bMk(!lhm4pU;t?$ohI6Tffsa!D;4F2Tj=7S_#Ns zodKn)5~yF8$%~L?@71ArxOvkbY*@=iobC|I8(H{ySskqKJ-{txOx~S^ zHn8bbJeRUA4K1W=aj6+Q8wnaG^5#9fyCcFxBs2*h1l4*Dlh=#tU4eoD2MuD1!MTGjUyO9c;^7LHnmj;_z$1 zGZ35z4_7V3#I$(cE;WLElUH%S!Z$;Kj5$%Y>;W<9t>%IQSE0^1IsBzML~NH25v9&qsB+8%bADREt6C?j zwPqQZ8Mvp*n0(*VLb5iL;Js`Im?gIf-tA{@L@i%fFKGrn4{Whe zVGDF|BXmdZNn*9D1h-$5p^Kg5(B_pI*6M~}cJClJ&)g9M(;V=Vc^N)rGsl(dw!*=z z74)sEDZDGJATuT}#wtmBx;tnJc>OwH7O=67Tl0Dmy&JU!A`foGXCDW6?=4s0fXE`q ze>=*BK46&Hi*D$>;Ujl%!yz8;&0_HXFM$?5(#Fd1T3oe!E0-0X3JI*Uc`G1|Rx_qk zt!4pK*sj6NJ9021{mF+j*-EuVOO&>C<+@xvVb;S$~d_zW%hF?5^CYC zf)y6prQ%l8N*aA+0w|0mqETo!cX)Lo5!um33$Lc))5H(-Sab(%RTw3*GEF$!HG>Sw z%G0m)o}gc-i$)bZP;k@cW^KJj5`!!8^Fvd*MamIx4;z89T|T}W-U-+0+03zO85|-X z==AIiJbCFdZ>3*0u64ObmeWcY_{2}$%*;{GDI6-Z6v*kL`Meh{8t|>P0#jb9;6CUj z1AWcBv{w`GmELwd`hGpo-Np1;tqSE1sKLu!73PndLUHYI5r$oE<;{KJL3DJy;oT!W zuvo%)_2YVQ&DsEN{73~a)(fZUt^TT9u zy;BF>H~$*ZiWH({1KUV}=P*5Hr)j?NW*)q>IEeS}DbPoPR#3WaDws>&AvznppkhWj zCUoZD0p@A#X-WfC$BpYNL1uIn{=2-7e)kBX-KY4W zq72E7wME!`Wo5(l%kjJ6ZbQ>U3P< z=Z*7yw8;62`Sew+FgUaI*Zmvb;2kc78^-L(1^FTR_w_#VQLzfEy7FL3!VYK%(5L&4 zh~Oe;=FbhB2lT(auyUg)c^9|_cWnIU`>)yL_fMVKy8i##t6ZsXD}BJxTe8po#M z*YOr!#c3Xo8?%8<4+rGE3W1yP#q=uc2}XLg6YwZ5mxmJ8BoYrPS};+K%Cwv~`!pTzv@)iA(w!o>X< zAb#d%hdMhyQW7vEq0Qxl`;~#2HoNHK34hLPxo<o7{_8hCa;a*({+;y7#z%`)*;gJJTva`d|*Yl&r+hJN7`+nI0lRx0j8~< zP3HIA;SR0Jh4o_^&?9L8qc`>F!S9T>zNQwpT@vOljV5*U9A!@5$8ps2@>4Rhm$5w$ zkMNp)jL>j%d8|FP5;qHdr%J!o@uqz^+G-?`7k`{F{_>5ER9$}%yzGYG57{y(%K{Mseh8l@WYK9d_rt8PavtD9z#Z2zXP!jZ-ch%@e{D9l*Jc;^-KNQlmC+|g+Y6AveJ zJdF(_<7sYGZZsKArzWG}dT|(7Qwg_>1Te6l~rZLpNTwyKabBKlDODH$vt%R{{MT z2q#y@Vcx3}x^1m9xhnpUezS9=S#%p{l|*A7#-ib&G<56H;{NqBmyg|kGBMHsUp>^p zZy7Q;CNN6gsWQ$ZUk>`E7{Ch+H<&8vi-&v{;^dGx=49o=(YFi0rX-73d?g=EjAsG; zD$Be`i*Suw0+G=lp;Ly8@Y~%x^zpVt%)e5CigPD&M`tEENVZ58Pq*oL>4&#*unWT#9o( zw3wfEXeQ?Z1mTOPGZ|SHO71HCGIux_jyoSoV-?3ywWHnSfj}%TuyP5$S*e8j*&Ox$ zl?CB{Jt1{s5@P`Y{*f1f+qHjb%<-#KXJa1v-|pms?e7tpH(8K2SVJqLr=Wdb4S5J` zj@1!NWWTcWlf6-J=72rdw{ihJ=XHuUu+QAN@jd++k%M+Y>>a*#0UmIuz=re2NYy`+ zi$U{nX=D^mD;%ZTU#j7y>tb;Csw5#!r)esmAdYExkyrZ}=Sw;rU;RmipX;k}nN|*d z`&fg*`>v57&0=#UvwC9T7>*-8%dsO}5WX32hBu#5&~tgbdE1X%Zk9_K4c^KRLgs!b z;j0KsFTSA~=h!?VA&b7M+YVl9m!RFLay;VM4EwBJQRA%(cv@FN042-ekYWIN*U6q$ zsy;Ylvm-<==^=ldXTpPXf9RUSMW}Yio#s>v!Y?;JxLdpeuT@LoqF&Yuu}%Y-mI_Fm zx(R*>){yEgrsnplshD+UocUWT@zQQ*Sktl!ToR_h>tA`4JW4_NlLcUCc#y10*iGK= zIzcnkgHYLHCNa9`NjLOYfz`X8L{a)Yxt9~nyiP_q!`u~Qt^!Z(cR9{mwi>vH4w$EB z4||@+gRbBdI=b~7%j-svFMYefwNVX5N_Ro`N9?#Zz7;bc3xH;6GcW7*5%T0)DO?rK zrOL6)mL6q`|NK31f5SR3-yH-8(*|gMgddsXVM{)&=_1PK*!nA>0@Jg)h*@kUK5p8I 
zPv0<(;j001U1SOf2@;GPF9GY@9ynC#4Y_%f;Hz2!r4dn(wk;5>i&LQ`G!d`Q{y@7j z16V#X9$an92^nNQNl`26C+PyoLKQIhdCXkzdl62`nS%W+C&^nAOwNX`fS&3kxR*IZ ztCbcpPx3gIU?_rbp7~%@Q!+`4DkIZG-C+{{UYKz&4(l$P&^>3n=%UFwpq(WJIp=d= z8lNv%HLZcJm>7%>^TGbzDJcHQ0PTnpJ{s8o_jMGpRXPpq71Lp((-Nq#w}lgWAs`jG zo8VPfK2?QW{J6QU#yk-`K;y& z>UDtDnA6gMDD#}|PU7ns&%J6nYi?8jwc}Q$HXPVk3PRFyBcLD%J>b% z-jlgihBHuHE(Z>Zt>%?abwdw@8C0vLpI3ePKeC`_F*Q}%P4o+jQS9X)UGsAe2sMw8 zaJf2g`H~2MH;ka^wH#`Wmci7?{_LG~mpKC@@blT(_~qL|{I7~1rm=j9QgI4oi8_;A z%CmWA^(gTu-_j2#Zxdw^edg^+)q5LGwE1)6OGDI#f$mV(Q&>i#;nPr z|HV|mc=3K3wuN#nGYs&U-b!5QC@d27eJGBPkDu>lNx{XliT4<6HOt{u6h-0&m=Q#0i1NX>WWvjhx4N1BsWpcLAB; z)e?vETbIJD?crGN6h$V_#EwGNy>HJ~#NmD=Ox1})pU_vlcj|GNu&ENpH~!(-#_wUC zHGZrK%7UgI18$L!1gO`$0WMR*Ejx3-K0_C6rlw=m99yV55rOx1d17OCEINGbCzDDN zIRVCgD6A62vFu!4^EDkf8}CHxMyojq6KAv?G=pQ|!$c|Z78fHh31i>7!qJf|y!}iV zEFOA6u;PEzUuc|sy?c__KX4*9*xB>-)G+l#+z6Sz-xl+2iZnWPu8 z4&C8bbSl3+v|Fc>8d%2E0~8m*H7mx%(XU`V6KC|$TTPR* z57KLYD$wzC7Vb+nqlFm9-1&p0eXbnz= z?_F^?w5*ISAB@AnnuTEUSOy1{YGCij9Bl5mMP${>A?V6+a+N&`OxMOiRelXOZ28Vi z>1aCW%3PLOGOa?nL)Md%Np)0&ALt!vg zPXUz2^q_su6mnna3f(R0i$ezts6Z>b&sEK^Y-bbI7})~nQupHC)0SXz+!2%5*Bj-{1aT&(O|scKb?kV zJ)|*j(&_mUWfZYy>w(uLu*GSF_!XzayQ9tA9q$kTi$ZhV$DhsrvGe;a6JCb;=2CzGRp8ou#2B$QA$Q9Cn5 z)~5i_-IB#kVzUm3EeD85qbt$2Rt8>HKE9clLjGA4z<&0gbRN{il=vxlujB#oeyRZG z{5!C9O$C_L$)eBGbdouFHSA!lrtVWgU^ni_Es`IkC#B7C-Ku!lH0I9E9vKhPr<813 zaDYUdWpj^HIc#^Fhjk-ANy;e!Q0Sb?-5$`uts`l8bHE+0P276s(P_5Fyz+`l= z2mQv^fXDtA!%aX8NQZNr^kdFPB%;N2$Xc@nxy`&T4GXy-U-_-ThO z_@m=9l?K5_+lXjk3uIp&qiJue@W2vVSeiM^dweAo-4ds9IcbGB|KSoy`IZMp{fCK) zTRkLwWsc=A1*Dx?3OfgM|w5vxi-rCIpHrgzEnoc zuNB}TwjXZVF^4Sht%1nHnP70n9R9?8r1^(*&?R<1PitB^2o)@#@r)rmf2T0r7sn66 zm*(TTB{ICwA(j=EO2LygYPjV57W(#W8J-Et#`v~g^FI+4+Li|-NM&W#cfvNjhNs5zo$vJAdEmV!Egu`t%?Nc2mC@nyaP=4zHwf#2ys-V5Qc zf_$Hi8M=Eo1-mUM%sj{EuT~fsC@~6Bo`7qEjE5s)i zg``k3fr~lpg*J}qAm3z9wOEHJ!YlwsUp4S9Z$3g>Kc|tMKdSMO$3;?h)&M0+OdxSc z45gGw$F81e+?wu;_9v&q?vQ-=H#-e_qEeAxy_a_X*$8nH_#nM=0f=8=uJgDydOqg~ zZMC?|JN2#z<(PNZ`9%dR*w(|nJoAO=0naZPq??DPz<(8dw7O^y z1Pl0Mo=5|Fu8KjTu7wNs`9mEH(nmfzcw#NnIPd?L)_N(#fi`!Jv zP?{?2&qF1MVQ!FH8}Sp)qY*+V`!`*c0lR439q z>x=NDz5q1lnV|2Tzr3?=#9+@yQ&bqrr_QTVu`4)(&Ztd-9}B*a+FR@3>dg6ga(6Mx z`zT`cRB`4EU@WvMJv`dK40;dm!Pb&fWbQRn@}6->_RFn=30bk|s*nc@<;zLv(rO5t zAOb@#nTLAyK{BA`!VNnrg3$y)why1eO&&@^(=XfTJtcj7^QetnI~;?(*KV6<^1tB~ zx@z&96l&nj*IazP(;t3ZZp5-YF?>0PdA$Z^pa)d8p;$T_5 zl9c%BfrQ8b+Ur)2MpS{j%4TIrtZyxLl)2K@Ok^FiV`S%fDd?Zs2u=HwpuXITT$Yc4 zh0VZA_$G~x<+bMfADNQ!(ErT$&RfZSGquK7Z^lTG`%f~nL6hy@eo&o*jYRgi6YOr1 z!>pZ1XZ$vW8dY=LH8+4DgI1>fK9;pCBQ z5*GIe(8J0*!G(D_K8^UHZ#NA9fk&M4^6^9eDQp%b z^dk_Xr*ZVmlzG_b@t3%x2h<5UQH@wLGPgCBTal@NU+&ew*n34d!FP%-?_ZDG7ph{O z)?s>`c~o`?O+oKlio{~NB~+%dj87t44=kI3hqkUUKTzi|NLO5 z$~C$(xg336|Ir7(CqnjkA*>Uh3HNJ%(W6dfpk8YVg-eQgXWo0l_AgGDr71{mEg0q1 ztQ$R(YrGzqbOOF?$s-@9Umyww8_@WEDDeAapDDboh*Nu-iQw!%^mMg2IVkvmXPm1` z$k`R}RDTr+Y)=5!`W-l2#N2#0>*%L}k2G#qEjoW;4$U*_+?MQAFkDgz7S>OY!D-*j6Sv>t-V_Xz z!NdS!-l2<~VL8zL;W#&Qb1cbi=0myc9J=!byIzv5QM!3M8qS>tTiH9dc~BVY?lq$H ziZCq5aYxpp(_ z?%Tw0S`&I5_JJ^-0)(;o#Q_86u6I?!5%DoH{a77ZoS%lf1blJc`aDqfHHKgI^?1KD z2d5lX#x$7Rw$PncHswLV3L8vb)J~Oi*Mr~oW*V+yjxM?D zup-|U4+fWDT9p_*KfaQiG|LI5glJ(o>-FXQ^1+4KtY_@Ar+u?}6)Hwe2ai*R5T5P| zMKci=k{u**HUyAW09Eswb0a{E$0iF(%b&|FqEB2mDnmRyg)m4e# zdL>L$*W8g? 
zNEfvp7vjFrHe#)1|qapxn1(f#Q&-UZ(YtfQZVD*V@Y>n41neVzt5oSMlP zQP*jt{{*79A`WHRR*-p{uFw)!5m+3p?@&IW&ksa^PDKJ8tNKHB^#l;N zq{m#lvJpC8UW-XN#VBbP3zM3PKw(lnXx&lxG+!!r^mkDwB z=pj23zu|yqb42mOBQw;=ze}E+$i<;Y_PE`D74g?p!z_zLh%OxBw)c+`jisV+d#e*F z{$Y+{ht=Q_|ACbLW&Y>2H6UZ?hS8r=;qz!IMEITSSiD67B<6>q&hHIib>lpbuPvLr zvN5HXE&RY|Bp$a}tYhyw5vZ%Qf!fZw=-K;*cg6W4dA?m8x=s*mPBDkI0W#ZaOB8b>=@$z3nDzAj%wuI=<_*XKm>Z+|}aeF%d)n=9!?w;GHI z*$ziS5}~)Kn!3(jX8wJJCz|34Fu#|Imj0oz$G@MkOx3D%0`d*%z|p z&LQ$ju8=IKZh+VyO{65At^H(`QD+}x!9|q9l?w$JF_MJ;W+dP)-#y?Y?0}ce%-}uq z6>nGJ2E#JeBM9UA z#CPCN#xS_@LmVzdSK*EGFFS^1n3HE~HVkzL;zf}WS~RGS@696crKK7gTeyIMY5?53 z!2B9~+Qd-CmhnU8(qxvE^}o6t$~Sn@yvRhXb$-!tVv!ntVh?NA$Px$oDSqaK}5=#XVwy`wHs7>TW4_`FR){4p24|* zEbh5i4-+b;Q~vb#q$J88Qd?%i@jgC$efzh$zvyx>ahQxZc6{LNpT3LD+>6P^Vt?2+ zhCG2nFLGyQ2DnLX;KdgGqZXl&aNKJv2Io(QpKF}q?GCpNl@0Y6Yc>@udCS3Qg&0*+ zUk{sqOa!^{0wQct3Q{vz$GXLtC{~AJj7c6$nD>wjIb_26=rlAh$c5ExpITwW82>w2 z7a%kjfByBS%MB92;aL^D@tsC9meldY&Hr(i+vd@)&Qpob_UW+cYZaAy%;pKxY&h$! z3*mTvG#<#vhR~%yNu$~~VlyTKyOjOl%H2FjIWLL6E=S4U)+Nw2qX83jZGqpU0(S_y zVTHRTij4W=)y)wo{KFdj*?Md18Ey2pVZFH50{Gt_U9S7^5SjE^7uWw=PJ%{NpxiVK zX14943;zqkb@%L{Hf4bFmNuZn-ihcc6hx2BXTHF|XfUc>3{+JXolnI>(Sp}R3NYB|g-_n3K$wd%`BUIcr-kHmy!Mk^z*Q3(-QGuy z%)H6zM^n(eFbBfk+@nI`l_+o42x)f~LCNP$_%=HhN*88gVX`hd#LmQ=!qr4^ZVM3$ zo`}^cPpGb%5iYtP4hN#OVK$1918|C`ud$bx&{awQvy?;K&MbI#Pyl!9*<@x%_i~;R;}a|-qn}542S=)K0n0yUxeG&>`x2a1^Oi_?8=}g>?LheD z@p<8Q@}%-KncPvy{hVuvya(#=MED3hWvRpZmp%~4SZJreTH&4Q$&7m#$?dawO;rC$5GnmXoct5Uq&Ap= z_ns+2*Ety&RGN?DtXuYd`V!`6&Y@Q4)?oGr#yoJ`4JmB@T%s|Tyjr9IMn*TuD9hsg zYz{{2SCS~6)J-n%q|vfX7;c!@G1GXB?jY|-$B6hE!m@)4TnN?@qoEK z%u28T7xN4%H(rh!>ttb49J_xX#G^-jG!&gpYo z`%tY-5s=>6N5v-Je;yEvX^aQ88sO7W9{v?VfoW5r)guxsjegOI3d*$abUl3e zm4y?XM4(T3FLWAJQjJyGtS91xVv7Rl?``SOC1!Hy4-Bl1}Nh1B0uu^NUw`F3@loK zk>~f*w;GjjW26AzIyK_e@isakJP&PyGH9t4<7ZWycB~k!CJ($<5+g}()Q?ibaZ5we zGCc?WTN;D`_8xG!#0q!b%|olb-lR>%7jkzH690BNR8{Qd$u_>@xYPScqI(6og(Wn+ zazDAWFp*b6@~}QKn7OBmajoBd()}+K?Vd)__hK^O)AEjdwTy>vsf>x=cA2<6mWO|4 zrI@C|@)>MCm|(bx=n5^T78cj&o*mzbX59`rkQfRM6A4@pZX{;w67aMtfD}mJ#kHo8 z;pvZ`{$3@UGCMi~f`edv6FLO3JcK$?{IN)b=f7%%9(&8UO*h848b2j?|6(4kP2|Iu z&(z5l;XkCoW)_@kvg|l&w;U?a5RFTZ(-*M~Y&{%_HVL_y_rx9V89X2r0yd1VQ-@D& z%JGUy241adAg{HU7ir#nUX<84DZZWnH{Sg(j?O!t%kTZ;R(3|ph!UlcQ6zDnD?}+H zsYql*N=SuJl)Y#6-n5sJxX;y;(jF?>OObXOKB<1^`}?=YgOdAw?(4j+*X#Lmz>pom zaK%&+s_Tz;IBguE-|Hw*{ym;MzTh2MQk{-@uS77A`N~>5YjHwF5I+3fN_H$P#)z@= zq4HJ<%;~D-)rHqnb*b@G^vYA3HCzcH)CiT^Q?MvP7{4`4q#3_#@$}q1R8Hd>RlS=G ztDh@i<*&co<9Ert;vaeHRKA5}yoZST4rjR3`l`b!sD|43y(C>R?%bmYWg5^!$+CDY zh>Ms2O;`1?sGViqq$RQM&MtCv{wFGW$C&@;e~c*j=JKn{)6p|)3(K@!rj7;nLVSNZDH54`I4w)Voan&4F`ggνG2mtJYQCi zY8e*9z>DcyoIJNMp6OHl!@N`SG>BF5$DEVXh>KJMxc0JpzC{)G-k${n28QJ6mBVzb z<|;TQ><-^77Q+>z)u=f+6iar8lZisCd&ZGq{Gvr5(Ird9uiQ41oH>-uz$^O&QCi61cQp<{ptA#8%~h$Yr^pU5XX4!o|tfBHwxG?-|eVA zl~bFH*%_Ol<$N`6Pg@M_OiOJDiGiB}Y&XM~KmW8r$rWj*mLS~pU2C46GzWK#cgMi* ztl#`(AbdLZjhpaY1zK+v6EUahSk}LXJG->mTqgD=@y*M{g=*vRS+ysmyxxt{kF&@D zDFTik#KGpuZ?ZF90IJ!Y)Fs1?^lR#X_pFWBt&#}3KFZh>lW8u!#*})WZ{xpiDFrpD z1KdhMHrGXmVBB6?`l+%CX^k2Bs3*XY%Vs#}Ui6i}%aqUo=T=t|EraN6xe zLluL-DrOt;&X0svoyYXR#mjtoh#-#7HABqF!Q;{TbmUzfY%&MZX2%%Fl3fC38!XV{qyoIGOThna+ow<8pD2k;z1 zXrcPqVD850QrNmTn}`q%w6x17|AnTbl<$2Kad0PYVSCU!S1-|l1Ht_4?~&B#L^1lU z%7*jz&0$_u5L9d{gyR>gz-%Y$V6K*ehkv@rwacYgaO(_7{bKjz%b1;yz0XP31vebjJV>WC_0p{x7Vv3# z5pG_bL1YG(fS;x!*zKK+ue}q9*Pa&yW3m~S^#|$F;5kyW2*q!Xf$-0cM4x4>c8U~1 z(1aCWyv>5xO|nLVk#ZR9Pry41PxICP#BplJGDwk5VO~2nr>uWS<{wXi%S9fzvqc4c zmZYH5i)WNSCyyblZ$qF{k$gEd5mirZ#m-He$zWLkDa>R!N`r%>Yn3nS1FXib71mhx zy%-&qEr+$v)nFu*3h@i|QT)9^hh~r|YGk~p=ff_NjHTJIWYP{ 
z$3_Oo)*lJr9}__ub7DsZ5H%bL;2-nw-!u917-W-?0RAxz8{fT(`+J4{8DJWW1n`ep z2#fa!9{CiqVbloMv%#;^_}_cP&lZT9j0C{>72v;@_@A%EY?IMHTkxNazH{ng>?`FN+F zKOepy@A%`V&#tfcPxjm2+tJ^n&C0*4$@}bq%$uW+;_SWjj?p{xz_dS0Z@n{`qX*7< zckfQRZ{582&f0n22XYT?#IKNEYcA!j!@;Hd?z0QphgUv1W9lWHe)5w}53F+dU~8{8 zM;`P&^jVnpdv)HEdiWKlzV*bdb1&)mU~7*2rIGg>X-0!P==i%;|lj}wAm{)r#Z{6Ix z_RYy#uQ)x>dGuBs?WbO^-uliem+rdfoVoK4l|Qt1x4x&!;e-7D6(3>UldPi@b9ePH55Fg%(qrK7n>R$5Ejp-fa@GDLZ?9J2r zU(nyjyOZAPldI5o%zOLfTX$M>?1F3eM)#M#kG?Uz$GSN?Nnh$$yX?=#>Tc!p9_P%- z8C!Epd%btagT6n?*-Myq$tOSQqdoTMgXs5-qxb27>D~BX%31HtyMHOpyTMDnBi*gu zm3-^wqxVt|ebyazK=u>1=2G7}d!T)I<@e^ugRT4IN6~l8({5`n_1DsQ7s#E8le;hU zd$?=eoLq&YJ$C7X))TkxK7K2<_ERqD>3+&5J?&Kb=w5HHH9tF-zTbD?AU!yUZo~)w E0vyX6k^lez literal 0 HcmV?d00001 diff --git a/data/Wine Quality/y_train.npy b/data/Wine Quality/y_train.npy new file mode 100644 index 0000000000000000000000000000000000000000..45bedb7de4067c4b45b3381454dec844d22c655a GIT binary patch literal 41704 zcmd^*y~61U6{Q2<1%h%sOK797<;q#wAd3gTp;m22xzdyeC^}AP(KfL@` z|Ki0juV22=U;p&tx0i4D`yYMy>BrAMc>e6KXRE*Wo-F>J{?`N3O!AYSc6N2&GoS8P z{p2Uz_sN0wr`$UgKfUig-=F%lIJ;S9@u1Rp%XGcZWRK?VZY1cPqd9j@6yi zOZxOZ-boxCOn30Xlv~xkYtC-c@e@|P%1^spoqe#|yDE1t@92Kt^u5#k^c`32r*};6 zkq6VR^^{L~x9=RhyGuUdsU7xL_uMmQ|7|1hO33^0;azkO4yGM^&^hZBPyN+i+C%p) zx{(~1e01l`$(_dPPUWZf+^SdIseHdb<&#byAFOoyiKFkL-^HGFI7lvGH<$X>y=P7y z9ZY)i$#>B^?p1!ur(V+2j`fPC{i$BvnflfJOJ2#PrKwm z=aTN6dFr8qr+VHoPxr{#Hz#**c-1?nyX3*u|H=J|ckg(o>P_?2p8d3+^lsldcz2il zD*BH3>h9^@y}qmZPTmcYhlA)w`{vG@PxJKYB_wAgZ~YW!AKcwdJ4vtZt@hY;-<({+ zDxdtM`;K#P(0O>}lS@b+OgVJt;70NZ$*=nKsvUCoio74>o#yCWbRQ0~V~!41KDn#V z_u=5J?w-Dr-k~M9_XAoxrELqjt+V+>CRovt9RHp@4nAH^J-_Bul6cG-AnrFUe&|* zUGtPf2U8E!+NZ}3h;B^&RA<*cb8=wilS@e7Sml#%on7l)zS>{y@vhZA zJ&--)uJ1kb-F^C?_jftFV0W*|2Bpud)>WmF5Rzm z@0+_{`Q*Df@(Fi$y8SA*>eB<$cUC%mu*%zC?Rf`2-J9mQGsWtTeK^QHb96BINvF4q z?1SD*I=SR0o!-6TsduvjrteLD(!JBoxo2MOR6cv%9QiJGcTeZK?@Kwq2M*FRw{G8@ zJZRtCx#W`r$r8@2vdpooUW@ zr+xCfJM=+zLF?w^jNP1jUEbYy&YXKm$G;k@`}kmbkA3shvrZ4RU-8u*yVE{-keySQ zc9YKC%6G1tcMpCVr|((4pYBZcbidk3zW1CncfQL{?Ns-=cbqd%cgTS$XMHtK4`jFE zxPvgS;bQ%Atew z&Czd-yu-*l(BY?$U2t`Gx4(L~+dK6>cEIlat8%=r}wt-)O)>S zp1v>TlAd;MrLVq=9_aU?gZLFE2Tt>;hYordeTvl%J~-V^`PIGEUV2BRPwzQj@#;Rl zchJG)C!HSH&5;LJd-N-0A0!V4(eDkX@2m3o#_GGtS2_Gsd3Hedr|3T1$Syj(!fC#{ zSM64Qx>NNk-@8@bzPb0{&cTi3jpUMU9}d!kgXqS}Ctu|%pB-?TcW<@t9r(Rt_j{c) zztwlH-gVF1@2`C46HhznMt0EQU3Aa9yVK2`-mmtOZ@uDtpL3Ote--i`qwnEcPkfqB zJG=Vy9aX;a{oZNb{fe^>I-hiM_RYyvSoOMoH^+OXd3x^}rth5U>ATYo`N}7^itK~k z9r9qecU3OEBk8=uJ{%->E3ED&Kk3tVd&fNOb#tkYo_3N>Zxz`Ey_0n365s8o-K295 zta8b}wa$CN?)}sGw3qbmJIS#FqJv4tH&*%NC!PJ|C!L;gnx{YQb@Svv?jOI|^Q@QSr|JP}6n)kgf=MLEIt>#wmxM$9Ltiz4u z6Oy-|bmz?Jfm44qpYD0D;@zEY?$o{M4(|g~-@Z9LbP(Ox^{bqBclp)#ylaUV)!o&-ZqK>It^0fMjpWc1 zM^Bjg)>Ho~efNI1pK?jx{a$txvX2i3t(%jp@TwlW;OaZvH&46WT`?(9IJC#o_<&sYCRych}H~$Yg&>x`y literal 0 HcmV?d00001 diff --git a/experiments/__init__.py b/experiments/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/experiments/scripts/__init__.py b/experiments/scripts/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/experiments/scripts/explain_wine_quality.py b/experiments/scripts/explain_wine_quality.py deleted file mode 100644 index 43b6ecb..0000000 --- a/experiments/scripts/explain_wine_quality.py +++ /dev/null @@ -1,36 +0,0 @@ -import argparse - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Explain Wine Quality Dataset") - parser.add_argument( - "--data_id", - type=int, - default=2280, - help="Data ID for the wine quality dataset", - ) - parser.add_argument( - "--n_trials", - type=int, - 
-        default=100,
-        help="Number of trials for the experiment",
-    )
-    parser.add_argument(
-        "--analyze",
-        action="store_true",
-        help="Flag to perform analysis",
-    )
-    parser.add_argument(
-        "--visualize",
-        action="store_true",
-        help="Flag to generate visualizations",
-    )
-
-    args = parser.parse_args()
-
-    # Placeholder for the main logic of the script
-    print(f"Data ID: {args.data_id}")
-    print(f"Number of Trials: {args.n_trials}")
-    if args.analyze:
-        print("Performing analysis...")
-    if args.visualize:
-        print("Generating visualizations...")
\ No newline at end of file
diff --git a/experiments/scripts/my_script.sh b/experiments/scripts/my_script.sh
deleted file mode 100644
index 70fd145..0000000
--- a/experiments/scripts/my_script.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-docker run -it -v "$(pwd)":/root/pnpxai-experiments --gpus '"device=0"' --name pnpxai_exp seongun/ubuntu22.04-cuda12.2.2-cudnn8-pytorch2.1:base
-
-python -m experiments.scripts.explain_wine_quality \
-    --data_id 2280 \
-    --n_trials 100 \
-    --analyze \
-    --visualize
\ No newline at end of file
diff --git a/experiments/scripts/wine_quality/README.md b/experiments/scripts/wine_quality/README.md
new file mode 100644
index 0000000..a8c0c47
--- /dev/null
+++ b/experiments/scripts/wine_quality/README.md
@@ -0,0 +1,394 @@
+# Wine Quality XAI Experiments
+
+This module provides a comprehensive framework for analyzing Wine Quality predictions using various Explainable AI (XAI) methods. The experiments compare multiple XAI frameworks across different models and explainers, with automated hyperparameter optimization.
+
+## 📋 Table of Contents
+
+- [Overview](#overview)
+- [Wine Quality Dataset](#wine-quality-dataset)
+- [Experimental Setup](#experimental-setup)
+- [Usage](#usage)
+- [Project Structure](#project-structure)
+- [Results](#results)
+
+---
+
+## Overview
+
+This project implements a benchmarking framework for comparing explainability methods on the Wine Quality dataset. It supports:
+
+- **5 XAI Frameworks**: PnPXAI, Captum, OmniXAI, OpenXAI, AutoXAI
+- **2 Models**: XGBoost, TabResNet (Residual Network for Tabular Data)
+- **8 Explainer Methods**: LIME, SHAP, Integrated Gradients, Gradient (Saliency), SmoothGrad, Input×Gradient, VarGrad, LRP
+- **Automated Evaluation**: ABPC (Correctness), Complexity, Compounded metrics
+- **Hyperparameter Optimization**: Optuna-based grid search with 25 trials
+
+The goal is to optimize explanations for various model and explainer combinations, finding the best hyperparameters that maximize explanation quality metrics.
+
+---
+
+## Wine Quality Dataset
+
+### Dataset Characteristics
+
+- **Source**: UCI Machine Learning Repository (ID: 186)
+- **Samples**: 6,497 total (5,197 train, 1,300 test)
+- **Features**: 11 numerical features
+  - Fixed acidity
+  - Volatile acidity
+  - Citric acid
+  - Residual sugar
+  - Chlorides
+  - Free sulfur dioxide
+  - Total sulfur dioxide
+  - Density
+  - pH
+  - Sulphates
+  - Alcohol
+- **Target**: Binary classification
+  - Class 1: Quality scores 7, 8, or 9 (high quality)
+  - Class 0: Quality scores ≤ 6 (standard quality)
+- **Preprocessing**:
+  - StandardScaler for numerical features
+  - Stratified train/test split (80/20)
+  - Random seed: 42 for reproducibility
+
+### Dataset Download
+
+The dataset is automatically downloaded and preprocessed using the UCI ML Repository API:
+
+```bash
+# Inside the container or with proper Python environment
+cd data/Wine\ Quality/
+python preprocess.py
+```
+
+This will:
+1. 
Download the Wine Quality dataset from UCI (ID: 186) +2. Apply feature preprocessing (StandardScaler) +3. Create binary classification labels (quality >= 7 → 1) +4. Split into train/test sets (stratified, 80/20) +5. Save processed data as `.npy` files and metadata as `.pkl` + +**Output Files:** +- `raw_data.csv` - Original dataset +- `X_train.npy`, `X_test.npy` - Normalized features +- `y_train.npy`, `y_test.npy` - Binary labels +- `feature_metadata.pkl` - Feature encoders and metadata +- `xgb_model.json` - Trained XGBoost model +- `resnet_model.pth` - Trained TabResNet model + +--- + +## Experimental Setup + +### Supported Models + +1. **XGBoost Classifier** (`xgb`) + - Standard gradient boosting classifier + - Compatible with: PnPXAI, Captum, OmniXAI, AutoXAI + - Explainers: LIME, SHAP (model-agnostic methods) + +2. **TabResNet** (`tab_resnet`) + - Residual network architecture for tabular data + - Compatible with: PnPXAI, Captum, OpenXAI, AutoXAI + - Explainers: All 8 methods (including gradient-based) + +### Supported XAI Frameworks + +| Framework | XGBoost | TabResNet | Explainers | Special Features | +|-----------|---------|-----------|------------|------------------| +| **PnPXAI** | ✅ | ✅ | All 8 + HPO | Hyperparameter optimization with Optuna | +| **Captum** | ✅ | ✅ | 7 (no VG) | PyTorch native implementation | +| **OmniXAI** | ✅ | ❌ | LIME, SHAP | XGBoost only | +| **OpenXAI** | ❌ | ✅ | 6 (no LRP/VG) | TabResNet only | +| **AutoXAI** | ✅ | ✅ | LIME, SHAP | Legacy HPO framework (separate venv) | + +### Explainer Methods + +- **Model-Agnostic**: + - `lime`: Local Interpretable Model-agnostic Explanations + - `shap`: SHapley Additive exPlanations (KernelSHAP) + +- **Gradient-Based** (PyTorch models only): + - `grad`: Gradient (Saliency) + - `itg`: Input × Gradient + - `ig`: Integrated Gradients + - `sg`: SmoothGrad + - `vg`: VarGrad (PnPXAI only) + - `lrp`: Layer-wise Relevance Propagation + +### Experimental Objectives + +The experiments aim to: + +1. **Optimize Explanations**: Use hyperparameter optimization (HPO) to find the best explanation parameters for each model/explainer combination +2. **Compare Frameworks**: Evaluate explanation quality across different XAI frameworks +3. **Evaluate Quality**: Measure explanations using: + - **ABPC (Correctness)**: How accurately the explanation reflects the model's behavior + - **Complexity**: Sparsity and interpretability of explanations (lower is better) + - **Compounded**: Combined score (0.7 × ABPC - 0.3 × Complexity) +4. **Enable Reproducibility**: All experiments use fixed random seeds and Docker containers + +### Configuration Files + +All experimental settings are managed via YAML configuration files in `configs/tabular/`: + +- `dataset_config.yaml`: Dataset settings (UCI ID, split ratio, target info) +- `model_config.yaml`: Model hyperparameters and training seeds +- `explainer_config.yaml`: Framework-specific explainer parameters +- `optuna_config.yaml`: HPO settings (grid search sampler, 25 trials) + +--- + +## Usage + +### Docker Setup + +This module requires the Wine Quality Docker image. From the project root: + +```bash +# Build the wine quality experiment image +docker build -t pnpxai_wine_quality:latest -f Dockerfile.wine_quality . 
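+
+# The trailing "." uses the project root as the build context, so run this
+# from the repository root. Optionally confirm the image tag afterwards:
+docker images pnpxai_wine_quality:latest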
+ +# Run with GPU support and volume mount +docker run --rm -it \ + --runtime=nvidia \ + --gpus all \ + --shm-size=8g \ + -v $(pwd):/root/pnpxai-experiments \ + pnpxai_wine_quality:latest \ + /bin/bash +``` + +### Quick Start + +The main entry point is `analyze_wine_quality.py`, which replaces the old shell scripts with a pure Python implementation: + +```bash +# Inside the Docker container, from /root/pnpxai-experiments/ + +# Run all experiments (all models, frameworks, explainers) +python -m experiments.scripts.wine_quality.analyze_wine_quality + +# Dry run to see what will be executed +python -m experiments.scripts.wine_quality.analyze_wine_quality --dry-run + +# Continue from where you left off (skip completed experiments) +python -m experiments.scripts.wine_quality.analyze_wine_quality --continue + +# Run with verbose logging +python -m experiments.scripts.wine_quality.analyze_wine_quality --verbose +``` + +### Running Specific Combinations + +```bash +# Run only PnPXAI framework +python -m experiments.scripts.wine_quality.analyze_wine_quality --framework pnpxai + +# Run only XGBoost model +python -m experiments.scripts.wine_quality.analyze_wine_quality --model xgb + +# Run only SHAP explainer +python -m experiments.scripts.wine_quality.analyze_wine_quality --explainer shap + +# Combine filters: PnPXAI + XGBoost + SHAP +python -m experiments.scripts.wine_quality.analyze_wine_quality \ + --framework pnpxai \ + --model xgb \ + --explainer shap \ + --verbose +``` + +### Running Individual Experiments + +You can also run individual experiments directly: + +```bash +# Run a single experiment with PnPXAI +python -m experiments.scripts.wine_quality.explain_wine_quality \ + --framework pnpxai \ + --model xgb \ + --explainer shap \ + --verbose + +# Run with Captum and TabResNet +python -m experiments.scripts.wine_quality.explain_wine_quality \ + --framework captum \ + --model tab_resnet \ + --explainer ig \ + --verbose +``` + +### Command-Line Options + +**For `analyze_wine_quality.py` (batch runner):** +- `--dry-run`: Show commands without executing +- `--continue`: Skip already completed experiments +- `--verbose`: Enable verbose logging +- `--framework [FRAMEWORKS...]`: Filter by framework(s) +- `--model [MODELS...]`: Filter by model(s) +- `--explainer [EXPLAINERS...]`: Filter by explainer(s) + +**For `explain_wine_quality.py` (individual experiments):** +- `--framework {pnpxai|captum|omnixai|openxai|autoxai}`: XAI framework (required) +- `--model {xgb|tab_resnet}`: Model type (required) +- `--explainer {lime|shap|ig|grad|sg|itg|vg|lrp}`: Explainer method (required) +- `--data-dir PATH`: Data directory (default: `data/Wine Quality`) +- `--config-dir PATH`: Config directory (default: `configs/tabular`) +- `--output PATH`: Custom output directory +- `--seed INT`: Random seed (default: 42) +- `--batch-size INT`: Batch size (default: 32) +- `--n-samples INT`: Samples for LIME/SHAP (default: 64) +- `--max-test-samples INT`: Limit test set size +- `--verbose`: Enable verbose logging + +### All Valid Combinations + +The `analyze_wine_quality.py` script automatically runs all valid framework/model/explainer combinations: + +**PnPXAI** (10 combinations): +- `xgb`: lime, shap +- `tab_resnet`: lime, shap, grad, itg, ig, sg, vg, lrp + +**Captum** (9 combinations): +- `xgb`: lime, shap +- `tab_resnet`: lime, shap, grad, itg, ig, sg, lrp + +**OmniXAI** (2 combinations): +- `xgb`: lime, shap + +**OpenXAI** (6 combinations): +- `tab_resnet`: lime, shap, grad, itg, ig, sg + +**AutoXAI** (4 combinations): +- 
`xgb`: lime, shap +- `tab_resnet`: lime, shap + +**Total: 31 experiment combinations** + +--- + +## Project Structure + +``` +wine_quality/ +├── analyze_wine_quality.py # Batch experiment runner +├── explain_wine_quality.py # Single experiment script +├── script_utils/ # Utility modules +│ ├── data_utils.py # Data loading and transformation +│ ├── model_utils.py # Model loading and wrapping +│ └── explainer_factory.py # Explainer validation +└── lib/ # AutoXAI library +``` + +### Utility Modules + +**`script_utils/data_utils.py`:** +- `load_wine_quality()`: Load preprocessed data and metadata +- `transform()`: Apply feature transformations +- `invert_transform()`: Reverse transformations for interpretation +- `find_idx()`: Find feature index mappings + +**`script_utils/model_utils.py`:** +- `TorchModelForXGBoost`: XGBoost to PyTorch wrapper +- `load_model()`: Priority-based model loading (local → HF Hub) +- `wrap_model_for_pytorch()`: Framework-agnostic model wrapping + +**`script_utils/explainer_factory.py`:** +- `ExplainerConfig`: Framework compatibility matrix +- `validate_explainer_args()`: Validate framework/model/explainer combinations +- `get_framework_specific_name()`: Map explainer names across frameworks + +--- + +## Results + +### Output Structure + +Each experiment generates outputs in `results/Wine Quality/{model}/{framework}/{explainer}/`: + +- **`explanations.npy`**: Attribution values (n_samples × n_features) + - Importance scores for each feature for each test sample + - Higher absolute values indicate more important features + +- **`abpc.npy`**: ABPC (Area Between Perturbation Curves) scores + - Measures correctness of explanations + - Higher values indicate better explanations + +- **`cmpx.npy`**: Complexity scores + - Measures sparsity/simplicity of explanations + - Lower values indicate simpler, more interpretable explanations + +- **`cmpd.npy`**: Compounded scores + - Combined metric: 0.7 × ABPC - 0.3 × Complexity + - Balances correctness and simplicity + +- **`metadata.json`**: Execution metadata + - Framework, model, explainer details + - Hyperparameters used + - Execution time + - Data shapes + - Metric summaries (mean, std) + +### Checking Progress + +```bash +# Inside the container +ls -R results/Wine\ Quality/ + +# Count completed experiments +find results/Wine\ Quality/ -name "explanations.npy" | wc -l +``` + +### Interpreting Results + +- **Best Explanation**: Highest compounded score (balances correctness and simplicity) +- **Most Accurate**: Highest ABPC score (may be complex) +- **Simplest**: Lowest complexity score (may sacrifice accuracy) + +Compare metrics across different framework/model/explainer combinations to find the optimal explanation method for your use case. + +--- + +## Troubleshooting + +### Common Issues + +1. **Module not found errors**: + - Ensure you're running commands from `/root/pnpxai-experiments` in the Docker container + - Check that volume mount is working: `ls -la /root/pnpxai-experiments` + +2. **AutoXAI import errors**: + - AutoXAI uses a separate virtual environment due to numpy version conflicts + - The `analyze_wine_quality.py` script automatically handles this + +3. **CUDA out of memory**: + - Reduce batch size: `--batch-size 16` + - Limit test samples: `--max-test-samples 100` + +4. **Invalid framework/model/explainer combination**: + - Check the compatibility matrix in [Supported XAI Frameworks](#supported-xai-frameworks) + - The script validates combinations and raises errors for invalid inputs + +5. 
**Results not saving**: + - Check Docker volume mount permissions + - Ensure sufficient disk space + - Verify results directory exists: `mkdir -p results/Wine\ Quality` + +--- + +## Citation + +### Dataset Citation + +```bibtex +@misc{cortez2009wine, + author={Cortez, Paulo and Cerdeira, A. and Almeida, F. and Matos, T. and Reis, J.}, + title={Wine Quality}, + year={2009}, + howpublished={UCI Machine Learning Repository}, + note={DOI: 10.24432/C56S3T} +} +``` diff --git a/experiments/scripts/wine_quality/__init__.py b/experiments/scripts/wine_quality/__init__.py new file mode 100644 index 0000000..e98d8c6 --- /dev/null +++ b/experiments/scripts/wine_quality/__init__.py @@ -0,0 +1,6 @@ +""" +Wine Quality Explanation Experiment Module + +This module provides tools for generating and analyzing explanations +for Wine Quality predictions using various XAI frameworks. +""" diff --git a/experiments/scripts/wine_quality/analyze_wine_quality.py b/experiments/scripts/wine_quality/analyze_wine_quality.py new file mode 100755 index 0000000..02a10f5 --- /dev/null +++ b/experiments/scripts/wine_quality/analyze_wine_quality.py @@ -0,0 +1,366 @@ +#!/usr/bin/env python3 +""" +Wine Quality Analysis Script + +Run all experiments for Wine Quality dataset with various model/framework/explainer combinations. +This script replaces the shell scripts (run_all_experiments.sh and run_explain.sh) with a pure Python implementation. + +Usage: + python -m experiments.scripts.wine_quality.analyze_wine_quality [OPTIONS] + +Options: + --dry-run: Show commands without executing + --continue: Skip already completed experiments + --framework: Run only specific framework(s) + --model: Run only specific model(s) + --explainer: Run only specific explainer(s) + --verbose: Enable verbose logging + +Examples: + # Run all experiments + python -m experiments.scripts.wine_quality.analyze_wine_quality + + # Dry run to see what will be executed + python -m experiments.scripts.wine_quality.analyze_wine_quality --dry-run + + # Continue from where you left off + python -m experiments.scripts.wine_quality.analyze_wine_quality --continue + + # Run specific combination + python -m experiments.scripts.wine_quality.analyze_wine_quality --framework pnpxai --model xgb --explainer shap +""" + +import os +import sys +import argparse +import subprocess +from pathlib import Path +from datetime import datetime +from typing import Dict, List, Tuple +import time + +# Color codes for terminal output +class Colors: + RED = '\033[0;31m' + GREEN = '\033[0;32m' + YELLOW = '\033[1;33m' + BLUE = '\033[0;34m' + NC = '\033[0m' # No Color + + +# Define experiment combinations +# Framework -> Model -> Explainers +EXPERIMENTS: Dict[str, Dict[str, List[str]]] = { + "pnpxai": { + "xgb": ["lime", "shap"], + "tab_resnet": ["lime", "shap", "grad", "itg", "ig", "sg", "vg", "lrp"], + }, + "captum": { + "xgb": ["lime", "shap"], + "tab_resnet": ["lime", "shap", "grad", "itg", "ig", "sg", "lrp"], + }, + "omnixai": { + "xgb": ["lime", "shap"], + }, + "openxai": { + "tab_resnet": ["lime", "shap", "grad", "itg", "ig", "sg"], + }, + "autoxai": { + "xgb": ["lime", "shap"], + "tab_resnet": ["lime", "shap"], + }, +} + + +def print_colored(message: str, color: str = Colors.NC): + """Print colored message to terminal.""" + print(f"{color}{message}{Colors.NC}") + + +def check_if_completed(model: str, framework: str, explainer: str, results_dir: str = "results/Wine Quality") -> bool: + """ + Check if an experiment has already been completed. 
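+    With the default results layout this means, for example, that
+    model="xgb", framework="pnpxai", explainer="shap" counts as done once
+    "results/Wine Quality/xgb/pnpxai/shap/explanations.npy" exists.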
+ + Args: + model: Model name (xgb, tab_resnet) + framework: Framework name (pnpxai, captum, etc.) + explainer: Explainer name (lime, shap, etc.) + results_dir: Base results directory + + Returns: + True if experiment is completed, False otherwise + """ + result_path = Path(results_dir) / model / framework / explainer / "explanations.npy" + return result_path.exists() + + +def run_single_experiment( + model: str, + framework: str, + explainer: str, + dry_run: bool = False, + verbose: bool = False, + continue_mode: bool = False, +) -> Tuple[bool, float]: + """ + Run a single experiment with specified model/framework/explainer combination. + + Args: + model: Model name + framework: Framework name + explainer: Explainer name + dry_run: If True, only print command without executing + verbose: Enable verbose logging + continue_mode: Skip if already completed + + Returns: + Tuple of (success: bool, elapsed_time: float) + """ + print_colored("="*50, Colors.BLUE) + print_colored(f"Experiment: Model={model}, Framework={framework}, Explainer={explainer}", Colors.BLUE) + print_colored("="*50, Colors.BLUE) + + # Check if already completed in continue mode + if continue_mode and check_if_completed(model, framework, explainer): + print_colored("⊗ Already completed. Skipping.", Colors.YELLOW) + print() + return True, 0.0 + + # Build command + # Use explain_wine_quality directly since we're already in the wine_quality module + cmd = [ + sys.executable, "-m", "experiments.scripts.wine_quality.explain_wine_quality", + "--framework", framework, + "--model", model, + "--explainer", explainer, + ] + + if verbose: + cmd.append("--verbose") + + if dry_run: + print_colored(f"[DRY RUN] {' '.join(cmd)}", Colors.YELLOW) + print() + return True, 0.0 + + # Execute command + start_time = time.time() + + # For autoxai, we need to use the special virtual environment + env = os.environ.copy() + if framework == "autoxai": + print_colored("Running AutoXAI with dedicated virtual environment...", Colors.YELLOW) + # Set the virtual environment's Python interpreter + venv_python = "/opt/autoxai_venv/bin/python" + if os.path.exists(venv_python): + cmd[0] = venv_python + else: + print_colored(f"Warning: AutoXAI venv not found at {venv_python}, using default Python", Colors.YELLOW) + + try: + result = subprocess.run( + cmd, + check=True, + env=env, + cwd=Path.cwd(), + ) + elapsed_time = time.time() - start_time + print_colored("✓ Experiment completed", Colors.GREEN) + print() + return True, elapsed_time + + except subprocess.CalledProcessError as e: + elapsed_time = time.time() - start_time + print_colored(f"✗ Experiment failed: {e}", Colors.RED) + print_colored("Continuing with next experiment...", Colors.YELLOW) + print() + return False, elapsed_time + + +def count_total_experiments( + experiments: Dict[str, Dict[str, List[str]]], + framework_filter: List[str] = None, + model_filter: List[str] = None, + explainer_filter: List[str] = None, +) -> int: + """Count total number of experiments to run.""" + total = 0 + for framework, models in experiments.items(): + if framework_filter and framework not in framework_filter: + continue + for model, explainers in models.items(): + if model_filter and model not in model_filter: + continue + for explainer in explainers: + if explainer_filter and explainer not in explainer_filter: + continue + total += 1 + return total + + +def run_all_experiments( + dry_run: bool = False, + continue_mode: bool = False, + verbose: bool = False, + framework_filter: List[str] = None, + model_filter: List[str] 
= None, + explainer_filter: List[str] = None, +): + """ + Run all experiment combinations. + + Args: + dry_run: If True, only print commands without executing + continue_mode: Skip already completed experiments + verbose: Enable verbose logging + framework_filter: Only run these frameworks (None = all) + model_filter: Only run these models (None = all) + explainer_filter: Only run these explainers (None = all) + """ + print() + print_colored("="*70, Colors.BLUE) + print_colored("Wine Quality Analysis - Running All Experiments", Colors.BLUE) + print_colored("="*70, Colors.BLUE) + print() + + if dry_run: + print_colored("⚠ DRY RUN MODE: Commands will be printed but not executed", Colors.YELLOW) + print() + + if continue_mode: + print_colored("⚠ CONTINUE MODE: Already completed experiments will be skipped", Colors.YELLOW) + print() + + # Count total experiments + total_experiments = count_total_experiments( + EXPERIMENTS, framework_filter, model_filter, explainer_filter + ) + print_colored(f"Total experiments to run: {total_experiments}", Colors.GREEN) + print() + + # Track progress + completed = 0 + succeeded = 0 + failed = 0 + skipped = 0 + total_time = 0.0 + + start_time = time.time() + + # Run experiments + for framework, models in EXPERIMENTS.items(): + if framework_filter and framework not in framework_filter: + continue + + for model, explainers in models.items(): + if model_filter and model not in model_filter: + continue + + for explainer in explainers: + if explainer_filter and explainer not in explainer_filter: + continue + + # Check if skipping in continue mode + if continue_mode and check_if_completed(model, framework, explainer): + skipped += 1 + + # Run experiment + success, elapsed = run_single_experiment( + model, framework, explainer, + dry_run=dry_run, + verbose=verbose, + continue_mode=continue_mode, + ) + + completed += 1 + total_time += elapsed + + if success: + succeeded += 1 + else: + failed += 1 + + # Print progress + if not dry_run: + print_colored(f"Progress: {completed}/{total_experiments} completed", Colors.GREEN) + if continue_mode and skipped > 0: + print_colored(f"({skipped} skipped)", Colors.YELLOW) + print() + + # Print summary + total_elapsed = time.time() - start_time + minutes = int(total_elapsed // 60) + seconds = int(total_elapsed % 60) + + print() + print_colored("="*70, Colors.BLUE) + print_colored("All Experiments Completed!", Colors.BLUE) + print_colored("="*70, Colors.BLUE) + print_colored(f"Total execution time: {minutes}m {seconds}s", Colors.GREEN) + + if not dry_run: + print_colored(f"Succeeded: {succeeded}", Colors.GREEN) + if failed > 0: + print_colored(f"Failed: {failed}", Colors.RED) + if skipped > 0: + print_colored(f"Skipped: {skipped}", Colors.YELLOW) + + print() + print_colored("Results are saved in: results/Wine Quality/", Colors.BLUE) + print() + + +def main(): + """Main entry point.""" + parser = argparse.ArgumentParser( + description="Run Wine Quality explanation experiments", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Run all experiments + python -m experiments.scripts.wine_quality.analyze_wine_quality + + # Dry run to see what will be executed + python -m experiments.scripts.wine_quality.analyze_wine_quality --dry-run + + # Continue from where you left off + python -m experiments.scripts.wine_quality.analyze_wine_quality --continue + + # Run specific framework + python -m experiments.scripts.wine_quality.analyze_wine_quality --framework pnpxai + + # Run specific combination + python -m 
experiments.scripts.wine_quality.analyze_wine_quality --framework pnpxai --model xgb --explainer shap + """ + ) + + parser.add_argument("--dry-run", action="store_true", + help="Show commands without executing") + parser.add_argument("--continue", dest="continue_mode", action="store_true", + help="Skip already completed experiments") + parser.add_argument("--verbose", action="store_true", + help="Enable verbose logging") + parser.add_argument("--framework", nargs="+", + choices=list(EXPERIMENTS.keys()), + help="Run only specific framework(s)") + parser.add_argument("--model", nargs="+", + choices=["xgb", "tab_resnet"], + help="Run only specific model(s)") + parser.add_argument("--explainer", nargs="+", + choices=["lime", "shap", "grad", "itg", "ig", "sg", "vg", "lrp"], + help="Run only specific explainer(s)") + + args = parser.parse_args() + + run_all_experiments( + dry_run=args.dry_run, + continue_mode=args.continue_mode, + verbose=args.verbose, + framework_filter=args.framework, + model_filter=args.model, + explainer_filter=args.explainer, + ) + + +if __name__ == "__main__": + main() diff --git a/experiments/scripts/wine_quality/explain_wine_quality.py b/experiments/scripts/wine_quality/explain_wine_quality.py new file mode 100755 index 0000000..de74ddf --- /dev/null +++ b/experiments/scripts/wine_quality/explain_wine_quality.py @@ -0,0 +1,1113 @@ +#!/usr/bin/env python3 +""" +Wine Quality Explanation Script + +Generate explanations for Wine Quality predictions using various XAI frameworks. + +Supported frameworks: + - pnpxai: PnP XAI with hyperparameter optimization + - captum: PyTorch-based explainability library + - omnixai: OmniXAI framework (XGBoost only, LIME/SHAP only) + - openxai: OpenXAI framework (TabResNet only, no LRP/VG) + - autoxai: AutoXAI framework with hyperparameter optimization (TabResNet only, LIME/SHAP only) + +Supported models: + - xgb: XGBoost classifier + - tab_resnet: TabResNet (residual network for tabular data) + +Supported explainers: + - lime: Local Interpretable Model-agnostic Explanations + - shap: SHapley Additive exPlanations (KernelSHAP) + - ig: Integrated Gradients + - grad: Gradient (Saliency) + - sg: SmoothGrad + - itg: Input × Gradient + - vg: VarGrad (PnPXAI only) + - lrp: Layer-wise Relevance Propagation +""" +import os +import sys +import json +import argparse +import logging +import random +import warnings +from pathlib import Path +from datetime import datetime +from typing import Dict, Any, Optional + +# Suppress Captum LIME/KernelShap batch warning +warnings.filterwarnings('ignore', message='You are providing multiple inputs for Lime / Kernel SHAP attributions') + +import numpy as np +import torch +import torch.nn as nn +import yaml +from tqdm import tqdm + +# Add parent directory to path for imports +sys.path.append(str(Path(__file__).parent)) + +from script_utils import ( + load_wine_quality, + validate_explainer_args, + load_model, + wrap_model_for_pytorch, +) + + +def set_seeds(seed: int = 42): + """Set random seeds for reproducibility.""" + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + + +def setup_logging(verbose: bool = True): + """Setup logging configuration.""" + level = logging.INFO if verbose else logging.WARNING + logging.basicConfig( + level=level, + format='%(asctime)s - %(levelname)s - %(message)s', + datefmt='%Y-%m-%d %H:%M:%S' + ) + return logging.getLogger(__name__) + + +def load_configs(config_dir: str = 
"configs/tabular") -> Dict[str, Any]: + """Load all configuration files.""" + config_path = Path(config_dir) + configs = {} + + # Load explainer config + with open(config_path / "explainer_config.yaml", 'r') as f: + configs['explainer'] = yaml.safe_load(f) + + # Load optuna config + with open(config_path / "optuna_config.yaml", 'r') as f: + configs['optuna'] = yaml.safe_load(f) + + return configs + + +def explain_with_pnpxai( + model: nn.Module, + X_test: np.ndarray, + y_test: np.ndarray, + explainer_name: Optional[str], + config: Dict[str, Any], + logger: logging.Logger, + batch_size: int = 32, + n_samples: int = 64, + n_steps: int = 50, + model_type: str = "xgb", +) -> np.ndarray: + """ + Generate explanations using PnPXAI framework. + + Args: + model: PyTorch model (wrapped if XGBoost) + X_test: Test features + y_test: Test labels + explainer_name: Explainer type (None for auto-optimization) + config: Configuration dictionary + logger: Logger instance + batch_size: Batch size for processing + n_samples: Number of samples for LIME/SHAP + n_steps: Number of steps for Integrated Gradients + model_type: Model type ('xgb' or 'tab_resnet') + + Returns: + Explanation array of shape (n_samples, n_features) + """ + import pandas as pd + import itertools + from collections import defaultdict + from torch.utils.data import DataLoader, Dataset + from pnpxai import Experiment, AutoExplanation + from pnpxai.core.modality.modality import Modality + from pnpxai.explainers import Lime, KernelShap + from pnpxai.evaluator.metrics import AbPC, Complexity, Metric + from sklearn.cluster import KMeans as SklearnKMeans + from pnpxai.explainers.utils.baselines import BaselineFunction + from pnpxai.explainers.utils.postprocess import NormalizationFunction, minmax + from pnpxai.explainers.base import Tunable + from pnpxai.explainers.types import TunableParameter + + logger.info("Setting up PnPXAI framework...") + + # Define custom dataset + class TabDataset(Dataset): + def __init__(self, inputs, labels): + super().__init__() + self.inputs = inputs + self.labels = labels + + def __len__(self): + return len(self.inputs) + + def __getitem__(self, idx): + return self.inputs[idx], self.labels[idx] + + def collate_fn(batch): + inputs = torch.stack([torch.from_numpy(d[0]) for d in batch]).to(torch.float) + labels = torch.tensor([d[1] for d in batch], dtype=torch.long) + return inputs, labels + + # Create dataset and dataloader + test_dataset = TabDataset(X_test, y_test) + test_loader = DataLoader( + test_dataset, + batch_size=batch_size, + collate_fn=collate_fn, + shuffle=False, + pin_memory=True, + ) + + # Create modality from sample batch + sample_batch = next(iter(test_loader)) + modality = Modality( + dtype=sample_batch[0].dtype, + ndims=sample_batch[0].dim(), + ) + + # Define custom baseline function (KMeans) + bg_data_idx = np.random.choice(len(X_test), size=50, replace=False) + X_bg = X_test[bg_data_idx] + + class KMeans(BaselineFunction, Tunable): + def __init__(self, background_data, n_clusters=8): + self.background_data = background_data + self.n_clusters = TunableParameter( + name='n_clusters', + current_value=n_clusters, + dtype=int, + is_leaf=True, + space={'low': 10, 'high': len(background_data), 'step': 10}, + ) + self.kmeans_ = SklearnKMeans(n_clusters).fit(background_data) + BaselineFunction.__init__(self) + Tunable.__init__(self) + self.register_tunable_params([self.n_clusters]) + + def __call__(self, inputs): + if inputs.ndim == 3: + inputs = inputs.squeeze(1) + cluster_ids = 
self.kmeans_.predict(inputs.to(torch.float64).numpy()) + cluster_centers = self.kmeans_.cluster_centers_[cluster_ids] + return torch.from_numpy(cluster_centers).float().to(inputs.device) + + # Define custom normalization functions + class Pos(NormalizationFunction): + def __init__(self): + super().__init__() + + def __call__(self, attrs): + return attrs.abs() + + class MinMax(NormalizationFunction): + def __init__(self): + super().__init__() + + def __call__(self, attrs): + return minmax(attrs) / 1000 + + # Define compound metric + class CompoundMetric(Metric): + def __init__( + self, + model, + cmpd_metrics, + weights, + explainer=None, + target_input_keys=None, + additional_input_keys=None, + output_modifier=None, + ): + super().__init__( + model, explainer, target_input_keys, + additional_input_keys, output_modifier, + ) + assert len(cmpd_metrics) == len(weights) + self.cmpd_metrics = cmpd_metrics + self.weights = weights + + def evaluate(self, inputs, targets, attrs): + values = torch.zeros(attrs.size(0)).to(attrs.device) + for weight, metric in zip(self.weights, self.cmpd_metrics): + values += weight * metric.set_explainer(self.explainer).evaluate(inputs, targets, attrs) + return values + + # Create experiment based on model type + logger.info("Creating PnPXAI experiment...") + + if model_type == "tab_resnet": + expr = AutoExplanation( + model=model, + data=test_loader, + modality=modality, + target_input_keys=[0], # Current test_loader batches data as tuple of (inputs, targets). 0 means the location of inputs in the tuple + target_class_extractor=lambda outputs: outputs.argmax(-1), + label_key='labels', + target_labels=False, # Gets attributions on the prediction for all explainer if False. + ) + + expr.metrics.delete('morf') + expr.metrics.delete('lerf') + + elif model_type == "xgb": + expr = Experiment( + model=model, + data=test_loader, + modality=modality, + target_input_keys=[0], # feature location in batch from dataloader + target_class_extractor=lambda outputs: outputs.argmax(-1), # extract target class from output batch + label_key=-1, # label location in input batch from dataloader + ) + + # add explainers + expr.explainers.add('kernel_shap', KernelShap) + expr.explainers.add('lime', Lime) + + # add metrics + expr.metrics.add('abpc', AbPC) + + else: + raise ValueError("Invalid model type") + + # Add custom baseline function and default kwargs + expr.modality.util_functions['baseline_fn'].add('kmeans', KMeans) + expr.modality.util_functions['baseline_fn'].add_default_kwargs( + 'background_data', X_bg + ) + + # Add custom normalization functions and complexity metric + expr.modality.util_functions['normalization_fn'].add('pos', Pos) + expr.modality.util_functions['normalization_fn'].add('minmax', MinMax) + expr.metrics.add('cmpx', Complexity) + expr.metrics.add('cmpd', CompoundMetric) + + # Map explainer names + PNP_INV_MAP = { + "kernel_shap": "shap", + "lime": "lime", + "gradient": "grad", + "grad_x_input": "itg", + "integrated_gradients": "ig", + "smooth_grad": "sg", + "lrp_uniform_epsilon": "lrp", + "var_grad": "vg", + } + + explainer_map = { + 'shap': 'kernel_shap', + 'lime': 'lime', + 'grad': 'gradient', + 'itg': 'grad_x_input', + 'ig': 'integrated_gradients', + 'sg': 'smooth_grad', + 'vg': 'var_grad', + 'lrp': 'lrp_uniform_epsilon', + } + + if explainer_name: + pnp_explainer = explainer_map.get(explainer_name, explainer_name) + logger.info(f"Using explainer: {pnp_explainer}") + + # Setup metric options + metric_options = { + 'cmpd_metrics': [ + 
expr.create_metric('abpc'), + expr.create_metric('cmpx'), + ], + 'weights': [.7, -.3] + } + + # Set direction + direction = 'maximize' + + # Setup disable_tunable_params + disable_tunable_params = {} + if pnp_explainer in ['lime', 'kernel_shap']: + disable_tunable_params['n_samples'] = n_samples + if pnp_explainer in ['integrated_gradients']: + disable_tunable_params['n_steps'] = n_steps + + logger.info("Running hyperparameter optimization...") + opt_results = expr.optimize( + explainer_key=pnp_explainer, + metric_key='cmpd', + metric_options=metric_options, + direction=direction, + disable_tunable_params=disable_tunable_params, + **config['optuna'] + ) + + logger.info(f"Best value: {opt_results.study.best_trial.value:.4f}") + + # Generate explanations + opt_explainer = opt_results.explainer + th_test_input = torch.tensor(test_dataset.inputs, dtype=torch.float32) + targets = model(th_test_input).argmax(-1) + + exp_name = PNP_INV_MAP[pnp_explainer] + + if exp_name in ["shap", "lime"]: + explanations = opt_explainer.attribute(th_test_input, targets)[0].detach().cpu().numpy() + else: + explanations = opt_explainer.attribute(th_test_input, targets).squeeze(1).detach().cpu().numpy() + + else: + raise ValueError("PnPXAI requires explainer name") + + return explanations + + +def explain_with_captum( + model: nn.Module, + X_test: np.ndarray, + y_test: np.ndarray, + explainer_name: str, + config: Dict[str, Any], + logger: logging.Logger, + n_samples: int = 64, +) -> np.ndarray: + """ + Generate explanations using Captum framework. + + Args: + model: PyTorch model (wrapped if XGBoost) + X_test: Test features + y_test: Test labels + explainer_name: Explainer type + config: Configuration dictionary + logger: Logger instance + n_samples: Number of samples for LIME/SHAP + + Returns: + Explanation array of shape (n_samples, n_features) + """ + from captum.attr import ( + KernelShap, Lime, IntegratedGradients, Saliency, + InputXGradient, NoiseTunnel, LRP + ) + from captum.attr._utils.lrp_rules import EpsilonRule + + logger.info(f"Setting up Captum framework with {explainer_name}...") + + # Create explainer + if explainer_name == "shap": + explainer = KernelShap(model) + elif explainer_name == "lime": + explainer = Lime(model, interpretable_model=None) + elif explainer_name == "grad": + explainer = Saliency(model) + elif explainer_name == "itg": + explainer = InputXGradient(model) + elif explainer_name == "ig": + explainer = IntegratedGradients(model, multiply_by_inputs=True) + elif explainer_name == "sg": + explainer = NoiseTunnel(Saliency(model)) + elif explainer_name == "lrp": + # Set LRP rules for batch norm layers + if hasattr(model, 'res_blocks'): + for block in model.res_blocks: + if hasattr(block, 'bn'): + block.bn.rule = EpsilonRule() + if hasattr(model, 'bn'): + model.bn.rule = EpsilonRule() + explainer = LRP(model) + else: + raise ValueError(f"Unknown Captum explainer: {explainer_name}") + + # Convert to tensor + X_test_t = torch.tensor(X_test, dtype=torch.float32) + targets = model(X_test_t).argmax(dim=1) + + # Generate explanations + logger.info("Generating explanations...") + + if explainer_name == "grad": + explanations = explainer.attribute(X_test_t, target=targets, abs=False) + explanations = explanations.detach().numpy() + + elif explainer_name == "sg": + explanations = explainer.attribute(X_test_t, target=targets, nt_type='smoothgrad') + explanations = explanations.detach().numpy() + + elif explainer_name in ("shap", "lime"): + # Process in batches to avoid memory issues + attrs_list = 
[] + for i in tqdm(range(len(X_test_t)), desc="Explaining"): + input_i = X_test_t[i].unsqueeze(0) + attr_i = explainer.attribute(input_i, target=targets[i]) + attrs_list.append(attr_i.detach().cpu().numpy()) + explanations = np.concatenate(attrs_list, axis=0) + + else: + explanations = explainer.attribute(X_test_t, target=targets) + explanations = explanations.detach().numpy() + + return explanations + + +def explain_with_omnixai( + model: nn.Module, + X_test: np.ndarray, + y_test: np.ndarray, + explainer_name: str, + config: Dict[str, Any], + logger: logging.Logger, + feature_metadata: Dict[str, Any], + raw_data, + n_samples: int = 64, +) -> np.ndarray: + """ + Generate explanations using OmniXAI framework. + + OmniXAI only supports XGBoost models and LIME/SHAP explainers. + + Args: + model: PyTorch model (wrapped XGBoost) + X_test: Test features + y_test: Test labels + explainer_name: Explainer type (lime or shap) + config: Configuration dictionary + logger: Logger instance + feature_metadata: Feature metadata for transformations + raw_data: Raw data for training + n_samples: Number of samples for LIME/SHAP + + Returns: + Explanation array of shape (n_samples, n_features) + """ + import functools + import pandas as pd + from omnixai.data.tabular import Tabular + from omnixai.explainers.tabular import TabularExplainer + from script_utils.data_utils import transform, invert_transform, find_idx + + logger.info(f"Setting up OmniXAI framework with {explainer_name}...") + + NAME_MAP = { + "lime": "LimeTabular", + "shap": "ShapTabular" + } + + explainer_nm = NAME_MAP[explainer_name] + + # Prepare training data + raw_data = raw_data.fillna("missing") + categorical_columns = [c for c in raw_data.columns if feature_metadata[c]["type"] == "categorical"] + train_data = Tabular(raw_data, categorical_columns=categorical_columns) + + # Get target function from wrapped model + if hasattr(model, 'xgb_model'): + target_function = model.xgb_model.predict_proba + else: + raise ValueError("OmniXAI requires XGBoost model") + + # Create transformation functions + transform_fn = functools.partial(transform, feature_metadata=feature_metadata) + + def prep(z): + return transform_fn(z.data.fillna("missing")) + + # Create explainer + explainer = TabularExplainer( + explainers=[explainer_nm], + mode="classification", + data=train_data, + model=target_function, + preprocess=prep, + ) + + # Prepare test instances + test_instances = invert_transform(X_test, feature_metadata).fillna("missing") + + # Set parameters + params = { + "LimeTabular": {"num_features": raw_data.shape[1], "num_samples": n_samples}, + "ShapTabular": {"nsamples": n_samples} + } + + # Generate explanations + logger.info("Generating explanations...") + exp_obj = explainer.explain(test_instances, params=params) + + # Extract and reorder scores + scores = [] + for i in range(test_instances.shape[0]): + exp = exp_obj[explainer_nm].get_explanations(i) + sorted_idx = find_idx(exp['features'], exp['instance'].columns.tolist()) + scores.append([exp['scores'][j] for j in sorted_idx]) + + explanations = np.array(scores) + return explanations + + +def explain_with_openxai( + model: nn.Module, + X_test: np.ndarray, + y_test: np.ndarray, + X_train: np.ndarray, + explainer_name: str, + config: Dict[str, Any], + logger: logging.Logger, + feature_metadata: Dict[str, Any], + batch_size: int = 32, + n_samples: int = 64, +) -> np.ndarray: + """ + Generate explanations using OpenXAI framework. 
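+
+    A minimal call sketch, mirroring the dispatch in ``main()`` (the argument
+    names are the ones defined in this signature; ``configs``, ``model`` and the
+    data arrays are assumed to come from ``load_configs``, ``load_model`` and
+    ``load_wine_quality``):
+
+        attrs = explain_with_openxai(
+            model, X_test, y_test, X_train,
+            explainer_name="ig", config=configs,
+            logger=logger, feature_metadata=feature_metadata,
+        )
+        # attrs has shape (n_test_samples, n_original_features)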
+ + OpenXAI only supports TabResNet models (not XGBoost) and does not support LRP/VG. + + Args: + model: PyTorch model (TabResNet) + X_test: Test features + y_test: Test labels + X_train: Training features (required for LIME/IG) + explainer_name: Explainer type + config: Configuration dictionary + logger: Logger instance + feature_metadata: Feature metadata for categorical aggregation + batch_size: Batch size for processing + n_samples: Number of samples for LIME/SHAP + + Returns: + Explanation array of shape (n_samples, n_features_original) + """ + from torch.utils.data import DataLoader, TensorDataset + from openxai import Explainer + from openxai.experiment_utils import fill_param_dict + + logger.info(f"Setting up OpenXAI framework with {explainer_name}...") + + # Convert to tensors + test_input = torch.tensor(X_test, dtype=torch.float32) + train_input = None + explainer_params = {} + + # Setup training data for LIME/IG + if explainer_name in ['lime', 'ig']: + train_input = torch.tensor(X_train, dtype=torch.float32) + explainer_params = fill_param_dict(explainer_name, {}, train_input) + + if explainer_name in ['lime', 'shap']: + explainer_params['n_samples'] = n_samples + + # Create explainer + explainer = Explainer(method=explainer_name, model=model, param_dict=explainer_params) + + # Get predictions + predicted_labels = model(test_input).detach().argmax(dim=1) + + # Create data loader + dataset = TensorDataset(test_input, predicted_labels) + data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=False) + + # Generate explanations + logger.info("Generating explanations...") + all_explanations = [] + for batch_inputs, batch_labels in tqdm(data_loader, desc="Explaining batches"): + batch_explanations = explainer.get_explanations(batch_inputs, label=batch_labels) + all_explanations.append(batch_explanations) + + # Combine batches + combined_explanations = torch.cat(all_explanations, dim=0) + + # Aggregate categorical features + processed_explanations = [] + for feature_name, feature_info in feature_metadata.items(): + if feature_info['type'] == 'categorical': + feature_index = feature_info['index'] + onehot_encoded = test_input[:, feature_index] + explanation_values = combined_explanations[:, feature_index] + categorical_explanation = (onehot_encoded * explanation_values).sum(dim=1) + processed_explanations.append(categorical_explanation) + else: + feature_index = feature_info['index'] + processed_explanations.append(combined_explanations[:, feature_index]) + + explanations = torch.stack(processed_explanations, dim=1).detach().numpy() + return explanations + + +def explain_with_autoxai( + model: nn.Module, + X_test: np.ndarray, + y_test: np.ndarray, + explainer_name: str, + config: Dict[str, Any], + logger: logging.Logger, + raw_data, + batch_size: int = 32, + n_samples: int = 64, +) -> np.ndarray: + """ + Generate explanations using AutoXAI framework. + + AutoXAI only supports TabResNet models and LIME/SHAP explainers. 
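+
+    A minimal call sketch (mirrors the call in ``main()``; ``configs`` and
+    ``raw_data`` are assumed to come from ``load_configs`` and
+    ``load_wine_quality``):
+
+        attrs = explain_with_autoxai(
+            model, X_test, y_test, explainer_name="shap",
+            config=configs, logger=logger, raw_data=raw_data,
+        )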
+ + Args: + model: PyTorch model (TabResNet) + X_test: Test features + y_test: Test labels + explainer_name: Explainer type (lime or shap) + config: Configuration dictionary + logger: Logger instance + raw_data: Raw data for feature names + batch_size: Batch size for processing + n_samples: Number of samples to explain + + Returns: + Explanation array of shape (n_samples, n_features) + """ + import sys + import os + import glob + + # Add AutoXAI virtual environment to sys.path + autoxai_venv = "/opt/autoxai_venv" + if os.path.exists(autoxai_venv): + # Find site-packages directory in the virtual environment + site_packages = glob.glob(f"{autoxai_venv}/lib/python*/site-packages") + if site_packages: + sys.path.insert(0, site_packages[0]) + + autoxai_path = os.path.join(os.path.dirname(__file__), "lib", "AutoXAI") + sys.path.insert(0, autoxai_path) + from hyperparameters_optimization import get_parameters + from XAI_solutions import set_up_explainer, get_local_exp + + logger.info(f"Setting up AutoXAI framework with {explainer_name}...") + + AUTOXAI_NAME_MAP = {"shap": "SHAP", "lime": "LIME"} + autoxai_nm = AUTOXAI_NAME_MAP[explainer_name] + + bg_size = min(50, len(X_test)) + + # Unwrap model if it's a TorchModelForXGBoost (AutoXAI needs the original XGBoost model) + from script_utils.model_utils import TorchModelForXGBoost + if isinstance(model, TorchModelForXGBoost): + unwrapped_model = model.xgb_model + else: + unwrapped_model = model + + # Setup context + properties_list = ["robustness", "fidelity", "conciseness"] + context = {} + # Use background samples for explainer setup, but explain all test samples + rand_idx = np.random.randint(0, X_test.shape[0], bg_size) + context["X"] = X_test[rand_idx] # Background samples for SHAP explainer + context["y"] = y_test[rand_idx] + context["feature_names"] = list(raw_data.columns) + context["verbose"] = False + context["task"] = "classification" + context["question"] = "Why" + context["session_id"] = f"_{bg_size}_wine" + context["scaling"] = "Std" + context["weights"] = [1, 2, 0.5] + context["distance"] = "cosine" + context["explanations"] = [] + context["model"] = unwrapped_model + context["ES"] = True + context["IS"] = True + + score_hist = { + "xai_sol": [], "epoch": [], "aggregated_score": [], + "parameters": [], "robustness": [], "scaled_robustness": [], + "fidelity": [], "scaled_fidelity": [], + "conciseness": [], "scaled_conciseness": [] + } + + # Get default parameters + logger.info("Preparing AutoXAI explainer with default parameters...") + default_parameters = get_parameters( + autoxai_nm, score_hist, "default", properties_list, context) + + # Setup explainer + context['explainer'] = set_up_explainer(autoxai_nm, default_parameters, context) + + # Generate explanations for all test samples + logger.info("Generating explanations...") + explanations = np.zeros_like(X_test) + for i in tqdm(range(len(X_test)), desc="Explaining"): + e = get_local_exp(autoxai_nm, X_test[i], default_parameters, context) + idx = default_parameters["most_influent_features"] + explanations[i, idx] = e + + return explanations + + +def evaluate_explanations( + explanations: np.ndarray, + model: nn.Module, + X_test: np.ndarray, + y_test: np.ndarray, + logger: logging.Logger, + batch_size: int = 32, +) -> Dict[str, np.ndarray]: + """ + Evaluate explanations using PnPXAI metrics. 
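+
+    The returned dictionary holds per-instance scores for AbPC, Complexity and
+    the compound metric (0.7 * AbPC - 0.3 * Complexity), so a typical call is:
+
+        metrics = evaluate_explanations(explanations, model, X_test, y_test, logger)
+        print(metrics["abpc"].mean(), metrics["cmpx"].mean(), metrics["cmpd"].mean())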
+ + Args: + explanations: Explanation array + model: PyTorch model + X_test: Test features + y_test: Test labels + logger: Logger instance + batch_size: Batch size for evaluation + + Returns: + Dictionary with metric arrays + """ + from torch.utils.data import DataLoader, TensorDataset + from pnpxai import Experiment + from pnpxai.core.modality.modality import Modality + from pnpxai.explainers import KernelShap + from pnpxai.evaluator.metrics import AbPC, Complexity, Metric + + logger.info("Evaluating explanations...") + + # Create dataloader + test_dataset = TensorDataset( + torch.tensor(X_test, dtype=torch.float32), + torch.tensor(y_test, dtype=torch.long) + ) + test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False) + + # Create experiment for metrics + sample_batch = next(iter(test_loader)) + modality = Modality( + dtype=sample_batch[0].dtype, + ndims=sample_batch[0].dim(), + ) + + expr = Experiment( + model=model, + data=test_loader, + modality=modality, + target_input_keys=[0], + target_class_extractor=lambda outputs: outputs.argmax(-1), + label_key=-1, + ) + + # Add explainers + expr.explainers.add('kernel_shap', KernelShap) + + # Add metrics + expr.metrics.add('abpc', AbPC) + expr.metrics.add('cmpx', Complexity) + + # Compound metric + class CompoundMetric(Metric): + def __init__(self, model, cmpd_metrics, weights, explainer=None, + target_input_keys=None, additional_input_keys=None, output_modifier=None): + super().__init__(model, explainer, target_input_keys, additional_input_keys, output_modifier) + self.cmpd_metrics = cmpd_metrics + self.weights = weights + + def evaluate(self, inputs, targets, attrs): + values = torch.zeros(attrs.size(0)).to(attrs.device) + for weight, metric in zip(self.weights, self.cmpd_metrics): + values += weight * metric.set_explainer(self.explainer).evaluate(inputs, targets, attrs) + return values + + expr.metrics.add('cmpd', CompoundMetric) + + # Create dummy explainer for evaluation + dummy_explainer = expr.create_explainer('kernel_shap') + + # Evaluate each metric + results = {} + X_test_t = torch.tensor(X_test, dtype=torch.float32) + explanations_t = torch.tensor(explanations, dtype=torch.float32) + + for metric_name in ['abpc', 'cmpx']: + metric = expr.create_metric(metric_name) + metric_values = [] + + for i in range(len(X_test)): + inputs = {0: X_test_t[i].unsqueeze(0)} + targets = model(inputs[0]).argmax(-1) + attrs = explanations_t[i].unsqueeze(0) + + value = metric.set_explainer(dummy_explainer).evaluate(inputs, targets, attrs) + metric_values.append(value.item()) + + results[metric_name] = np.array(metric_values) + + # Compound metric + metric_options = { + 'cmpd_metrics': [expr.create_metric('abpc'), expr.create_metric('cmpx')], + 'weights': [0.7, -0.3] + } + cmpd_metric = expr.create_metric('cmpd', **metric_options) + cmpd_values = [] + + for i in range(len(X_test)): + inputs = {0: X_test_t[i].unsqueeze(0)} + targets = model(inputs[0]).argmax(-1) + attrs = explanations_t[i].unsqueeze(0) + + value = cmpd_metric.set_explainer(dummy_explainer).evaluate(inputs, targets, attrs) + cmpd_values.append(value.item()) + + results['cmpd'] = np.array(cmpd_values) + + # Log summary + for metric_name, values in results.items(): + logger.info(f" {metric_name.upper()}: {values.mean():.4f} ± {values.std():.4f}") + + return results + + +def save_results( + explanations: np.ndarray, + metrics: Dict[str, np.ndarray], + output_dir: Path, + metadata: Dict[str, Any], + logger: logging.Logger, +): + """Save explanations, metrics, and 
metadata.""" + output_dir.mkdir(parents=True, exist_ok=True) + + # Save explanations + np.save(output_dir / "explanations.npy", explanations) + logger.info(f"Saved explanations: {output_dir / 'explanations.npy'}") + + # Save metrics + for metric_name, values in metrics.items(): + np.save(output_dir / f"{metric_name}.npy", values) + logger.info(f"Saved {metric_name}: {output_dir / f'{metric_name}.npy'}") + + # Save metadata + with open(output_dir / "metadata.json", 'w') as f: + json.dump(metadata, f, indent=2) + logger.info(f"Saved metadata: {output_dir / 'metadata.json'}") + + +def main(): + parser = argparse.ArgumentParser( + description="Generate explanations for Wine Quality predictions", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # PnPXAI with SHAP + python explain_wine_quality.py --framework pnpxai --model xgb --explainer shap + + # Captum with Integrated Gradients + python explain_wine_quality.py --framework captum --model tab_resnet --explainer ig + + # Save to custom directory + python explain_wine_quality.py --framework pnpxai --model xgb --explainer lime --output results/custom + """ + ) + + parser.add_argument( + "--framework", + type=str, + required=True, + choices=["pnpxai", "captum", "omnixai", "openxai", "autoxai"], + help="XAI framework to use" + ) + parser.add_argument( + "--model", + type=str, + required=True, + choices=["xgb", "tab_resnet"], + help="Model type" + ) + parser.add_argument( + "--explainer", + type=str, + required=True, + choices=["lime", "shap", "ig", "grad", "sg", "itg", "vg", "lrp"], + help="Explainer method" + ) + parser.add_argument( + "--data-dir", + type=str, + default="data/Wine Quality", + help="Directory containing preprocessed data" + ) + parser.add_argument( + "--config-dir", + type=str, + default="configs/tabular", + help="Directory containing config files" + ) + parser.add_argument( + "--output", + type=str, + default=None, + help="Output directory (default: results/Wine Quality/{model}/{framework}/{explainer})" + ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="Random seed" + ) + parser.add_argument( + "--batch-size", + type=int, + default=32, + help="Batch size for processing" + ) + parser.add_argument( + "--n-samples", + type=int, + default=64, + help="Number of samples for LIME/SHAP" + ) + parser.add_argument( + "--verbose", + action="store_true", + help="Enable verbose logging" + ) + parser.add_argument( + "--max-test-samples", + type=int, + default=None, + help="Maximum number of test samples to use (default: use all)" + ) + + args = parser.parse_args() + + # Setup + logger = setup_logging(args.verbose) + logger.info("Wine Quality Explanation Script") + logger.info("="*50) + + # Validate arguments + try: + validate_explainer_args(args.framework, args.model, args.explainer) + except ValueError as e: + logger.error(f"Invalid arguments: {e}") + sys.exit(1) + + # Set seeds + set_seeds(args.seed) + logger.info(f"Random seed: {args.seed}") + + # Load configurations + configs = load_configs(args.config_dir) + + # Load data + logger.info(f"Loading data from: {args.data_dir}") + X_train, X_test, y_train, y_test, feature_metadata, raw_data = load_wine_quality(args.data_dir) + + # Limit test samples if specified + if args.max_test_samples is not None and args.max_test_samples < len(X_test): + logger.info(f"Limiting test samples from {len(X_test)} to {args.max_test_samples}") + X_test = X_test[:args.max_test_samples] + y_test = y_test[:args.max_test_samples] + + logger.info(f" Train: 
{len(X_train)} samples") + logger.info(f" Test: {len(X_test)} samples, {X_test.shape[1]} features") + + # Load model + logger.info(f"Loading {args.model} model...") + data_path = Path(args.data_dir) + + if args.model == "xgb": + model_path = data_path / "xgb_model.json" + elif args.model == "tab_resnet": + model_path = data_path / "resnet_model.pth" + + model = load_model( + args.model, + model_path, + input_dim=X_train.shape[1], + output_dim=2, + num_blocks=1 + ) + + # Wrap model for PyTorch + model = wrap_model_for_pytorch(model, args.model) + logger.info(f"Model loaded: {model_path}") + + # Generate explanations + start_time = datetime.now() + + if args.framework == "pnpxai": + explanations = explain_with_pnpxai( + model, X_test, y_test, args.explainer, + configs, logger, args.batch_size, args.n_samples, n_steps=50, model_type=args.model + ) + elif args.framework == "captum": + explanations = explain_with_captum( + model, X_test, y_test, args.explainer, + configs, logger, args.n_samples + ) + elif args.framework == "omnixai": + explanations = explain_with_omnixai( + model, X_test, y_test, args.explainer, + configs, logger, feature_metadata, raw_data, args.n_samples + ) + elif args.framework == "openxai": + explanations = explain_with_openxai( + model, X_test, y_test, X_train, args.explainer, + configs, logger, feature_metadata, args.batch_size, args.n_samples + ) + elif args.framework == "autoxai": + explanations = explain_with_autoxai( + model, X_test, y_test, args.explainer, + configs, logger, raw_data, args.batch_size, args.n_samples + ) + else: + raise ValueError(f"Unknown framework: {args.framework}") + + elapsed_time = (datetime.now() - start_time).total_seconds() + logger.info(f"Explanation generation completed in {elapsed_time:.2f}s") + logger.info(f"Explanation shape: {explanations.shape}") + + # Evaluate explanations + metrics = evaluate_explanations( + explanations, model, X_test, y_test, + logger, args.batch_size + ) + + # Prepare output directory + if args.output: + output_dir = Path(args.output) + else: + output_dir = Path(f"results/Wine Quality/{args.model}/{args.framework}/{args.explainer}") + + # Prepare metadata + metadata = { + "framework": args.framework, + "model": args.model, + "explainer": args.explainer, + "seed": args.seed, + "n_samples": args.n_samples, + "batch_size": args.batch_size, + "data_shape": { + "train": list(X_train.shape), + "test": list(X_test.shape), + }, + "explanation_shape": list(explanations.shape), + "timestamp": datetime.now().isoformat(), + "elapsed_time": elapsed_time, + "metrics_summary": { + k: {"mean": float(v.mean()), "std": float(v.std())} + for k, v in metrics.items() + } + } + + # Save results + save_results(explanations, metrics, output_dir, metadata, logger) + + logger.info("="*50) + logger.info(f"Results saved to: {output_dir.absolute()}") + logger.info("Done!") + + +if __name__ == "__main__": + main() diff --git a/experiments/scripts/wine_quality/script_utils/__init__.py b/experiments/scripts/wine_quality/script_utils/__init__.py new file mode 100644 index 0000000..7c169a4 --- /dev/null +++ b/experiments/scripts/wine_quality/script_utils/__init__.py @@ -0,0 +1,39 @@ +""" +Utility modules for Wine Quality XAI experiments. 
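+
+A typical import pattern (all names below are re-exported via ``__all__``):
+
+    from script_utils import (
+        load_wine_quality,
+        load_model,
+        wrap_model_for_pytorch,
+        validate_explainer_args,
+    )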
+""" +from .data_utils import ( + load_wine_quality, + transform, + invert_transform, + find_idx, +) + +from .model_utils import ( + TorchModelForXGBoost, + load_model, + wrap_model_for_pytorch, +) + +from .explainer_factory import ( + ExplainerConfig, + validate_explainer_args, + get_framework_specific_name, +) + +__all__ = [ + # Data utils + "load_wine_quality", + "transform", + "invert_transform", + "find_idx", + + # Model utils + "TorchModelForXGBoost", + "load_model", + "wrap_model_for_pytorch", + + # Explainer factory + "ExplainerConfig", + "validate_explainer_args", + "get_framework_specific_name", +] diff --git a/experiments/scripts/wine_quality/script_utils/data_utils.py b/experiments/scripts/wine_quality/script_utils/data_utils.py new file mode 100644 index 0000000..f18e2d6 --- /dev/null +++ b/experiments/scripts/wine_quality/script_utils/data_utils.py @@ -0,0 +1,132 @@ +""" +Data loading and transformation utilities for Wine Quality dataset. +""" +import pickle +from pathlib import Path +from typing import Dict, Any, Tuple + +import numpy as np +import pandas as pd + + +def load_wine_quality( + data_dir: str = "data/Wine Quality" +) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, Dict[str, Any], pd.DataFrame]: + """ + Load Wine Quality dataset. + + Args: + data_dir: Directory containing the dataset files + + Returns: + X_train: Training features (preprocessed) + X_test: Test features (preprocessed) + y_train: Training labels + y_test: Test labels + feature_metadata: Feature metadata and encoders + raw_data: Original raw data + """ + data_path = Path(data_dir) + + X_train = np.load(data_path / "X_train.npy") + X_test = np.load(data_path / "X_test.npy") + y_train = np.load(data_path / "y_train.npy") + y_test = np.load(data_path / "y_test.npy") + + with open(data_path / "feature_metadata.pkl", "rb") as f: + feature_metadata = pickle.load(f) + + raw_data = pd.read_csv(data_path / "raw_data.csv") + + return X_train, X_test, y_train, y_test, feature_metadata, raw_data + + +def transform(X: pd.DataFrame, feature_metadata: Dict[str, Any]) -> np.ndarray: + """ + Transform raw data using feature metadata encoders. + + Args: + X: Raw feature DataFrame + feature_metadata: Feature metadata with encoders + + Returns: + Transformed feature array + """ + input_data = [] + for k, v in feature_metadata.items(): + if np.isin('missing', X[[k]].values): + X[[k]] = X[[k]].replace("missing", v['encoder'].categories_[0][-1]) + preprocessed = v['encoder'].transform(X[[k]].values) + if v['type'] == 'categorical': + preprocessed = preprocessed.toarray() + input_data.append(preprocessed) + + input_array = np.concatenate(input_data, axis=1) + return input_array + + +def invert_transform( + input_array: np.ndarray, + feature_metadata: Dict[str, Any] +) -> pd.DataFrame: + """ + Invert transformed data back to original feature space. 
+ + Args: + input_array: Transformed feature array + feature_metadata: Feature metadata with encoders + + Returns: + DataFrame with original features + """ + inverted_data = {} + + for col, meta in feature_metadata.items(): + if meta['type'] == 'categorical': + # Extract one-hot encoded portion + start_idx, end_idx = meta['index'][0], meta['index'][-1] + 1 + cat_data = input_array[:, start_idx:end_idx] + # Inverse transform using OneHotEncoder + inverted_col = meta['encoder'].inverse_transform(cat_data) + inverted_data[col] = inverted_col.flatten() + else: + # Inverse transform numerical data + idx = meta['index'] + num_data = input_array[:, idx].reshape(-1, 1) + inverted_col = meta['encoder'].inverse_transform(num_data) + inverted_data[col] = inverted_col.flatten() + + # Convert to DataFrame + inverted_df = pd.DataFrame(inverted_data) + + return inverted_df + + +def find_idx(a: list, b: list) -> list: + """ + Find permutation index where a[idx] = b. + + Args: + a: Source list + b: Target list + + Returns: + Index permutation, or None if impossible + """ + from collections import defaultdict, deque + + # Check if a and b have the same multiset + if sorted(a) != sorted(b): + return None + + # Create mapping from value to indices + pos_map = defaultdict(deque) + for i, val in enumerate(a): + pos_map[val].append(i) + + # Build index by matching b elements + idx = [] + for val in b: + idx.append(pos_map[val].popleft()) + + return idx diff --git a/experiments/scripts/wine_quality/script_utils/explainer_factory.py b/experiments/scripts/wine_quality/script_utils/explainer_factory.py new file mode 100644 index 0000000..1132281 --- /dev/null +++ b/experiments/scripts/wine_quality/script_utils/explainer_factory.py @@ -0,0 +1,187 @@ +""" +Explainer factory for creating explainers across different frameworks. +""" +from typing import Dict, Any, Tuple, Optional +import warnings + + +class ExplainerConfig: + """Configuration and validation for explainer frameworks.""" + + # Framework compatibility matrix + FRAMEWORK_MODEL_SUPPORT = { + "pnpxai": ["xgb", "tab_resnet"], + "captum": ["xgb", "tab_resnet"], + "omnixai": ["xgb"], # Only XGBoost + "openxai": ["tab_resnet"], # Only TabResNet + "autoxai": ["xgb", "tab_resnet"], # Deprecated + } + + FRAMEWORK_EXPLAINER_SUPPORT = { + "pnpxai": ["lime", "shap", "ig", "grad", "sg", "itg", "vg", "lrp"], + "captum": ["lime", "shap", "ig", "grad", "sg", "itg", "lrp"], # no vg + "omnixai": ["lime", "shap"], # Only model-agnostic + "openxai": ["lime", "shap", "ig", "grad", "sg", "itg"], # no lrp, vg + "autoxai": ["lime", "shap"], # Limited support + } + + # Explainer name mappings + EXPLAINER_NAME_MAP = { + "pnpxai": { + "shap": "kernel_shap", + "lime": "lime", + "grad": "gradient", + "itg": "grad_x_input", + "ig": "integrated_gradients", + "sg": "smooth_grad", + "vg": "var_grad", + "lrp": "lrp_uniform_epsilon", + }, + "captum": { + "shap": "KernelShap", + "lime": "Lime", + "grad": "Saliency", + "itg": "InputXGradient", + "ig": "IntegratedGradients", + "sg": "NoiseTunnel", + "lrp": "LRP", + }, + "omnixai": { + "lime": "LimeTabular", + "shap": "ShapTabular", + }, + "openxai": { + "lime": "lime", + "shap": "shap", + "ig": "ig", + "grad": "grad", + "sg": "sg", + "itg": "itg", + }, + "autoxai": { + "lime": "LIME", + "shap": "SHAP", + } + } + + @classmethod + def validate( + cls, + framework: str, + model: str, + explainer: Optional[str] = None + ) -> Tuple[bool, Optional[str]]: + """ + Validate framework/model/explainer combination. 
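+
+        For example (the values follow the support matrices defined above):
+
+            ok, err = ExplainerConfig.validate("captum", "xgb", "ig")    # (True, None)
+            ok, err = ExplainerConfig.validate("omnixai", "tab_resnet")  # (False, "Framework omnixai does not support model tab_resnet")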
+ + Args: + framework: Framework name + model: Model type + explainer: Explainer type (optional for pnpxai auto mode) + + Returns: + (is_valid, error_message) + """ + # Check framework + if framework not in cls.FRAMEWORK_MODEL_SUPPORT: + return False, f"Invalid framework: {framework}. Choose from {list(cls.FRAMEWORK_MODEL_SUPPORT.keys())}" + + # Check model support + if model not in cls.FRAMEWORK_MODEL_SUPPORT[framework]: + return False, f"Framework {framework} does not support model {model}" + + # Check explainer if provided + if explainer is not None: + if explainer not in cls.FRAMEWORK_EXPLAINER_SUPPORT[framework]: + return False, f"Framework {framework} does not support explainer {explainer}" + + return True, None + + @classmethod + def get_explainer_name(cls, framework: str, explainer: str) -> str: + """ + Get framework-specific explainer name. + + Args: + framework: Framework name + explainer: Standard explainer name + + Returns: + Framework-specific explainer name + """ + return cls.EXPLAINER_NAME_MAP.get(framework, {}).get(explainer, explainer) + + @classmethod + def get_default_params(cls, framework: str, explainer: str, n_samples: int = 64) -> Dict[str, Any]: + """ + Get default parameters for explainer. + + Args: + framework: Framework name + explainer: Explainer type + n_samples: Number of samples for sampling-based methods + + Returns: + Default parameters dictionary + """ + params = {} + + # Sampling-based explainers + if explainer in ["lime", "shap"]: + if framework == "omnixai": + if explainer == "lime": + params["num_samples"] = n_samples + elif explainer == "shap": + params["nsamples"] = n_samples + elif framework == "openxai": + params["n_samples"] = n_samples + elif framework == "pnpxai": + params["n_samples"] = n_samples + elif framework == "captum": + params["n_samples"] = n_samples + + return params + + +def validate_explainer_args( + framework: str, + model: str, + explainer: Optional[str] = None +) -> None: + """ + Validate explainer arguments and raise error if invalid. + + Args: + framework: Framework name + model: Model type + explainer: Explainer type (optional) + + Raises: + ValueError: If combination is invalid + """ + is_valid, error_msg = ExplainerConfig.validate(framework, model, explainer) + + if not is_valid: + raise ValueError(error_msg) + + # Warn about deprecated frameworks + if framework == "autoxai": + warnings.warn( + "AutoXAI is deprecated and may have compatibility issues with recent dependencies. " + "Consider using PnPXAI or Captum instead.", + DeprecationWarning + ) + + +def get_framework_specific_name(framework: str, explainer: str) -> str: + """ + Get framework-specific explainer name. + + Args: + framework: Framework name + explainer: Standard explainer name + + Returns: + Framework-specific name + """ + return ExplainerConfig.get_explainer_name(framework, explainer) diff --git a/experiments/scripts/wine_quality/script_utils/model_utils.py b/experiments/scripts/wine_quality/script_utils/model_utils.py new file mode 100644 index 0000000..86f421c --- /dev/null +++ b/experiments/scripts/wine_quality/script_utils/model_utils.py @@ -0,0 +1,260 @@ +""" +Model loading and wrapper utilities. +""" +from pathlib import Path +from typing import Union, Optional + +import torch +import torch.nn as nn +import xgboost as xgb + +try: + from huggingface_hub import hf_hub_download + HF_AVAILABLE = True +except ImportError: + HF_AVAILABLE = False + + +class TorchModelForXGBoost(nn.Module): + """ + PyTorch wrapper for XGBoost models. 
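+
+    A minimal usage sketch (``clf`` stands for any trained ``xgb.XGBClassifier``;
+    the name is illustrative only):
+
+        wrapped = TorchModelForXGBoost(clf)
+        probs = wrapped(torch.tensor(X, dtype=torch.float32))  # (batch, n_classes)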
+ + This wrapper allows XGBoost models to be used with PyTorch-based + explainability frameworks like Captum and PnPXAI. + """ + + def __init__(self, xgb_model: xgb.XGBClassifier): + """ + Args: + xgb_model: Trained XGBoost classifier + """ + super().__init__() + self.xgb_model = xgb_model + # Dummy layer to make this a proper PyTorch module + self._dummy_layer = nn.Linear(1, 1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Forward pass through XGBoost model. + + Args: + x: Input tensor of shape (batch_size, n_features) or (1, batch_size, n_features) + + Returns: + Probability predictions of shape (batch_size, n_classes) + """ + # Handle 3D input (squeeze batch dimension) + if x.ndim >= 3: + x = x.squeeze(0) + + # Get predictions from XGBoost + # Detach if tensor requires grad (for gradient-based explainers) + if x.requires_grad: + out = self.xgb_model.predict_proba(x.detach().cpu().numpy()) + else: + out = self.xgb_model.predict_proba(x.cpu().numpy()) + + # Convert back to tensor + return torch.from_numpy(out) + + +def load_model_from_hf( + model_type: str, + repo_id: str, + filename: str, + input_dim: int = None, + output_dim: int = 2, + cache_dir: str = None, + **kwargs +) -> Union[xgb.XGBClassifier, nn.Module]: + """ + Load a model from Hugging Face Hub. + + Args: + model_type: Type of model ('xgb', 'tab_resnet', 'lr') + repo_id: Hugging Face repository ID (e.g., 'username/repo-name') + filename: Model filename in the repository + input_dim: Input dimension (required for neural network models) + output_dim: Output dimension (default: 2 for binary classification) + cache_dir: Cache directory for downloaded models (default: HF default cache) + **kwargs: Additional arguments for model initialization + + Returns: + Loaded model + + Raises: + ImportError: If huggingface_hub is not installed + ValueError: If model_type is invalid or input_dim not provided for neural networks + """ + if not HF_AVAILABLE: + raise ImportError( + "huggingface_hub is not installed. " + "Install it with: pip install huggingface_hub" + ) + + # Download model from HF Hub + print(f"Downloading {filename} from {repo_id}...") + model_path = hf_hub_download( + repo_id=repo_id, + filename=filename, + cache_dir=cache_dir, + repo_type="model" + ) + print(f"Downloaded to: {model_path}") + + # Load the model using the existing load_model function + return load_model( + model_type=model_type, + model_path=model_path, + input_dim=input_dim, + output_dim=output_dim, + **kwargs + ) + + +def load_model( + model_type: str, + model_path: Union[str, Path] = None, + input_dim: int = None, + output_dim: int = 2, + hf_repo: str = None, + hf_filename: str = None, + local_dir: str = None, + **kwargs +) -> Union[xgb.XGBClassifier, nn.Module]: + """ + Load a trained model with priority: local file → local directory → HF Hub. + + Priority order: + 1. If model_path is provided and exists, load from that path + 2. If local_dir + hf_filename exists, load from there + 3. 
If hf_repo + hf_filename provided, download from HF Hub + + Args: + model_type: Type of model ('xgb', 'tab_resnet', 'lr') + model_path: Path to model file (Priority 1) + input_dim: Input dimension (required for neural network models) + output_dim: Output dimension (default: 2 for binary classification) + hf_repo: Hugging Face repository ID (e.g., 'username/repo-name') + hf_filename: Model filename in HF repository + local_dir: Local directory to check for model (default: data/Wine Quality) + **kwargs: Additional arguments for model initialization + + Returns: + Loaded model + + Raises: + ValueError: If model_type is invalid, input_dim not provided for neural networks, + or no valid model path found + ImportError: If trying to load from HF Hub without huggingface_hub installed + + Examples: + # Load from explicit path + model = load_model('xgb', model_path='data/Wine Quality/xgb_model.json') + + # Load with local fallback to HF Hub + model = load_model('xgb', hf_repo='username/wine-models', + hf_filename='xgb_model.json', + local_dir='data/Wine Quality') + """ + # Priority 1: Explicit model_path + if model_path is not None: + model_path = Path(model_path) + if model_path.exists(): + print(f"Loading model from: {model_path}") + else: + raise ValueError(f"Model path does not exist: {model_path}") + + # Priority 2: Check local_dir + hf_filename + elif local_dir is not None and hf_filename is not None: + local_path = Path(local_dir) / hf_filename + if local_path.exists(): + print(f"Loading model from local directory: {local_path}") + model_path = local_path + # Priority 3: Fall back to HF Hub + elif hf_repo is not None: + print(f"Model not found locally. Downloading from HF Hub...") + return load_model_from_hf( + model_type=model_type, + repo_id=hf_repo, + filename=hf_filename, + input_dim=input_dim, + output_dim=output_dim, + **kwargs + ) + else: + raise ValueError( + f"Model not found in local directory: {local_path}\n" + f"Provide hf_repo to download from Hugging Face Hub." + ) + + # Priority 3: Only HF Hub specified + elif hf_repo is not None and hf_filename is not None: + print(f"Loading model from HF Hub...") + return load_model_from_hf( + model_type=model_type, + repo_id=hf_repo, + filename=hf_filename, + input_dim=input_dim, + output_dim=output_dim, + **kwargs + ) + + else: + raise ValueError( + "Must provide either:\n" + " - model_path, or\n" + " - local_dir + hf_filename, or\n" + " - hf_repo + hf_filename" + ) + + if model_type == "xgb": + model = xgb.XGBClassifier() + model.load_model(str(model_path)) + return model + + elif model_type == "tab_resnet": + if input_dim is None: + raise ValueError("input_dim is required for tab_resnet model") + + from models.tab_resnet import TabResNet + + num_blocks = kwargs.get('num_blocks', 1) + model = TabResNet(input_dim, output_dim, num_blocks=num_blocks) + model.load_state_dict(torch.load(model_path)) + model.eval() + return model + + elif model_type == "lr": + if input_dim is None: + raise ValueError("input_dim is required for lr model") + + from models.tab_resnet import LogisticRegression + + model = LogisticRegression(input_dim, output_dim) + model.load_state_dict(torch.load(model_path)) + model.eval() + return model + + else: + raise ValueError(f"Unknown model type: {model_type}. Choose from ['xgb', 'tab_resnet', 'lr']") + + +def wrap_model_for_pytorch( + model: Union[xgb.XGBClassifier, nn.Module], + model_type: str +) -> nn.Module: + """ + Wrap model as PyTorch module if needed. 
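+
+    Example (XGBoost classifiers are wrapped, native PyTorch models are returned
+    unchanged):
+
+        torch_model = wrap_model_for_pytorch(xgb_clf, "xgb")        # TorchModelForXGBoost
+        torch_model = wrap_model_for_pytorch(resnet, "tab_resnet")  # same object back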
+ + Args: + model: Model to wrap + model_type: Type of model ('xgb', 'tab_resnet', 'lr') + + Returns: + PyTorch module + """ + if model_type == "xgb": + return TorchModelForXGBoost(model) + else: + return model diff --git a/models/__init__.py b/models/__init__.py new file mode 100644 index 0000000..245f6d2 --- /dev/null +++ b/models/__init__.py @@ -0,0 +1,16 @@ +""" +Neural network models for tabular data. +""" +from .tab_resnet import ( + TabResNet, + LogisticRegression, + ResNetBlock, + train_model, +) + +__all__ = [ + "TabResNet", + "LogisticRegression", + "ResNetBlock", + "train_model", +] diff --git a/models/tab_resnet.py b/models/tab_resnet.py new file mode 100644 index 0000000..0882b3c --- /dev/null +++ b/models/tab_resnet.py @@ -0,0 +1,232 @@ +""" +TabResNet and Logistic Regression models for tabular data. + +Models are compatible with XAI frameworks (LIME, SHAP, Captum, PnPXAI). +""" +import torch +import torch.nn as nn +import numpy as np + + +class ResNetBlock(nn.Module): + """Residual block for TabResNet.""" + + def __init__(self, in_features, out_features): + """ + Args: + in_features: Input feature dimension + out_features: Output feature dimension + """ + super(ResNetBlock, self).__init__() + self.bn = nn.BatchNorm1d(in_features) + self.fc1 = nn.Linear(in_features, out_features) + self.fc2 = nn.Linear(out_features, out_features) + self.dropout = nn.Dropout(0.2) + + def forward(self, x): + """ + Forward pass with residual connection. + + Args: + x: Input tensor of shape (batch_size, in_features) + + Returns: + Output tensor of shape (batch_size, out_features) + """ + y = torch.relu(self.fc1(self.bn(x))) + y = self.dropout(y) + y = self.fc2(y) + y = self.dropout(y) + return torch.add(x, y) + + +class TabResNet(nn.Module): + """ + Residual Network for tabular data classification. + + Architecture: + Input -> Embedding -> ResNet Blocks -> BatchNorm -> Output + """ + + def __init__(self, in_features, out_features, num_blocks=1, embedding_dim=128): + """ + Args: + in_features: Input feature dimension + out_features: Number of output classes + num_blocks: Number of residual blocks (default: 1) + embedding_dim: Embedding dimension (default: 128) + """ + super(TabResNet, self).__init__() + self.embedding = nn.Linear(in_features, embedding_dim) + self.res_blocks = [] + for i in range(num_blocks): + self.res_blocks.append(ResNetBlock(embedding_dim, embedding_dim)) + self.res_blocks = nn.ModuleList(self.res_blocks) + self.bn = nn.BatchNorm1d(embedding_dim) + self.fc = nn.Linear(embedding_dim, out_features) + + def network(self, x): + """ + Forward pass without softmax. + + Args: + x: Input tensor of shape (batch_size, in_features) + + Returns: + Logits of shape (batch_size, out_features) + """ + x = self.embedding(x) + for block in self.res_blocks: + x = block(x) + x = torch.relu(self.bn(x)) + x = self.fc(x) + return x + + def forward(self, x): + """ + Forward pass with softmax. + + Args: + x: Input tensor of shape (batch_size, in_features) + + Returns: + Probabilities of shape (batch_size, out_features) + """ + return torch.softmax(self.network(x), dim=1) + + def predict_proba(self, x): + """ + Predict class probabilities (compatible with SHAP). + + Args: + x: Input array or tensor + + Returns: + Probability array of shape (batch_size, out_features) + """ + input = x if torch.is_tensor(x) else torch.from_numpy(np.array(x)) + return self.forward(input.float()).detach().numpy() + + def predict(self, x, argmax=False): + """ + Predict class labels or probabilities (compatible with LIME). 
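+
+        For example:
+
+            probs = model.predict(X)                # (batch, n_classes) probabilities
+            labels = model.predict(X, argmax=True)  # (batch,) class indices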
+ + Args: + x: Input array or tensor + argmax: If True, return class labels; otherwise probabilities + + Returns: + Class labels or probabilities + """ + input = torch.squeeze(x) if torch.is_tensor(x) else torch.from_numpy(np.array(x)) + output = self.forward(input.float()).detach().numpy() + return output.argmax(axis=-1) if argmax else output + + +class LogisticRegression(nn.Module): + """ + Logistic Regression model for binary/multiclass classification. + + Simple linear model with softmax output. + """ + + def __init__(self, input_dim, output_dim): + """ + Args: + input_dim: Input feature dimension + output_dim: Number of output classes + """ + super(LogisticRegression, self).__init__() + self.linear = nn.Linear(input_dim, output_dim) + + def network(self, x): + """ + Forward pass without softmax. + + Args: + x: Input tensor of shape (batch_size, input_dim) + + Returns: + Logits of shape (batch_size, output_dim) + """ + return self.linear(x) + + def forward(self, x): + """ + Forward pass with softmax. + + Args: + x: Input tensor of shape (batch_size, input_dim) + + Returns: + Probabilities of shape (batch_size, output_dim) + """ + return torch.softmax(self.network(x), dim=1) + + def predict_proba(self, x): + """ + Predict class probabilities (compatible with SHAP). + + Args: + x: Input array or tensor + + Returns: + Probability array of shape (batch_size, output_dim) + """ + input = x if torch.is_tensor(x) else torch.from_numpy(np.array(x)) + return self.forward(input.float()).detach().numpy() + + def predict(self, x, argmax=False): + """ + Predict class labels or probabilities (compatible with LIME). + + Args: + x: Input array or tensor + argmax: If True, return class labels; otherwise probabilities + + Returns: + Class labels or probabilities + """ + input = torch.squeeze(x) if torch.is_tensor(x) else torch.from_numpy(np.array(x)) + output = self.forward(input.float()).detach().numpy() + return output.argmax(axis=-1) if argmax else output + + +def train_model( + X_train: torch.Tensor, + y_train: torch.Tensor, + model: nn.Module, + loss_fn: nn.Module, + optimizer: torch.optim.Optimizer, + epochs: int = 1000, + verbose: bool = True +): + """ + Train a PyTorch model. + + Args: + X_train: Training features tensor + y_train: Training labels tensor + model: PyTorch model + loss_fn: Loss function + optimizer: Optimizer + epochs: Number of training epochs + verbose: Whether to print progress + """ + model.train() + + for epoch in range(epochs): + # Forward pass + outputs = model(X_train) + loss = loss_fn(outputs, y_train) + + # Backward pass + optimizer.zero_grad() + loss.backward() + optimizer.step() + + # Print progress + if verbose and (epoch + 1) % 100 == 0: + print(f"Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}") + + model.eval() diff --git a/models/train.py b/models/train.py new file mode 100755 index 0000000..d76c13c --- /dev/null +++ b/models/train.py @@ -0,0 +1,462 @@ +#!/usr/bin/env python3 +""" +Train models on Wine Quality dataset. + +This script trains three models: +1. XGBoost Classifier +2. TabResNet (residual network for tabular data) +3. Logistic Regression (optional) + +Models are saved locally and optionally uploaded to Hugging Face Hub. 
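+
+Example usage (illustrative; adjust paths and the repository name to your setup):
+    python train.py --models xgb tab_resnet --device cpu
+    python train.py --models all --hf-repo username/wine-quality-models --replace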
+""" +import argparse +import random +import os +from pathlib import Path + +import numpy as np +import torch +import torch.nn as nn +import xgboost as xgb +import yaml + +from tab_resnet import TabResNet, LogisticRegression, train_model + +try: + from huggingface_hub import HfApi, hf_hub_download, list_repo_files + HF_AVAILABLE = True +except ImportError: + HF_AVAILABLE = False + print("Warning: huggingface_hub not installed. HF Hub features disabled.") + + +def set_seeds(seed: int = 0): + """Set random seeds for reproducibility.""" + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + + +def load_config(config_path: str = "../configs/model_config.yaml"): + """Load model configuration from YAML file.""" + with open(config_path, 'r') as f: + config = yaml.safe_load(f) + return config + + +def check_hf_model_exists(repo_id: str, filename: str) -> bool: + """ + Check if a model file exists in Hugging Face Hub. + + Args: + repo_id: Hugging Face repository ID + filename: Model filename to check + + Returns: + True if file exists, False otherwise + """ + if not HF_AVAILABLE: + return False + + try: + api = HfApi() + files = list_repo_files(repo_id) + return filename in files + except Exception as e: + print(f"Could not check HF Hub (repo may not exist): {e}") + return False + + +def upload_to_hf( + local_path: Path, + repo_id: str, + filename: str, + replace: bool = False, + token: str = None +): + """ + Upload model to Hugging Face Hub. + + Args: + local_path: Local file path + repo_id: Hugging Face repository ID + filename: Filename in the repository + replace: If True, replace existing file + token: HF API token (optional, uses HF_TOKEN env var if not provided) + """ + if not HF_AVAILABLE: + print("Warning: huggingface_hub not installed. Skipping upload.") + return + + try: + api = HfApi(token=token) + + # Check if file already exists + file_exists = check_hf_model_exists(repo_id, filename) + + if file_exists and not replace: + print(f"File {filename} already exists in {repo_id}. Skipping upload.") + print("Use --replace to overwrite existing files.") + return + + # Create repo if it doesn't exist + try: + api.create_repo(repo_id, exist_ok=True, repo_type="model") + print(f"Repository {repo_id} ready.") + except Exception as e: + print(f"Note: {e}") + + # Upload file + print(f"Uploading {filename} to {repo_id}...") + api.upload_file( + path_or_fileobj=str(local_path), + path_in_repo=filename, + repo_id=repo_id, + repo_type="model", + ) + print(f"✓ Successfully uploaded to https://huggingface.co/{repo_id}") + + except Exception as e: + print(f"Error uploading to HF Hub: {e}") + print("Make sure you have logged in: huggingface-cli login") + + +def train_xgboost(X_train, y_train, X_test, y_test, config, save_path, skip_training=False): + """ + Train XGBoost classifier. 
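+    If skip_training is set and save_path already exists, the saved model is
+    loaded instead of refit; test-set accuracy is reported in both cases.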
+ + Args: + X_train, y_train: Training data + X_test, y_test: Test data (for evaluation) + config: XGBoost configuration + save_path: Path to save model + skip_training: If True, load existing model instead of training + + Returns: + Trained XGBoost model + """ + print("\n" + "="*50) + print("XGBoost Classifier") + print("="*50) + + if skip_training and save_path.exists(): + print("Loading existing model (skip_training=True)...") + model = xgb.XGBClassifier(**config['xgb'].get('params', {})) + model.load_model(str(save_path)) + else: + print("Training XGBoost Classifier...") + # Initialize model with config params + model = xgb.XGBClassifier(**config['xgb'].get('params', {})) + + # Train model + model.fit(X_train, y_train) + + # Save model + model.save_model(str(save_path)) + print(f"Model saved to: {save_path}") + + # Evaluate + y_pred = model.predict(X_test) + accuracy = np.mean(y_pred == y_test) + print(f"XGBoost Accuracy: {accuracy:.4f}") + + return model + + +def train_tabresnet(X_train, y_train, X_test, y_test, config, save_path, device='cpu', skip_training=False): + """ + Train TabResNet model. + + Args: + X_train, y_train: Training data + X_test, y_test: Test data (for evaluation) + config: TabResNet configuration + save_path: Path to save model + device: Device to train on ('cpu' or 'cuda') + skip_training: If True, load existing model instead of training + + Returns: + Trained TabResNet model + """ + print("\n" + "="*50) + print("TabResNet") + print("="*50) + + # Model configuration + input_dim = X_train.shape[1] + output_dim = config['common']['output_dim'] + num_blocks = config['tab_resnet']['num_blocks'] + + # Initialize model + model = TabResNet(input_dim, output_dim, num_blocks=num_blocks).to(device) + + if skip_training and save_path.exists(): + print("Loading existing model (skip_training=True)...") + model.load_state_dict(torch.load(save_path)) + model.eval() + else: + print("Training TabResNet...") + # Convert to tensors + X_train_t = torch.from_numpy(X_train).float().to(device) + y_train_t = torch.from_numpy(y_train).long().to(device) + + # Training configuration + train_config = config['tab_resnet']['training'] + loss_fn = nn.CrossEntropyLoss() + optimizer = torch.optim.SGD( + model.parameters(), + lr=train_config['learning_rate'], + weight_decay=train_config['weight_decay'] + ) + + # Train model + train_model( + X_train_t, y_train_t, model, loss_fn, optimizer, + epochs=train_config['epochs'], + verbose=config['common'].get('verbose', True) + ) + + # Save model + torch.save(model.state_dict(), save_path) + print(f"Model saved to: {save_path}") + + # Evaluate + model.eval() + X_test_t = torch.from_numpy(X_test).float().to(device) + y_test_t = torch.from_numpy(y_test).long().to(device) + with torch.no_grad(): + y_pred = model(X_test_t).argmax(dim=1).cpu().numpy() + accuracy = np.mean(y_pred == y_test) + print(f"TabResNet Accuracy: {accuracy:.4f}") + + return model + + +def train_logistic_regression(X_train, y_train, X_test, y_test, config, save_path, device='cpu', skip_training=False): + """ + Train Logistic Regression model. 
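+    Uses the same SGD training loop as TabResNet, with hyperparameters taken
+    from config['lr']['training'].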
+ + Args: + X_train, y_train: Training data + X_test, y_test: Test data (for evaluation) + config: Logistic Regression configuration + save_path: Path to save model + device: Device to train on ('cpu' or 'cuda') + skip_training: If True, load existing model instead of training + + Returns: + Trained Logistic Regression model + """ + print("\n" + "="*50) + print("Logistic Regression") + print("="*50) + + # Model configuration + input_dim = X_train.shape[1] + output_dim = config['common']['output_dim'] + + # Initialize model + model = LogisticRegression(input_dim, output_dim).to(device) + + if skip_training and save_path.exists(): + print("Loading existing model (skip_training=True)...") + model.load_state_dict(torch.load(save_path)) + model.eval() + else: + print("Training Logistic Regression...") + # Convert to tensors + X_train_t = torch.from_numpy(X_train).float().to(device) + y_train_t = torch.from_numpy(y_train).long().to(device) + + # Training configuration + train_config = config['lr']['training'] + loss_fn = nn.CrossEntropyLoss() + optimizer = torch.optim.SGD( + model.parameters(), + lr=train_config['learning_rate'], + weight_decay=train_config['weight_decay'] + ) + + # Train model + train_model( + X_train_t, y_train_t, model, loss_fn, optimizer, + epochs=train_config['epochs'], + verbose=config['common'].get('verbose', True) + ) + + # Save model + torch.save(model.state_dict(), save_path) + print(f"Model saved to: {save_path}") + + # Evaluate + model.eval() + X_test_t = torch.from_numpy(X_test).float().to(device) + y_test_t = torch.from_numpy(y_test).long().to(device) + with torch.no_grad(): + y_pred = model(X_test_t).argmax(dim=1).cpu().numpy() + accuracy = np.mean(y_pred == y_test) + print(f"Logistic Regression Accuracy: {accuracy:.4f}") + + return model + + +def main(): + parser = argparse.ArgumentParser( + description="Train models on Wine Quality dataset and optionally upload to HF Hub" + ) + parser.add_argument( + "--data-dir", + type=str, + default="../data/Wine Quality", + help="Directory containing preprocessed data" + ) + parser.add_argument( + "--config", + type=str, + default="../configs/model_config.yaml", + help="Path to model configuration file" + ) + parser.add_argument( + "--models", + type=str, + nargs='+', + default=["xgb", "tab_resnet"], + choices=["xgb", "tab_resnet", "lr", "all"], + help="Models to train (default: xgb tab_resnet)" + ) + parser.add_argument( + "--device", + type=str, + default="cpu", + choices=["cpu", "cuda"], + help="Device to use for training (default: cpu)" + ) + parser.add_argument( + "--seed", + type=int, + default=None, + help="Random seed (default: from config)" + ) + parser.add_argument( + "--hf-repo", + type=str, + default=None, + help="Hugging Face repository ID (e.g., username/wine-quality-models)" + ) + parser.add_argument( + "--replace", + action="store_true", + help="Replace models in HF Hub if they exist. If set and models exist in HF, skip training and use HF models." 
+ ) + parser.add_argument( + "--hf-token", + type=str, + default=None, + help="Hugging Face API token (optional, uses HF_TOKEN env var if not provided)" + ) + + args = parser.parse_args() + + # Load configuration + config = load_config(args.config) + + # Set random seed + seed = args.seed if args.seed is not None else config['seeds']['model_training'] + set_seeds(seed) + print(f"Random seed set to: {seed}") + + # Load data + data_path = Path(args.data_dir) + print(f"\nLoading data from: {data_path}") + + X_train = np.load(data_path / "X_train.npy") + y_train = np.load(data_path / "y_train.npy") + X_test = np.load(data_path / "X_test.npy") + y_test = np.load(data_path / "y_test.npy") + + print(f" Train: {X_train.shape[0]} samples, {X_train.shape[1]} features") + print(f" Test: {X_test.shape[0]} samples") + + # Determine models to train + models_to_train = args.models + if "all" in models_to_train: + models_to_train = ["xgb", "tab_resnet", "lr"] + + # Check HF Hub if --replace is set + skip_training = False + if args.replace and args.hf_repo: + print(f"\n--replace flag set. Checking if models exist in {args.hf_repo}...") + all_exist = True + for model_name in models_to_train: + filename = config[model_name]['filename'] + exists = check_hf_model_exists(args.hf_repo, filename) + if exists: + print(f" ✓ {filename} exists in HF Hub") + else: + print(f" ✗ {filename} not found in HF Hub") + all_exist = False + + if all_exist: + print("\nAll models exist in HF Hub. Skipping training.") + skip_training = True + else: + print("\nSome models missing in HF Hub. Will train locally.") + + # Train models + trained_models = {} + + if "xgb" in models_to_train: + save_path = data_path / config['xgb']['filename'] + trained_models['xgb'] = train_xgboost( + X_train, y_train, X_test, y_test, config, save_path, + skip_training=skip_training + ) + + if "tab_resnet" in models_to_train: + save_path = data_path / config['tab_resnet']['filename'] + trained_models['tab_resnet'] = train_tabresnet( + X_train, y_train, X_test, y_test, config, save_path, + device=args.device, skip_training=skip_training + ) + + if "lr" in models_to_train: + save_path = data_path / config['lr']['filename'] + trained_models['lr'] = train_logistic_regression( + X_train, y_train, X_test, y_test, config, save_path, + device=args.device, skip_training=skip_training + ) + + print("\n" + "="*50) + print("Training/Loading completed!") + print("="*50) + print(f"\nProcessed models: {list(trained_models.keys())}") + print(f"Models saved in: {data_path}") + + # Upload to HF Hub if requested + if args.hf_repo: + print("\n" + "="*50) + print("Uploading to Hugging Face Hub") + print("="*50) + + token = args.hf_token or os.getenv('HF_TOKEN') + + for model_name in models_to_train: + save_path = data_path / config[model_name]['filename'] + filename = config[model_name]['filename'] + + upload_to_hf( + local_path=save_path, + repo_id=args.hf_repo, + filename=filename, + replace=args.replace, + token=token + ) + + print(f"\nAll models processed for HF Hub: {args.hf_repo}") + + +if __name__ == "__main__": + main() From 7ef1259413dfbc30c7dbce2dbc4c0375cf65bb77 Mon Sep 17 00:00:00 2001 From: shiningstone23 Date: Sat, 22 Nov 2025 13:11:54 +0900 Subject: [PATCH 15/20] feat/wine_quality --- .gitignore | 4 +- Dockerfile.wine_quality | 6 +- README_wine_quality.md | 280 ++++++++ docker-compose.wine_quality.yml | 65 ++ experiments/models/tab_resnet.py | 79 +++ experiments/scripts/analyze_wine_quality.py | 636 ++++++++++++++++++ experiments/scripts/wine_quality/README.md 
| 71 ++ .../wine_quality/analyze_wine_quality.py | 4 + .../wine_quality/explain_wine_quality.py | 72 +- .../wine_quality/generate_latex_table.py | 235 +++++++ .../wine_quality/script_utils/model_utils.py | 4 +- .../scripts/wine_quality/visualize_results.py | 296 ++++++++ models/__init__.py | 16 - models/tab_resnet.py | 232 ------- models/train.py | 462 ------------- 15 files changed, 1742 insertions(+), 720 deletions(-) create mode 100644 README_wine_quality.md create mode 100644 docker-compose.wine_quality.yml create mode 100644 experiments/models/tab_resnet.py create mode 100644 experiments/scripts/analyze_wine_quality.py create mode 100644 experiments/scripts/wine_quality/generate_latex_table.py create mode 100644 experiments/scripts/wine_quality/visualize_results.py delete mode 100644 models/__init__.py delete mode 100644 models/tab_resnet.py delete mode 100755 models/train.py diff --git a/.gitignore b/.gitignore index b6578e8..48f2787 100644 --- a/.gitignore +++ b/.gitignore @@ -10,4 +10,6 @@ MANIFEST **/*.mp4 results -benchmark \ No newline at end of file +benchmark +logs +etc \ No newline at end of file diff --git a/Dockerfile.wine_quality b/Dockerfile.wine_quality index 99f1ae9..fbedb76 100644 --- a/Dockerfile.wine_quality +++ b/Dockerfile.wine_quality @@ -22,7 +22,8 @@ RUN pip install --no-cache-dir \ shap==0.44.0 \ lime==0.2.0.1 \ pyyaml==6.0 \ - tqdm==4.66.0 + tqdm==4.66.0 \ + ucimlrepo # Install XAI frameworks from GitHub # Pin to specific versions for reproducibility (verified on 2025-11-13) @@ -54,7 +55,8 @@ RUN python -m venv /opt/autoxai_venv && \ cvxpy \ pyyaml==6.0 \ tqdm==4.66.0 \ - bayesian-optimization && \ + bayesian-optimization \ + ucimlrepo && \ /opt/autoxai_venv/bin/pip install --no-cache-dir git+https://github.com/OpenXAIProject/pnpxai.git@exp/tab # Clean up pip cache to reduce image size diff --git a/README_wine_quality.md b/README_wine_quality.md new file mode 100644 index 0000000..603bbe4 --- /dev/null +++ b/README_wine_quality.md @@ -0,0 +1,280 @@ +# Wine Quality XAI Benchmark + +[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) + +This repository provides a comprehensive benchmark for comparing explainability frameworks (PnPXAI, Captum, OmniXAI, OpenXAI, and AutoXAI) on the Wine Quality dataset using XGBoost and TabResNet models. + +## Wine Quality Dataset + +The **Wine Quality Dataset** is a widely-used dataset for classification tasks in machine learning. + +### Dataset Information + +- **Source**: UCI Machine Learning Repository +- **Reference**: P. Cortez, A. Cerdeira, F. Almeida, T. Matos and J. Reis. "Modeling wine preferences by data mining from physicochemical properties." Decision Support Systems, Elsevier, 47(4):547-553, 2009. +- **URL**: https://archive.ics.uci.edu/ml/datasets/wine+quality +- **Task**: Binary classification (good vs. bad wine quality) +- **Samples**: ~6,497 wine samples (white and red wine combined) + +## Quick Start with Docker Compose + +### Prerequisites + +- Docker +- Docker Compose +- NVIDIA GPU with CUDA support +- nvidia-docker runtime + +### Running the Experiment + +Simply run: + +```bash +docker compose -f docker-compose.wine_quality.yml up +``` + +This single command will: + +1. ✅ Start the Docker container with all dependencies +2. ✅ Load the Wine Quality dataset and pre-trained models +3. ✅ Run explanations across all framework/model/explainer combinations +4. ✅ Evaluate explanations using Faithfulness, Complexity, and Composite metrics +5. 
✅ Generate a LaTeX table in markdown format +6. ✅ Save all results to `results/Wine Quality/` +7. ✅ Create an execution log at `results/Wine Quality/experiment.log` + +### Expected Output + +The experiment will generate: + +- **Individual explanations**: `results/Wine Quality/{model}/{framework}/{explainer}/` + - `explanations.npy` - Attribution values for each sample + - `abpc.npy` - Faithfulness (AbPC) metric scores + - `cmpx.npy` - Complexity metric scores + - `cmpd.npy` - Composite metric scores + - `metadata.json` - Experiment configuration and summary + +- **Summary table**: `results/Wine Quality/experiment_result.md` + - LaTeX table comparing all frameworks and explainers + - Format matching academic paper standards + +- **Execution log**: `results/Wine Quality/experiment.log` + - Complete log of the experiment execution + - Useful for debugging and tracking progress + +### Viewing Results + +After the experiment completes, you can view the generated LaTeX table: + +```bash +cat results/Wine\ Quality/experiment_result.md +``` + +Example output format: + +```latex +\begin{table}[!th] + \caption{\textbf{Comparison of explanation performance on Wine Quality dataset.} + Evaluation of XGBoost and TabResNet models across three key metrics: + Faithfulness (higher is better $\uparrow$), Complexity (lower is better $\downarrow$), + and a Composite [Faithfulness, Simplicity] score ($\uparrow$). + The table compares PnP-XAI against Captum, AutoXAI, OmniXAI, and OpenXAI. + Bold values indicate the best score per row; dashes (-) denote unsupported combinations.} + \label{tab:wine_performance} + \centering + \resizebox{\textwidth}{!}{% + \begin{tabular}{lll||cccc|c} + \toprule + \textbf{Model} & \textbf{Metric} & \textbf{Explainer} & \textbf{Captum} & \textbf{OmniXAI} & \textbf{AutoXAI} & \textbf{OpenXAI} & \textbf{PnPXAI} \\ + \midrule + XGBoost & \multirow[t]{2}{*}{Faithfulness ($\uparrow$)} + & KernelSHAP & 0.1189 & 0.1079 & 0.0117 & - & \textbf{0.1205} \\ + & & LIME & \textbf{0.1176} & 0.0448 & 0.0025 & - & 0.1126 \\ + ... + \end{tabular}% + } +\end{table} +``` + +## Experiment Configuration + +The experiment tests the following combinations: + +### Frameworks Tested + +1. **PnPXAI** - Plug-and-Play XAI framework with hyperparameter optimization +2. **Captum** - PyTorch-based explainability library +3. **OmniXAI** - Unified framework for explainable AI (XGBoost only) +4. **OpenXAI** - Open-source explainability framework (TabResNet only) +5. 
**AutoXAI** - Automated XAI with hyperparameter tuning + +### Models Used + +- **XGBoost** - Gradient boosting classifier +- **TabResNet** - Residual neural network for tabular data + +### Explanation Methods + +- **LIME** - Local Interpretable Model-agnostic Explanations +- **SHAP (KernelSHAP)** - SHapley Additive exPlanations +- **Gradient** - Vanilla gradient (saliency) +- **Gradient × Input** - Input times gradient +- **Integrated Gradients** - Path integral of gradients +- **SmoothGrad** - Averaged gradients with noise +- **VarGrad** - Variance-based gradients (PnPXAI only) +- **LRP** - Layer-wise Relevance Propagation + +### Evaluation Metrics + +- **Faithfulness (AbPC)**: Measures how well the explanation reflects the model's actual decision-making process +- **Complexity**: Measures the simplicity/sparsity of the explanation +- **Composite Score**: Weighted combination (0.7 × Faithfulness - 0.3 × Complexity) + +## Manual Execution + +If you prefer to run the experiment manually without Docker: + +```bash +python experiments/scripts/analyze_wine_quality.py \ + --n-samples 25 \ + --seed 42 \ + --verbose \ + --data-dir data/Wine\ Quality \ + --results-dir results/Wine\ Quality +``` + +### Parameters + +- `--n-samples`: Number of samples for LIME/SHAP (default: 25) +- `--seed`: Random seed for reproducibility (default: 42) +- `--verbose`: Enable detailed logging +- `--data-dir`: Path to data directory (default: data/Wine Quality) +- `--config-dir`: Path to config directory (default: configs/tabular) +- `--results-dir`: Path to results directory (default: results/Wine Quality) + +## Repository Structure + +``` +pnpxai-experiments/ +├── experiments/ +│ ├── scripts/ +│ │ ├── analyze_wine_quality.py # Main experiment runner +│ │ ├── wine_quality/ # Original modular scripts (deprecated) +│ │ └── lib/ # External libraries +│ │ └── AutoXAI/ # AutoXAI framework implementation +│ └── models/ +│ └── tab_resnet.py # TabResNet model definition +├── data/ +│ └── Wine Quality/ +│ ├── X_train.npy # Training features +│ ├── X_test.npy # Test features +│ ├── y_train.npy # Training labels +│ ├── y_test.npy # Test labels +│ ├── feature_metadata.pkl # Feature preprocessing info +│ ├── raw_data.csv # Original raw data +│ ├── xgb_model.json # Pre-trained XGBoost model +│ └── resnet_model.pth # Pre-trained TabResNet model +├── configs/ +│ └── tabular/ +│ ├── explainer_config.yaml # Explainer configurations +│ └── optuna_config.yaml # Optimization configurations +├── results/ +│ └── Wine Quality/ +│ ├── xgb/ # XGBoost model results +│ │ ├── pnpxai/ +│ │ ├── captum/ +│ │ ├── omnixai/ +│ │ └── autoxai/ +│ ├── tab_resnet/ # TabResNet model results +│ │ ├── pnpxai/ +│ │ ├── captum/ +│ │ ├── openxai/ +│ │ └── autoxai/ +│ ├── experiment_result.md # Generated LaTeX table +│ └── experiment.log # Execution log +├── docker-compose.wine_quality.yml # Docker Compose configuration +├── Dockerfile.wine_quality # Docker image definition +├── README.md # Main project README +└── README_wine_quality.md # This file +``` + +## Citation + +If you use this benchmark in your research, please cite: + +```bibtex +@misc{wine_quality_xai_benchmark, + title={Wine Quality XAI Benchmark: Comparing Explainability Frameworks}, + author={Your Name}, + year={2025}, + howpublished={\url{https://github.com/yourusername/pnpxai-experiments}} +} +``` + +### Original Dataset Citation + +```bibtex +@article{cortez2009modeling, + title={Modeling wine preferences by data mining from physicochemical properties}, + author={Cortez, Paulo and Cerdeira, 
Antonio and Almeida, Fernando and Matos, Telmo and Reis, Jose}, + journal={Decision Support Systems}, + volume={47}, + number={4}, + pages={547--553}, + year={2009}, + publisher={Elsevier} +} +``` + +## Requirements + +See [docker-compose.wine_quality.yml](docker-compose.wine_quality.yml) for the complete list of dependencies and environment setup. + +Key dependencies: +- Python 3.8+ +- PyTorch +- PnPXAI +- Captum +- OmniXAI +- OpenXAI +- XGBoost +- scikit-learn +- pandas, numpy +- CUDA-enabled GPU (recommended) + +## License + +This project is licensed under the MIT License. + +## Troubleshooting + +### Docker Issues + +If you encounter Docker-related issues: + +```bash +# Rebuild the Docker image +docker compose -f docker-compose.wine_quality.yml build --no-cache + +# Check GPU availability +docker run --rm --gpus all nvidia/cuda:11.8.0-base-ubuntu22.04 nvidia-smi +``` + +### CUDA/GPU Issues + +Ensure you have: +- NVIDIA drivers installed +- nvidia-docker2 installed +- Docker daemon configured to use nvidia runtime + +### Permission Issues + +If you encounter permission errors with the results directory: + +```bash +chmod -R 777 results/ +``` + +## Contact + +For questions or issues, please open an issue on GitHub or contact the maintainers. diff --git a/docker-compose.wine_quality.yml b/docker-compose.wine_quality.yml new file mode 100644 index 0000000..6df85d9 --- /dev/null +++ b/docker-compose.wine_quality.yml @@ -0,0 +1,65 @@ +version: '3.8' + +services: + wine_quality: + image: pnpxai_wine_quality:latest + container_name: pnpxai-wine-quality + runtime: nvidia + + # GPU configuration + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: all + capabilities: [gpu] + + # Shared memory size for PyTorch DataLoader + shm_size: '8gb' + + # Volume mounts - mount all necessary directories + volumes: + # Mount entire project for code access + - ./:/root/pnpxai-experiments + + # Persistent results directory + - ./results:/root/pnpxai-experiments/results + + # Data directory with Wine Quality dataset + - ./data:/root/pnpxai-experiments/data + + # Config files + - ./configs:/root/pnpxai-experiments/configs + + # Experiments code + - ./experiments:/root/pnpxai-experiments/experiments + + # Working directory + working_dir: /root/pnpxai-experiments + + # Environment variables + environment: + - PYTHONUNBUFFERED=1 + - CUDA_VISIBLE_DEVICES=0 + + # Keep container running + stdin_open: true + tty: true + + # Run experiment automatically with logging + command: > + bash -c " + echo '========================================' && + echo 'Wine Quality XAI Experiment Starting...' && + echo '========================================' && + echo '' && + python experiments/scripts/analyze_wine_quality.py --n-samples 25 --seed 42 --verbose 2>&1 | tee results/Wine\ Quality/experiment.log && + echo '' && + echo '========================================' && + echo 'Experiment completed successfully!' 
&& + echo 'Results saved to: results/Wine Quality/' && + echo 'Log saved to: results/Wine Quality/experiment.log' && + echo '========================================' && + tail -f /dev/null + " diff --git a/experiments/models/tab_resnet.py b/experiments/models/tab_resnet.py new file mode 100644 index 0000000..d1f2a5b --- /dev/null +++ b/experiments/models/tab_resnet.py @@ -0,0 +1,79 @@ +import torch +import torch.nn as nn +import numpy as np + +class ResNetBlock(nn.Module): + def __init__(self, in_features, out_features): + super(ResNetBlock, self).__init__() + self.bn = nn.BatchNorm1d(in_features) + self.fc1 = nn.Linear(in_features, out_features) + self.fc2 = nn.Linear(out_features, out_features) + self.dropout = nn.Dropout(0.2) + + def forward(self, x): + # if x.ndim >= 3: + # x = x.squeeze(1) + y = torch.relu(self.fc1(self.bn(x))) + y = self.dropout(y) + y = self.fc2(y) + y = self.dropout(y) + return torch.add(x, y) + + +class TabResNet(nn.Module): + def __init__(self, in_features, out_features, num_blocks=1, embedding_dim=128): + super(TabResNet, self).__init__() + self.embedding = nn.Linear(in_features, embedding_dim) + self.res_blocks = [] + for i in range(num_blocks): + self.res_blocks.append(ResNetBlock(embedding_dim, embedding_dim)) + self.res_blocks = nn.ModuleList(self.res_blocks) + self.bn = nn.BatchNorm1d(embedding_dim) + self.fc = nn.Linear(embedding_dim, out_features) + + def network(self, x): + x = self.embedding(x) + for block in self.res_blocks: + x = block(x) + x = torch.relu(self.bn(x)) + x = self.fc(x) + return x + + def forward(self, x): + return torch.softmax(self.network(x), dim=1) + + def predict_proba(self, x): + # Currently used by SHAP + input = x if torch.is_tensor(x) else torch.from_numpy(np.array(x)) + return self.forward(input.float()).detach().numpy() + + def predict(self, x, argmax=False): + # Currently used by LIME + input = torch.squeeze(x) if torch.is_tensor(x) else torch.from_numpy(np.array(x)) + output = self.forward(input.float()).detach().numpy() + return output.argmax(axis=-1) if argmax else output + + + +class LogisticRegression(torch.nn.Module): + def __init__(self, input_dim, output_dim): + super(LogisticRegression, self).__init__() + self.linear = torch.nn.Linear(input_dim, output_dim) + + def network(self, x): + return self.linear(x) + + def forward(self, x): + return torch.softmax(self.network(x), dim=1) + + def predict_proba(self, x): + # Currently used by SHAP + input = x if torch.is_tensor(x) else torch.from_numpy(np.array(x)) + return self.forward(input.float()).detach().numpy() + + def predict(self, x, argmax=False): + # Currently used by LIME + input = torch.squeeze(x) if torch.is_tensor(x) else torch.from_numpy(np.array(x)) + output = self.forward(input.float()).detach().numpy() + return output.argmax(axis=-1) if argmax else output + diff --git a/experiments/scripts/analyze_wine_quality.py b/experiments/scripts/analyze_wine_quality.py new file mode 100644 index 0000000..690addd --- /dev/null +++ b/experiments/scripts/analyze_wine_quality.py @@ -0,0 +1,636 @@ +#!/usr/bin/env python3 +""" +Wine Quality XAI Experiment Runner + +This script runs a complete Wine Quality explanation experiment: +1. Loads data and models +2. Generates explanations using multiple frameworks (PnPXAI, Captum, OmniXAI, OpenXAI, AutoXAI) +3. Evaluates explanations +4. Generates a LaTeX table in markdown format +5. 
Saves results to results/Wine Quality/ + +Usage: + python run_experiment.py [--n-samples N] [--seed SEED] [--verbose] + +Author: Generated for Wine Quality XAI benchmarking +""" + +import os +import sys +import json +import time +import pickle +import random +import argparse +import logging +import warnings +import subprocess +from pathlib import Path +from datetime import datetime +from typing import Dict, Any, Tuple, List, Optional, Union +from collections import defaultdict + +import numpy as np +import pandas as pd +import torch +import torch.nn as nn +import xgboost as xgb +import yaml +from tqdm import tqdm + +# Suppress warnings +warnings.filterwarnings('ignore') + +# ============================================================================ +# Data Loading Utilities +# ============================================================================ + +def load_wine_quality(data_dir: str = "data/Wine Quality") -> Tuple: + """Load Wine Quality dataset and feature metadata.""" + data_path = Path(data_dir) + + X_train = np.load(data_path / "X_train.npy") + X_test = np.load(data_path / "X_test.npy") + y_train = np.load(data_path / "y_train.npy") + y_test = np.load(data_path / "y_test.npy") + + with open(data_path / "feature_metadata.pkl", "rb") as f: + feature_metadata = pickle.load(f) + + raw_data = pd.read_csv(data_path / "raw_data.csv") + + return X_train, X_test, y_train, y_test, feature_metadata, raw_data + + +def transform(X: pd.DataFrame, feature_metadata: Dict[str, Any]) -> np.ndarray: + """Transform raw data using feature metadata encoders.""" + input_data = [] + for k, v in feature_metadata.items(): + if np.isin('missing', X[[k]].values): + X[[k]] = X[[k]].replace("missing", v['encoder'].categories_[0][-1]) + preprocessed = v['encoder'].transform(X[[k]].values) + if v['type'] == 'categorical': + preprocessed = preprocessed.toarray() + input_data.append(preprocessed) + + input_array = np.concatenate(input_data, axis=1) + return input_array + + +def invert_transform(input_array: np.ndarray, feature_metadata: Dict[str, Any]) -> pd.DataFrame: + """Invert transformed data back to original feature space.""" + inverted_data = {} + + for col, meta in feature_metadata.items(): + if meta['type'] == 'categorical': + start_idx, end_idx = meta['index'][0], meta['index'][-1] + 1 + cat_data = input_array[:, start_idx:end_idx] + inverted_col = meta['encoder'].inverse_transform(cat_data) + inverted_data[col] = inverted_col.flatten() + else: + idx = meta['index'] + num_data = input_array[:, idx].reshape(-1, 1) + inverted_col = meta['encoder'].inverse_transform(num_data) + inverted_data[col] = inverted_col.flatten() + + return pd.DataFrame(inverted_data) + + +def find_idx(a: list, b: list) -> list: + """Find permutation index where a[idx] = b.""" + from collections import defaultdict, deque + + if sorted(a) != sorted(b): + return None + + pos_map = defaultdict(deque) + for i, val in enumerate(a): + pos_map[val].append(i) + + idx = [] + for val in b: + idx.append(pos_map[val].popleft()) + + return idx + + +# ============================================================================ +# Model Loading Utilities +# ============================================================================ + +class TorchModelForXGBoost(nn.Module): + """PyTorch wrapper for XGBoost models.""" + + def __init__(self, xgb_model: xgb.XGBClassifier): + super().__init__() + self.xgb_model = xgb_model + self._dummy_layer = nn.Linear(1, 1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim >= 3: + x = 
x.squeeze(0) + + if x.requires_grad: + out = self.xgb_model.predict_proba(x.detach().cpu().numpy()) + else: + out = self.xgb_model.predict_proba(x.cpu().numpy()) + + return torch.from_numpy(out) + + +def load_model(model_type: str, model_path: Union[str, Path], input_dim: int = None, + output_dim: int = 2, **kwargs) -> Union[xgb.XGBClassifier, nn.Module]: + """Load a trained model.""" + model_path = Path(model_path) + + if model_type == "xgb": + model = xgb.XGBClassifier() + model.load_model(str(model_path)) + return model + + elif model_type == "tab_resnet": + if input_dim is None: + raise ValueError("input_dim is required for tab_resnet model") + + from experiments.models.tab_resnet import TabResNet + + num_blocks = kwargs.get('num_blocks', 1) + model = TabResNet(input_dim, output_dim, num_blocks=num_blocks) + model.load_state_dict(torch.load(model_path)) + model.eval() + return model + + else: + raise ValueError(f"Unknown model type: {model_type}") + + +def wrap_model_for_pytorch(model: Union[xgb.XGBClassifier, nn.Module], + model_type: str) -> nn.Module: + """Wrap model as PyTorch module if needed.""" + if model_type == "xgb": + return TorchModelForXGBoost(model) + else: + return model + + +# ============================================================================ +# Explainer Validation +# ============================================================================ + +FRAMEWORK_MODEL_SUPPORT = { + "pnpxai": ["xgb", "tab_resnet"], + "captum": ["xgb", "tab_resnet"], + "omnixai": ["xgb"], + "openxai": ["tab_resnet"], + "autoxai": ["xgb", "tab_resnet"], +} + +FRAMEWORK_EXPLAINER_SUPPORT = { + "pnpxai": ["lime", "shap", "ig", "grad", "sg", "itg", "vg", "lrp"], + "captum": ["lime", "shap", "ig", "grad", "sg", "itg", "lrp"], + "omnixai": ["lime", "shap"], + "openxai": ["lime", "shap", "ig", "grad", "sg", "itg"], + "autoxai": ["lime", "shap"], +} + + +def validate_explainer_args(framework: str, model: str, explainer: str) -> None: + """Validate explainer arguments.""" + if framework not in FRAMEWORK_MODEL_SUPPORT: + raise ValueError(f"Invalid framework: {framework}") + + if model not in FRAMEWORK_MODEL_SUPPORT[framework]: + raise ValueError(f"Framework {framework} does not support model {model}") + + if explainer not in FRAMEWORK_EXPLAINER_SUPPORT[framework]: + raise ValueError(f"Framework {framework} does not support explainer {explainer}") + + +# ============================================================================ +# Explanation Generation Functions +# ============================================================================ + +def set_seeds(seed: int = 42): + """Set random seeds for reproducibility.""" + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + + +def run_single_explanation(framework: str, model_name: str, explainer: str, + model: nn.Module, X_test: np.ndarray, y_test: np.ndarray, + X_train: np.ndarray, feature_metadata: Dict, raw_data: pd.DataFrame, + config: Dict, logger: logging.Logger, + n_samples: int = 25) -> Tuple[np.ndarray, Dict]: + """Run a single explanation and evaluation.""" + logger.info(f"Generating {framework}/{model_name}/{explainer} explanations...") + + # Import explain_wine_quality module + sys.path.insert(0, str(Path(__file__).parent / "experiments" / "scripts" / "wine_quality")) + from explain_wine_quality import ( + explain_with_pnpxai, explain_with_captum, explain_with_omnixai, + explain_with_openxai, explain_with_autoxai, evaluate_explanations + ) + + # 
Generate explanations based on framework + if framework == "pnpxai": + explanations = explain_with_pnpxai( + model, X_test, y_test, explainer, config, logger, + batch_size=32, n_samples=n_samples, n_steps=50, model_type=model_name + ) + elif framework == "captum": + explanations = explain_with_captum( + model, X_test, y_test, explainer, config, logger, n_samples + ) + elif framework == "omnixai": + explanations = explain_with_omnixai( + model, X_test, y_test, explainer, config, logger, + feature_metadata, raw_data, n_samples + ) + elif framework == "openxai": + explanations = explain_with_openxai( + model, X_test, y_test, X_train, explainer, config, logger, + feature_metadata, batch_size=32, n_samples=n_samples + ) + elif framework == "autoxai": + explanations = explain_with_autoxai( + model, X_test, y_test, explainer, config, logger, + raw_data, batch_size=32, n_samples=n_samples + ) + else: + raise ValueError(f"Unknown framework: {framework}") + + # Evaluate explanations + metrics = evaluate_explanations(explanations, model, X_test, y_test, logger, batch_size=32) + + return explanations, metrics + + +# ============================================================================ +# Experiment Runner +# ============================================================================ + +EXPERIMENTS = { + "pnpxai": { + "xgb": ["lime", "shap"], + "tab_resnet": ["lime", "shap", "grad", "itg", "ig", "sg", "vg", "lrp"], + }, + "captum": { + "xgb": ["lime", "shap"], + "tab_resnet": ["lime", "shap", "grad", "itg", "ig", "sg", "lrp"], + }, + "omnixai": { + "xgb": ["lime", "shap"], + }, + "openxai": { + "tab_resnet": ["lime", "shap", "grad", "itg", "ig", "sg"], + }, + "autoxai": { + "xgb": ["lime", "shap"], + "tab_resnet": ["lime", "shap"], + }, +} + + +def run_all_experiments(data_dir: str = "data/Wine Quality", + config_dir: str = "configs/tabular", + results_dir: str = "results/Wine Quality", + n_samples: int = 25, + seed: int = 42, + verbose: bool = False): + """Run all Wine Quality experiments and generate results table.""" + + # Setup logging + level = logging.INFO if verbose else logging.WARNING + logging.basicConfig(level=level, format='%(asctime)s - %(levelname)s - %(message)s') + logger = logging.getLogger(__name__) + + logger.info("="*70) + logger.info("Wine Quality XAI Experiment Runner") + logger.info("="*70) + + # Set seeds + set_seeds(seed) + logger.info(f"Random seed: {seed}") + + # Load data + logger.info(f"Loading data from: {data_dir}") + X_train, X_test, y_train, y_test, feature_metadata, raw_data = load_wine_quality(data_dir) + logger.info(f"Data loaded: Train={len(X_train)}, Test={len(X_test)}, Features={X_test.shape[1]}") + + # Load configs + config_path = Path(config_dir) + with open(config_path / "explainer_config.yaml", 'r') as f: + explainer_config = yaml.safe_load(f) + with open(config_path / "optuna_config.yaml", 'r') as f: + optuna_config = yaml.safe_load(f) + + config = {'explainer': explainer_config, 'optuna': optuna_config} + + # Load models + models = {} + logger.info("Loading models...") + + xgb_model = load_model("xgb", Path(data_dir) / "xgb_model.json") + models["xgb"] = wrap_model_for_pytorch(xgb_model, "xgb") + + resnet_model = load_model("tab_resnet", Path(data_dir) / "resnet_model.pth", + input_dim=X_train.shape[1], output_dim=2, num_blocks=1) + models["tab_resnet"] = resnet_model + + logger.info("Models loaded successfully") + + # Run all experiments + total_experiments = sum(len(explainers) for fw_models in EXPERIMENTS.values() + for explainers in 
fw_models.values()) + logger.info(f"Total experiments to run: {total_experiments}") + + completed = 0 + for framework, fw_models in EXPERIMENTS.items(): + for model_name, explainers in fw_models.items(): + for explainer in explainers: + completed += 1 + logger.info(f"\n[{completed}/{total_experiments}] {framework}/{model_name}/{explainer}") + + try: + # Validate combination + validate_explainer_args(framework, model_name, explainer) + + # Reset seeds for each experiment + set_seeds(seed) + + # Run explanation + model = models[model_name] + explanations, metrics = run_single_explanation( + framework, model_name, explainer, model, + X_test, y_test, X_train, feature_metadata, raw_data, + config, logger, n_samples + ) + + # Save results + output_dir = Path(results_dir) / model_name / framework / explainer + output_dir.mkdir(parents=True, exist_ok=True) + + np.save(output_dir / "explanations.npy", explanations) + np.save(output_dir / "abpc.npy", metrics['abpc']) + np.save(output_dir / "cmpx.npy", metrics['cmpx']) + np.save(output_dir / "cmpd.npy", metrics['cmpd']) + + logger.info(f"✓ Saved to {output_dir}") + + except Exception as e: + logger.error(f"✗ Failed: {e}") + if verbose: + import traceback + traceback.print_exc() + continue + + logger.info("\n" + "="*70) + logger.info("All experiments completed!") + logger.info("="*70) + + +# ============================================================================ +# LaTeX Table Generation +# ============================================================================ + +def collect_results(results_dir="results/Wine Quality"): + """Collect all experiment results.""" + results_path = Path(results_dir) + data = defaultdict(lambda: defaultdict(lambda: defaultdict(dict))) + + for root, dirs, files in os.walk(results_path): + if "explanations.npy" in files: + parts = Path(root).relative_to(results_path).parts + if len(parts) != 3: + continue + + model, framework, explainer = parts + + try: + abpc = np.load(os.path.join(root, "abpc.npy")).mean() + cmpx = np.load(os.path.join(root, "cmpx.npy")).mean() + cmpd = np.load(os.path.join(root, "cmpd.npy")).mean() + + data[model][framework][explainer] = { + 'faithfulness': abpc, + 'complexity': cmpx, + 'composite': cmpd + } + except Exception as e: + print(f"Error loading {root}: {e}") + continue + + return data + + +def format_value(value, best_value, is_complexity=False): + """Format value with bold if it's the best.""" + if value is None: + return "-" + + is_best = abs(value - best_value) < 1e-6 if best_value is not None else False + formatted = f"{value:.4f}" + return f"\\textbf{{{formatted}}}" if is_best else formatted + + +def get_best_value(values, is_complexity=False): + """Get the best value from a list.""" + valid_values = [v for v in values if v is not None] + if not valid_values: + return None + + return min(valid_values) if is_complexity else max(valid_values) + + +def generate_latex_table(data): + """Generate LaTeX table matching the original format.""" + + EXPLAINER_MAP = { + "shap": "KernelSHAP", + "lime": "LIME", + "grad": "Gradient", + "itg": "Grad.$\\times$Input", + "ig": "Integrated Gradients", + "sg": "SmoothGrad", + "vg": "VarGrad", + "lrp": "LRP" + } + + MODEL_MAP = { + "xgb": "XGBoost", + "tab_resnet": "ResNet" + } + + FRAMEWORK_ORDER = ["captum", "omnixai", "autoxai", "openxai", "pnpxai"] + + XGB_EXPLAINERS = ["shap", "lime"] + RESNET_EXPLAINERS = ["shap", "lime", "grad", "itg", "ig", "sg", "vg", "lrp"] + + lines = [] + lines.append("\\begin{table}[!th]") + lines.append(" 
\\caption{\\textbf{Comparison of explanation performance on Wine Quality dataset.}") + lines.append(" Evaluation of XGBoost and TabResNet models across three key metrics:") + lines.append(" Faithfulness (higher is better $\\uparrow$), Complexity (lower is better $\\downarrow$), and a Composite [Faithfulness, Simplicity] score ($\\uparrow$).") + lines.append(" The table compares PnP-XAI against Captum, AutoXAI, OmniXAI, and OpenXAI.") + lines.append(" Bold values indicate the best score per row; dashes (-) denote unsupported combinations.}") + lines.append(" \\label{tab:wine_performance}") + lines.append(" \\centering") + lines.append(" \\resizebox{\\textwidth}{!}{%") + lines.append(" \\begin{tabular}{lll||cccc|c}") + lines.append(" \\toprule") + lines.append(" \\textbf{Model} & \\textbf{Metric} & \\textbf{Explainer} & \\textbf{Captum} & \\textbf{OmniXAI} & \\textbf{AutoXAI} & \\textbf{OpenXAI} & \\textbf{PnPXAI} \\\\") + lines.append(" \\midrule") + + # Process XGBoost + model_key = "xgb" + model_name = MODEL_MAP[model_key] + + for metric_idx, (metric_key, metric_name, is_complexity) in enumerate([ + ('faithfulness', 'Faithfulness ($\\uparrow$)', False), + ('complexity', 'Complexity ($\\downarrow$)', True), + ('composite', 'Composite [Faithfulness, Simplicity] ($\\uparrow$)', False) + ]): + if metric_idx == 0: + lines.append(f" {model_name} & \\multirow[t]{{2}}{{*}}{{{metric_name}}}") + else: + lines.append(f" & \\multirow[t]{{2}}{{*}}{{{metric_name}}}") + + for exp_idx, exp_key in enumerate(XGB_EXPLAINERS): + exp_name = EXPLAINER_MAP[exp_key] + + values = [] + for fw in FRAMEWORK_ORDER: + if fw in data[model_key] and exp_key in data[model_key][fw]: + values.append(data[model_key][fw][exp_key][metric_key]) + else: + values.append(None) + + best_val = get_best_value(values, is_complexity) + formatted_values = [format_value(v, best_val, is_complexity) for v in values] + + if exp_idx == 0: + lines.append(f" & {exp_name} & {' & '.join(formatted_values)} \\\\") + else: + lines.append(f" & & {exp_name} & {' & '.join(formatted_values)} \\\\") + + if metric_idx < 2: + lines.append(" \\cmidrule{2-8}") + + lines.append(" \\midrule") + + # Process ResNet + model_key = "tab_resnet" + model_name = MODEL_MAP[model_key] + + for metric_idx, (metric_key, metric_name, is_complexity) in enumerate([ + ('faithfulness', 'Faithfulness ($\\uparrow$)', False), + ('complexity', 'Complexity ($\\downarrow$)', True), + ('composite', 'Composite [Faithfulness, Simplicity] ($\\uparrow$)', False) + ]): + if metric_idx == 0: + lines.append(f" {model_name} & \\multirow[t]{{{len(RESNET_EXPLAINERS)}}}{{*}}{{{metric_name}}}") + else: + lines.append(f" & \\multirow[t]{{{len(RESNET_EXPLAINERS)}}}{{*}}{{{metric_name}}}") + + for exp_idx, exp_key in enumerate(RESNET_EXPLAINERS): + exp_name = EXPLAINER_MAP[exp_key] + + values = [] + for fw in FRAMEWORK_ORDER: + if fw in data[model_key] and exp_key in data[model_key][fw]: + values.append(data[model_key][fw][exp_key][metric_key]) + else: + values.append(None) + + best_val = get_best_value(values, is_complexity) + formatted_values = [format_value(v, best_val, is_complexity) for v in values] + + if exp_idx == 0: + lines.append(f" & {exp_name:20s} & {' & '.join(formatted_values)} \\\\") + else: + lines.append(f" & & {exp_name:20s} & {' & '.join(formatted_values)} \\\\") + + if metric_idx < 2: + lines.append(" \\cmidrule{2-8}") + + lines.append(" \\bottomrule") + lines.append(" \\end{tabular}%") + lines.append(" }") + lines.append("\\end{table}") + + return "\n".join(lines) + + +# 
============================================================================ +# Main Function +# ============================================================================ + +def main(): + parser = argparse.ArgumentParser( + description="Run Wine Quality XAI experiments and generate results table", + formatter_class=argparse.RawDescriptionHelpFormatter + ) + + parser.add_argument("--n-samples", type=int, default=25, + help="Number of samples for LIME/SHAP (default: 25)") + parser.add_argument("--seed", type=int, default=42, + help="Random seed (default: 42)") + parser.add_argument("--verbose", action="store_true", + help="Enable verbose logging") + parser.add_argument("--data-dir", type=str, default="data/Wine Quality", + help="Data directory (default: data/Wine Quality)") + parser.add_argument("--config-dir", type=str, default="configs/tabular", + help="Config directory (default: configs/tabular)") + parser.add_argument("--results-dir", type=str, default="results/Wine Quality", + help="Results directory (default: results/Wine Quality)") + + args = parser.parse_args() + + start_time = time.time() + + # Run all experiments + print("\n" + "="*70) + print("Starting Wine Quality XAI Experiment") + print("="*70 + "\n") + + run_all_experiments( + data_dir=args.data_dir, + config_dir=args.config_dir, + results_dir=args.results_dir, + n_samples=args.n_samples, + seed=args.seed, + verbose=args.verbose + ) + + # Generate LaTeX table + print("\n" + "="*70) + print("Generating LaTeX table...") + print("="*70 + "\n") + + data = collect_results(args.results_dir) + latex_table = generate_latex_table(data) + + # Save table to results directory + output_file = Path(args.results_dir) / "experiment_result.md" + output_file.parent.mkdir(parents=True, exist_ok=True) + + with open(output_file, 'w') as f: + f.write(latex_table + "\n") + + print(latex_table) + print(f"\n✓ Table saved to: {output_file}") + + elapsed = time.time() - start_time + minutes = int(elapsed // 60) + seconds = int(elapsed % 60) + + print("\n" + "="*70) + print(f"All tasks completed in {minutes}m {seconds}s") + print("="*70 + "\n") + + +if __name__ == "__main__": + main() diff --git a/experiments/scripts/wine_quality/README.md b/experiments/scripts/wine_quality/README.md index a8c0c47..8b1d337 100644 --- a/experiments/scripts/wine_quality/README.md +++ b/experiments/scripts/wine_quality/README.md @@ -147,6 +147,24 @@ All experimental settings are managed via YAML configuration files in `configs/t This module requires the Wine Quality Docker image. From the project root: +#### Option 1: Using Docker Compose (Recommended) + +```bash +# Build the wine quality experiment image +docker build -t pnpxai_wine_quality:latest -f Dockerfile.wine_quality . + +# Start the container with all volumes mounted +docker-compose -f docker-compose.wine_quality.yml up -d + +# Enter the container +docker exec -it pnpxai-wine-quality bash + +# Stop the container when done +docker-compose -f docker-compose.wine_quality.yml down +``` + +#### Option 2: Using Docker Run + ```bash # Build the wine quality experiment image docker build -t pnpxai_wine_quality:latest -f Dockerfile.wine_quality . 
@@ -157,6 +175,7 @@ docker run --rm -it \ --gpus all \ --shm-size=8g \ -v $(pwd):/root/pnpxai-experiments \ + --name pnpxai-wine-quality \ pnpxai_wine_quality:latest \ /bin/bash ``` @@ -332,6 +351,55 @@ Each experiment generates outputs in `results/Wine Quality/{model}/{framework}/{ - Data shapes - Metric summaries (mean, std) +### Visualizing Results + +After running experiments, you can aggregate and visualize all results using the visualization script: + +```bash +# Inside the container, from /root/pnpxai-experiments/ + +# Display results as markdown table +python -m experiments.scripts.wine_quality.visualize_results + +# Save results to CSV file +python -m experiments.scripts.wine_quality.visualize_results --output csv > wine_quality_results.csv + +# Save results to JSON file +python -m experiments.scripts.wine_quality.visualize_results --output json > wine_quality_results.json + +# Use custom results directory +python -m experiments.scripts.wine_quality.visualize_results --results-dir "path/to/results" + +# Enable verbose output with detailed statistics +python -m experiments.scripts.wine_quality.visualize_results --verbose + +# Customize decimal precision (default: 4) +python -m experiments.scripts.wine_quality.visualize_results --round 3 +``` + +**Output Format:** + +The visualization script creates a comprehensive table showing: +- **Models**: XGBoost and Resnet (TabResNet) +- **Metrics**: Correctness (ABPC), Complexity, Compounded +- **Explainers**: All tested explanation methods (LIME, SHAP, Gradient-based, etc.) +- **Frameworks**: Results for each framework (PnPXAI, Captum, OmniXAI, OpenXAI, AutoXAI) + +Example output: +``` +| | model | metric | explainer | PnPXAI | Captum | AutoXAI | OmniXAI | OpenXAI | +|---:|:--------|:------------|:---------------------|---------:|---------:|----------:|----------:|----------:| +| 0 | XGBoost | Correctness | KernelSHAP | 0.1424 | 0.0546 | -0.0023 | 0.0749 | nan | +| 1 | XGBoost | Correctness | LIME | 0.1290 | 0.0557 | -0.0130 | 0.0461 | nan | +... +``` + +**Visualization Options:** +- `--results-dir PATH`: Specify custom results directory (default: `results/Wine Quality`) +- `--output {markdown|csv|json}`: Choose output format (default: markdown) +- `--round N`: Number of decimal places (default: 4) +- `--verbose`: Show detailed collection statistics + ### Checking Progress ```bash @@ -340,6 +408,9 @@ ls -R results/Wine\ Quality/ # Count completed experiments find results/Wine\ Quality/ -name "explanations.npy" | wc -l + +# Quick summary of all results +python -m experiments.scripts.wine_quality.visualize_results ``` ### Interpreting Results diff --git a/experiments/scripts/wine_quality/analyze_wine_quality.py b/experiments/scripts/wine_quality/analyze_wine_quality.py index 02a10f5..8074f0a 100755 --- a/experiments/scripts/wine_quality/analyze_wine_quality.py +++ b/experiments/scripts/wine_quality/analyze_wine_quality.py @@ -101,6 +101,7 @@ def run_single_experiment( dry_run: bool = False, verbose: bool = False, continue_mode: bool = False, + n_samples: int = 25, ) -> Tuple[bool, float]: """ Run a single experiment with specified model/framework/explainer combination. @@ -133,6 +134,7 @@ def run_single_experiment( "--framework", framework, "--model", model, "--explainer", explainer, + "--n-samples", str(n_samples), ] if verbose: @@ -205,6 +207,7 @@ def run_all_experiments( framework_filter: List[str] = None, model_filter: List[str] = None, explainer_filter: List[str] = None, + n_samples: int = 25, ): """ Run all experiment combinations. 
@@ -270,6 +273,7 @@ def run_all_experiments( dry_run=dry_run, verbose=verbose, continue_mode=continue_mode, + n_samples=n_samples, ) completed += 1 diff --git a/experiments/scripts/wine_quality/explain_wine_quality.py b/experiments/scripts/wine_quality/explain_wine_quality.py index de74ddf..546bdfa 100755 --- a/experiments/scripts/wine_quality/explain_wine_quality.py +++ b/experiments/scripts/wine_quality/explain_wine_quality.py @@ -101,7 +101,7 @@ def explain_with_pnpxai( config: Dict[str, Any], logger: logging.Logger, batch_size: int = 32, - n_samples: int = 64, + n_samples: int = 25, n_steps: int = 50, model_type: str = "xgb", ) -> np.ndarray: @@ -348,6 +348,11 @@ def evaluate(self, inputs, targets, attrs): logger.info(f"Best value: {opt_results.study.best_trial.value:.4f}") + # Re-set seeds before generating explanations for reproducibility + # LIME/SHAP are stochastic, so we need to fix the seed again + logger.info("Re-setting random seeds for reproducible explanation generation...") + set_seeds(config['optuna'].get('seed', 42)) + # Generate explanations opt_explainer = opt_results.explainer th_test_input = torch.tensor(test_dataset.inputs, dtype=torch.float32) @@ -373,7 +378,7 @@ def explain_with_captum( explainer_name: str, config: Dict[str, Any], logger: logging.Logger, - n_samples: int = 64, + n_samples: int = 25, ) -> np.ndarray: """ Generate explanations using Captum framework. @@ -385,7 +390,7 @@ def explain_with_captum( explainer_name: Explainer type config: Configuration dictionary logger: Logger instance - n_samples: Number of samples for LIME/SHAP + n_samples: Number of samples for LIME/SHAP (default: 25) Returns: Explanation array of shape (n_samples, n_features) @@ -443,7 +448,7 @@ def explain_with_captum( attrs_list = [] for i in tqdm(range(len(X_test_t)), desc="Explaining"): input_i = X_test_t[i].unsqueeze(0) - attr_i = explainer.attribute(input_i, target=targets[i]) + attr_i = explainer.attribute(input_i, target=targets[i], n_samples=n_samples) attrs_list.append(attr_i.detach().cpu().numpy()) explanations = np.concatenate(attrs_list, axis=0) @@ -637,6 +642,58 @@ def explain_with_openxai( return explanations +class PyTorchModelWrapper: + """ + Wrapper for PyTorch models to add predict and predict_proba methods for AutoXAI. + + AutoXAI expects models to have predict() and predict_proba() methods like sklearn models. + This wrapper adds these methods for PyTorch neural network models. + """ + def __init__(self, pytorch_model): + self.model = pytorch_model + self.model.eval() + + def predict(self, X): + """ + Predict class labels for samples. + + Args: + X: Input array of shape (n_samples, n_features) + + Returns: + Predicted class labels of shape (n_samples,) + """ + import torch + if not isinstance(X, torch.Tensor): + X = torch.FloatTensor(X) + + with torch.no_grad(): + logits = self.model(X) + predictions = torch.argmax(logits, dim=1) + + return predictions.cpu().numpy() + + def predict_proba(self, X): + """ + Predict class probabilities for samples. 
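+        Mirrors sklearn's predict_proba so AutoXAI can treat the wrapped
+        network like an sklearn classifier.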
+ + Args: + X: Input array of shape (n_samples, n_features) + + Returns: + Predicted class probabilities of shape (n_samples, n_classes) + """ + import torch + if not isinstance(X, torch.Tensor): + X = torch.FloatTensor(X) + + with torch.no_grad(): + logits = self.model(X) + probas = torch.softmax(logits, dim=1) + + return probas.cpu().numpy() + + def explain_with_autoxai( model: nn.Module, X_test: np.ndarray, @@ -691,10 +748,15 @@ def explain_with_autoxai( bg_size = min(50, len(X_test)) - # Unwrap model if it's a TorchModelForXGBoost (AutoXAI needs the original XGBoost model) + # Prepare model for AutoXAI + # - For XGBoost models wrapped in TorchModelForXGBoost, unwrap to get the original XGBoost model + # - For PyTorch models (tab_resnet, lr), wrap them to add predict/predict_proba methods from script_utils.model_utils import TorchModelForXGBoost if isinstance(model, TorchModelForXGBoost): unwrapped_model = model.xgb_model + elif isinstance(model, nn.Module): + # Wrap PyTorch models to add predict() and predict_proba() methods + unwrapped_model = PyTorchModelWrapper(model) else: unwrapped_model = model diff --git a/experiments/scripts/wine_quality/generate_latex_table.py b/experiments/scripts/wine_quality/generate_latex_table.py new file mode 100644 index 0000000..7347d12 --- /dev/null +++ b/experiments/scripts/wine_quality/generate_latex_table.py @@ -0,0 +1,235 @@ +#!/usr/bin/env python3 +""" +Generate LaTeX table from Wine Quality experiment results +""" + +import os +import json +from pathlib import Path +from collections import defaultdict + +def load_metric(path): + """Load a numpy metric file and return mean value""" + import numpy as np + return np.load(path).mean() + +def collect_results(results_dir="results/Wine Quality"): + """ + Collect all experiment results. 
+ Returns dict: {model: {framework: {explainer: {metric: value}}}} + """ + results_path = Path(results_dir) + data = defaultdict(lambda: defaultdict(lambda: defaultdict(dict))) + + for root, dirs, files in os.walk(results_path): + if "explanations.npy" in files: + parts = Path(root).relative_to(results_path).parts + if len(parts) != 3: + continue + + model, framework, explainer = parts + + try: + import numpy as np + abpc = np.load(os.path.join(root, "abpc.npy")).mean() + cmpx = np.load(os.path.join(root, "cmpx.npy")).mean() + cmpd = np.load(os.path.join(root, "cmpd.npy")).mean() + + data[model][framework][explainer] = { + 'faithfulness': abpc, + 'complexity': cmpx, + 'composite': cmpd + } + except Exception as e: + print(f"Error loading {root}: {e}") + continue + + return data + +def format_value(value, best_value, is_complexity=False): + """Format value with bold if it's the best""" + if value is None: + return "-" + + # For complexity, lower is better + if is_complexity: + is_best = abs(value - best_value) < 1e-6 if best_value is not None else False + else: + is_best = abs(value - best_value) < 1e-6 if best_value is not None else False + + formatted = f"{value:.4f}" + return f"\\textbf{{{formatted}}}" if is_best else formatted + +def get_best_value(values, is_complexity=False): + """Get the best value from a list (considering None values)""" + valid_values = [v for v in values if v is not None] + if not valid_values: + return None + + if is_complexity: + return min(valid_values) + else: + return max(valid_values) + +def generate_latex_table(data): + """Generate LaTeX table matching the original format""" + + # Define the structure + EXPLAINER_MAP = { + "shap": "KernelSHAP", + "lime": "LIME", + "grad": "Gradient", + "itg": "Grad.$\\times$Input", + "ig": "Integrated Gradients", + "sg": "SmoothGrad", + "vg": "VarGrad", + "lrp": "LRP" + } + + MODEL_MAP = { + "xgb": "XGBoost", + "tab_resnet": "ResNet" + } + + FRAMEWORK_ORDER = ["captum", "omnixai", "autoxai", "openxai", "pnpxai"] + FRAMEWORK_NAMES = { + "captum": "Captum", + "omnixai": "OmniXAI", + "autoxai": "AutoXAI", + "openxai": "OpenXAI", + "pnpxai": "PnPXAI" + } + + # XGBoost explainers + XGB_EXPLAINERS = ["shap", "lime"] + + # ResNet explainers + RESNET_EXPLAINERS = ["shap", "lime", "grad", "itg", "ig", "sg", "vg", "lrp"] + + # Build LaTeX table + lines = [] + lines.append("\\begin{table}[!th]") + lines.append(" \\caption{\\textbf{Comparison of explanation performance on Wine Quality dataset.}") + lines.append(" Evaluation of XGBoost and TabResNet models across three key metrics:") + lines.append(" Faithfulness (higher is better $\\uparrow$), Complexity (lower is better $\\downarrow$), and a Composite [Faithfulness, Simplicity] score ($\\uparrow$).") + lines.append(" The table compares PnP-XAI against Captum, AutoXAI, OmniXAI, and OpenXAI.") + lines.append(" Bold values indicate the best score per row; dashes (-) denote unsupported combinations.}") + lines.append(" \\label{tab:wine_performance}") + lines.append(" \\centering") + lines.append(" \\resizebox{\\textwidth}{!}{%") + lines.append(" \\begin{tabular}{lll||cccc|c}") + lines.append(" \\toprule") + lines.append(" \\textbf{Model} & \\textbf{Metric} & \\textbf{Explainer} & \\textbf{Captum} & \\textbf{OmniXAI} & \\textbf{AutoXAI} & \\textbf{OpenXAI} & \\textbf{PnPXAI} \\\\") + lines.append(" \\midrule") + + # Process XGBoost + model_key = "xgb" + model_name = MODEL_MAP[model_key] + + for metric_idx, (metric_key, metric_name, is_complexity) in enumerate([ + ('faithfulness', 
'Faithfulness ($\\uparrow$)', False), + ('complexity', 'Complexity ($\\downarrow$)', True), + ('composite', 'Composite [Faithfulness, Simplicity] ($\\uparrow$)', False) + ]): + if metric_idx == 0: + lines.append(f" {model_name} & \\multirow[t]{{2}}{{*}}{{{metric_name}}}") + else: + lines.append(f" & \\multirow[t]{{2}}{{*}}{{{metric_name}}}") + + for exp_idx, exp_key in enumerate(XGB_EXPLAINERS): + exp_name = EXPLAINER_MAP[exp_key] + + # Get values for all frameworks + values = [] + for fw in FRAMEWORK_ORDER: + if fw in data[model_key] and exp_key in data[model_key][fw]: + values.append(data[model_key][fw][exp_key][metric_key]) + else: + values.append(None) + + # Find best value + best_val = get_best_value(values, is_complexity) + + # Format row + formatted_values = [format_value(v, best_val, is_complexity) for v in values] + + if exp_idx == 0: + lines.append(f" & {exp_name} & {' & '.join(formatted_values)} \\\\") + else: + lines.append(f" & & {exp_name} & {' & '.join(formatted_values)} \\\\") + + if metric_idx < 2: + lines.append(" \\cmidrule{2-8}") + + lines.append(" \\midrule") + + # Process ResNet + model_key = "tab_resnet" + model_name = MODEL_MAP[model_key] + + for metric_idx, (metric_key, metric_name, is_complexity) in enumerate([ + ('faithfulness', 'Faithfulness ($\\uparrow$)', False), + ('complexity', 'Complexity ($\\downarrow$)', True), + ('composite', 'Composite [Faithfulness, Simplicity] ($\\uparrow$)', False) + ]): + if metric_idx == 0: + lines.append(f" {model_name} & \\multirow[t]{{{len(RESNET_EXPLAINERS)}}}{{*}}{{{metric_name}}}") + else: + lines.append(f" & \\multirow[t]{{{len(RESNET_EXPLAINERS)}}}{{*}}{{{metric_name}}}") + + for exp_idx, exp_key in enumerate(RESNET_EXPLAINERS): + exp_name = EXPLAINER_MAP[exp_key] + + # Get values for all frameworks + values = [] + for fw in FRAMEWORK_ORDER: + if fw in data[model_key] and exp_key in data[model_key][fw]: + values.append(data[model_key][fw][exp_key][metric_key]) + else: + values.append(None) + + # Find best value + best_val = get_best_value(values, is_complexity) + + # Format row + formatted_values = [format_value(v, best_val, is_complexity) for v in values] + + if exp_idx == 0: + lines.append(f" & {exp_name:20s} & {' & '.join(formatted_values)} \\\\") + else: + lines.append(f" & & {exp_name:20s} & {' & '.join(formatted_values)} \\\\") + + if metric_idx < 2: + lines.append(" \\cmidrule{2-8}") + + lines.append(" \\bottomrule") + lines.append(" \\end{tabular}%") + lines.append(" }") + lines.append("\\end{table}") + + return "\n".join(lines) + +if __name__ == "__main__": + import numpy as np + + print("Collecting experiment results...") + data = collect_results() + + print(f"\nFound results for models: {list(data.keys())}") + for model in data: + print(f" {model}: frameworks = {list(data[model].keys())}") + + print("\nGenerating LaTeX table...") + latex_table = generate_latex_table(data) + + print("\n" + "="*70) + print("Generated LaTeX Table:") + print("="*70) + print(latex_table) + + # Save to file + output_file = "new_experiment_result.md" + with open(output_file, 'w') as f: + f.write(latex_table + "\n") + + print(f"\n✓ Table saved to: {output_file}") diff --git a/experiments/scripts/wine_quality/script_utils/model_utils.py b/experiments/scripts/wine_quality/script_utils/model_utils.py index 86f421c..61e8c28 100644 --- a/experiments/scripts/wine_quality/script_utils/model_utils.py +++ b/experiments/scripts/wine_quality/script_utils/model_utils.py @@ -217,7 +217,7 @@ def load_model( if input_dim is None: raise 
ValueError("input_dim is required for tab_resnet model") - from models.tab_resnet import TabResNet + from experiments.models.tab_resnet import TabResNet num_blocks = kwargs.get('num_blocks', 1) model = TabResNet(input_dim, output_dim, num_blocks=num_blocks) @@ -229,7 +229,7 @@ def load_model( if input_dim is None: raise ValueError("input_dim is required for lr model") - from models.tab_resnet import LogisticRegression + from experiments.models.tab_resnet import LogisticRegression model = LogisticRegression(input_dim, output_dim) model.load_state_dict(torch.load(model_path)) diff --git a/experiments/scripts/wine_quality/visualize_results.py b/experiments/scripts/wine_quality/visualize_results.py new file mode 100644 index 0000000..715337c --- /dev/null +++ b/experiments/scripts/wine_quality/visualize_results.py @@ -0,0 +1,296 @@ +#!/usr/bin/env python3 +""" +Wine Quality Results Visualization Script + +Aggregates and displays all experiment results from Wine Quality analysis in a markdown table format. +This script scans the results directory and creates a comprehensive summary similar to benchmark/notebooks/vis.ipynb. + +Usage: + python -m experiments.scripts.wine_quality.visualize_results [OPTIONS] + +Options: + --results-dir: Path to results directory (default: results/Wine Quality) + --output: Output format (markdown, csv, json) (default: markdown) + --round: Number of decimal places to round (default: 4) + --verbose: Enable verbose logging + +Examples: + # Display results as markdown table + python -m experiments.scripts.wine_quality.visualize_results + + # Save to CSV + python -m experiments.scripts.wine_quality.visualize_results --output csv > results.csv + + # Custom results directory + python -m experiments.scripts.wine_quality.visualize_results --results-dir "path/to/results" +""" + +import os +import sys +import argparse +import numpy as np +import pandas as pd +from pathlib import Path +from typing import Dict, List, Optional +from pandas.api.types import CategoricalDtype + + +# Mapping dictionaries for display names +EXP_MAP = { + "shap": "KernelSHAP", + "kernel_shap": "KernelSHAP", + "lime": "LIME", + "integrated_gradients": "Integrated Gradients", + "itg": "Gradient x Input", + "grad": "Gradient", + "gradient": "Gradient", + "lrp_uniform_epsilon": "LRP", + "lrp": "LRP", + "smooth_grad": "SmoothGrad", + "sg": "SmoothGrad", + "var_grad": "VarGrad", + "vg": "VarGrad", + "grad_x_input": "Gradient x Input", + "ig": "Integrated Gradients", +} + +MODEL_MAP = { + "xgb": "XGBoost", + "tab_resnet": "Resnet", +} + +FRAMEWORK_MAP = { + "pnpxai": "PnPXAI", + "captum": "Captum", + "autoxai": "AutoXAI", + "omnixai": "OmniXAI", + "openxai": "OpenXAI", +} + +METRIC_MAP = { + "abpc": "Correctness", + "cmpx": "Complexity", + "cmpd": "Compounded" +} + +# Ordering for categorical data +EXP_ORDER = [ + 'KernelSHAP', + 'LIME', + 'Gradient', + 'Gradient x Input', + 'Integrated Gradients', + 'SmoothGrad', + 'VarGrad', + 'LRP', +] + + +def collect_results(results_dir: str = "results/Wine Quality", verbose: bool = False) -> pd.DataFrame: + """ + Collect all experiment results from the results directory. 
+ + Args: + results_dir: Path to the results directory + verbose: Enable verbose logging + + Returns: + DataFrame with columns: model, framework, explainer, abpc, cmpx, cmpd + """ + results_path = Path(results_dir) + if not results_path.exists(): + raise FileNotFoundError(f"Results directory not found: {results_dir}") + + records = [] + + # Walk through the directory structure: results/Wine Quality/{model}/{framework}/{explainer}/ + for root, dirs, files in os.walk(results_path): + # Check if this directory contains result files + if "explanations.npy" in files: + parts = Path(root).relative_to(results_path).parts + + if len(parts) != 3: + if verbose: + print(f"Skipping unexpected directory structure: {root}") + continue + + model, framework, explainer = parts + + # Load metrics + try: + abpc = np.load(os.path.join(root, "abpc.npy")).mean() + cmpx = np.load(os.path.join(root, "cmpx.npy")).mean() + cmpd = np.load(os.path.join(root, "cmpd.npy")).mean() + + records.append({ + "model": model, + "framework": framework, + "explainer": explainer, + "abpc": abpc, + "cmpx": cmpx, + "cmpd": cmpd, + }) + + if verbose: + print(f"Loaded: Model={model}, Framework={framework}, Explainer={explainer}") + print(f" ABPC={abpc:.4f}, CMPX={cmpx:.4f}, CMPD={cmpd:.4f}") + + except Exception as e: + if verbose: + print(f"Error loading metrics from {root}: {e}") + continue + + if not records: + raise ValueError(f"No experiment results found in {results_dir}") + + return pd.DataFrame(records) + + +def create_pivot_table(df: pd.DataFrame, round_decimals: int = 4) -> pd.DataFrame: + """ + Transform the results DataFrame into a pivot table format for display. + + Args: + df: DataFrame with columns: model, framework, explainer, abpc, cmpx, cmpd + round_decimals: Number of decimal places to round + + Returns: + Pivot table with metrics as rows and frameworks as columns + """ + # Replace codes with display names + name_map = {} + name_map.update(EXP_MAP) + name_map.update(MODEL_MAP) + name_map.update(FRAMEWORK_MAP) + name_map.update(METRIC_MAP) + + # Reshape data: unpivot metrics + performance = ( + df + .replace(name_map) + .melt( + id_vars=["model", "framework", "explainer"], + value_vars=["abpc", "cmpx", "cmpd"], + var_name="metric", + value_name="value" + ) + .replace(name_map) + .assign( + model=lambda d: d["model"].astype( + CategoricalDtype(list(MODEL_MAP.values()), ordered=True) + ), + metric=lambda d: d["metric"].astype( + CategoricalDtype(list(METRIC_MAP.values()), ordered=True) + ), + explainer=lambda d: d["explainer"].astype( + CategoricalDtype(EXP_ORDER, ordered=True) + ), + framework=lambda d: d["framework"].astype( + CategoricalDtype(list(FRAMEWORK_MAP.values()), ordered=True) + ) + ) + [['model', 'metric', 'explainer', 'framework', 'value']] + .pivot( + index=["model", "metric", "explainer"], + columns="framework", + values="value" + ) + .reset_index() + .sort_values(["model", "metric", "explainer"]) + .round(round_decimals) + ) + + return performance + + +def print_summary(df: pd.DataFrame, verbose: bool = False): + """Print summary statistics about the collected results.""" + print("\n" + "="*70) + print("Wine Quality Experiment Results Summary") + print("="*70) + print(f"\nTotal experiments: {len(df)}") + print(f"Models: {sorted(df['model'].unique())}") + print(f"Frameworks: {sorted(df['framework'].unique())}") + print(f"Explainers: {sorted(df['explainer'].unique())}") + print() + + if verbose: + # Count experiments by combination + print("Experiments by model:") + 
print(df.groupby('model').size().to_string()) + print("\nExperiments by framework:") + print(df.groupby('framework').size().to_string()) + print("\nExperiments by explainer:") + print(df.groupby('explainer').size().to_string()) + print() + + +def main(): + """Main entry point.""" + parser = argparse.ArgumentParser( + description="Visualize Wine Quality experiment results", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Display results as markdown table + python -m experiments.scripts.wine_quality.visualize_results + + # Save to CSV + python -m experiments.scripts.wine_quality.visualize_results --output csv > results.csv + + # Custom results directory with verbose output + python -m experiments.scripts.wine_quality.visualize_results --results-dir "path/to/results" --verbose + """ + ) + + parser.add_argument("--results-dir", type=str, default="results/Wine Quality", + help="Path to results directory (default: results/Wine Quality)") + parser.add_argument("--output", type=str, default="markdown", + choices=["markdown", "csv", "json"], + help="Output format (default: markdown)") + parser.add_argument("--round", type=int, default=4, dest="round_decimals", + help="Number of decimal places to round (default: 4)") + parser.add_argument("--verbose", action="store_true", + help="Enable verbose logging") + + args = parser.parse_args() + + try: + # Collect results + if args.verbose: + print(f"Scanning results directory: {args.results_dir}") + print() + + df = collect_results(args.results_dir, verbose=args.verbose) + + # Print summary + print_summary(df, verbose=args.verbose) + + # Create pivot table + pivot_table = create_pivot_table(df, round_decimals=args.round_decimals) + + # Output results + print("="*70) + print("Results Table") + print("="*70) + print() + + if args.output == "markdown": + print(pivot_table.to_markdown(index=False)) + elif args.output == "csv": + print(pivot_table.to_csv(index=False)) + elif args.output == "json": + print(pivot_table.to_json(orient="records", indent=2)) + + print() + + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + if args.verbose: + import traceback + traceback.print_exc() + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/models/__init__.py b/models/__init__.py deleted file mode 100644 index 245f6d2..0000000 --- a/models/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -""" -Neural network models for tabular data. -""" -from .tab_resnet import ( - TabResNet, - LogisticRegression, - ResNetBlock, - train_model, -) - -__all__ = [ - "TabResNet", - "LogisticRegression", - "ResNetBlock", - "train_model", -] diff --git a/models/tab_resnet.py b/models/tab_resnet.py deleted file mode 100644 index 0882b3c..0000000 --- a/models/tab_resnet.py +++ /dev/null @@ -1,232 +0,0 @@ -""" -TabResNet and Logistic Regression models for tabular data. - -Models are compatible with XAI frameworks (LIME, SHAP, Captum, PnPXAI). -""" -import torch -import torch.nn as nn -import numpy as np - - -class ResNetBlock(nn.Module): - """Residual block for TabResNet.""" - - def __init__(self, in_features, out_features): - """ - Args: - in_features: Input feature dimension - out_features: Output feature dimension - """ - super(ResNetBlock, self).__init__() - self.bn = nn.BatchNorm1d(in_features) - self.fc1 = nn.Linear(in_features, out_features) - self.fc2 = nn.Linear(out_features, out_features) - self.dropout = nn.Dropout(0.2) - - def forward(self, x): - """ - Forward pass with residual connection. 
- - Args: - x: Input tensor of shape (batch_size, in_features) - - Returns: - Output tensor of shape (batch_size, out_features) - """ - y = torch.relu(self.fc1(self.bn(x))) - y = self.dropout(y) - y = self.fc2(y) - y = self.dropout(y) - return torch.add(x, y) - - -class TabResNet(nn.Module): - """ - Residual Network for tabular data classification. - - Architecture: - Input -> Embedding -> ResNet Blocks -> BatchNorm -> Output - """ - - def __init__(self, in_features, out_features, num_blocks=1, embedding_dim=128): - """ - Args: - in_features: Input feature dimension - out_features: Number of output classes - num_blocks: Number of residual blocks (default: 1) - embedding_dim: Embedding dimension (default: 128) - """ - super(TabResNet, self).__init__() - self.embedding = nn.Linear(in_features, embedding_dim) - self.res_blocks = [] - for i in range(num_blocks): - self.res_blocks.append(ResNetBlock(embedding_dim, embedding_dim)) - self.res_blocks = nn.ModuleList(self.res_blocks) - self.bn = nn.BatchNorm1d(embedding_dim) - self.fc = nn.Linear(embedding_dim, out_features) - - def network(self, x): - """ - Forward pass without softmax. - - Args: - x: Input tensor of shape (batch_size, in_features) - - Returns: - Logits of shape (batch_size, out_features) - """ - x = self.embedding(x) - for block in self.res_blocks: - x = block(x) - x = torch.relu(self.bn(x)) - x = self.fc(x) - return x - - def forward(self, x): - """ - Forward pass with softmax. - - Args: - x: Input tensor of shape (batch_size, in_features) - - Returns: - Probabilities of shape (batch_size, out_features) - """ - return torch.softmax(self.network(x), dim=1) - - def predict_proba(self, x): - """ - Predict class probabilities (compatible with SHAP). - - Args: - x: Input array or tensor - - Returns: - Probability array of shape (batch_size, out_features) - """ - input = x if torch.is_tensor(x) else torch.from_numpy(np.array(x)) - return self.forward(input.float()).detach().numpy() - - def predict(self, x, argmax=False): - """ - Predict class labels or probabilities (compatible with LIME). - - Args: - x: Input array or tensor - argmax: If True, return class labels; otherwise probabilities - - Returns: - Class labels or probabilities - """ - input = torch.squeeze(x) if torch.is_tensor(x) else torch.from_numpy(np.array(x)) - output = self.forward(input.float()).detach().numpy() - return output.argmax(axis=-1) if argmax else output - - -class LogisticRegression(nn.Module): - """ - Logistic Regression model for binary/multiclass classification. - - Simple linear model with softmax output. - """ - - def __init__(self, input_dim, output_dim): - """ - Args: - input_dim: Input feature dimension - output_dim: Number of output classes - """ - super(LogisticRegression, self).__init__() - self.linear = nn.Linear(input_dim, output_dim) - - def network(self, x): - """ - Forward pass without softmax. - - Args: - x: Input tensor of shape (batch_size, input_dim) - - Returns: - Logits of shape (batch_size, output_dim) - """ - return self.linear(x) - - def forward(self, x): - """ - Forward pass with softmax. - - Args: - x: Input tensor of shape (batch_size, input_dim) - - Returns: - Probabilities of shape (batch_size, output_dim) - """ - return torch.softmax(self.network(x), dim=1) - - def predict_proba(self, x): - """ - Predict class probabilities (compatible with SHAP). 
- - Args: - x: Input array or tensor - - Returns: - Probability array of shape (batch_size, output_dim) - """ - input = x if torch.is_tensor(x) else torch.from_numpy(np.array(x)) - return self.forward(input.float()).detach().numpy() - - def predict(self, x, argmax=False): - """ - Predict class labels or probabilities (compatible with LIME). - - Args: - x: Input array or tensor - argmax: If True, return class labels; otherwise probabilities - - Returns: - Class labels or probabilities - """ - input = torch.squeeze(x) if torch.is_tensor(x) else torch.from_numpy(np.array(x)) - output = self.forward(input.float()).detach().numpy() - return output.argmax(axis=-1) if argmax else output - - -def train_model( - X_train: torch.Tensor, - y_train: torch.Tensor, - model: nn.Module, - loss_fn: nn.Module, - optimizer: torch.optim.Optimizer, - epochs: int = 1000, - verbose: bool = True -): - """ - Train a PyTorch model. - - Args: - X_train: Training features tensor - y_train: Training labels tensor - model: PyTorch model - loss_fn: Loss function - optimizer: Optimizer - epochs: Number of training epochs - verbose: Whether to print progress - """ - model.train() - - for epoch in range(epochs): - # Forward pass - outputs = model(X_train) - loss = loss_fn(outputs, y_train) - - # Backward pass - optimizer.zero_grad() - loss.backward() - optimizer.step() - - # Print progress - if verbose and (epoch + 1) % 100 == 0: - print(f"Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}") - - model.eval() diff --git a/models/train.py b/models/train.py deleted file mode 100755 index d76c13c..0000000 --- a/models/train.py +++ /dev/null @@ -1,462 +0,0 @@ -#!/usr/bin/env python3 -""" -Train models on Wine Quality dataset. - -This script trains three models: -1. XGBoost Classifier -2. TabResNet (residual network for tabular data) -3. Logistic Regression (optional) - -Models are saved locally and optionally uploaded to Hugging Face Hub. -""" -import argparse -import random -import os -from pathlib import Path - -import numpy as np -import torch -import torch.nn as nn -import xgboost as xgb -import yaml - -from tab_resnet import TabResNet, LogisticRegression, train_model - -try: - from huggingface_hub import HfApi, hf_hub_download, list_repo_files - HF_AVAILABLE = True -except ImportError: - HF_AVAILABLE = False - print("Warning: huggingface_hub not installed. HF Hub features disabled.") - - -def set_seeds(seed: int = 0): - """Set random seeds for reproducibility.""" - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - if torch.cuda.is_available(): - torch.cuda.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - - -def load_config(config_path: str = "../configs/model_config.yaml"): - """Load model configuration from YAML file.""" - with open(config_path, 'r') as f: - config = yaml.safe_load(f) - return config - - -def check_hf_model_exists(repo_id: str, filename: str) -> bool: - """ - Check if a model file exists in Hugging Face Hub. - - Args: - repo_id: Hugging Face repository ID - filename: Model filename to check - - Returns: - True if file exists, False otherwise - """ - if not HF_AVAILABLE: - return False - - try: - api = HfApi() - files = list_repo_files(repo_id) - return filename in files - except Exception as e: - print(f"Could not check HF Hub (repo may not exist): {e}") - return False - - -def upload_to_hf( - local_path: Path, - repo_id: str, - filename: str, - replace: bool = False, - token: str = None -): - """ - Upload model to Hugging Face Hub. 
- - Args: - local_path: Local file path - repo_id: Hugging Face repository ID - filename: Filename in the repository - replace: If True, replace existing file - token: HF API token (optional, uses HF_TOKEN env var if not provided) - """ - if not HF_AVAILABLE: - print("Warning: huggingface_hub not installed. Skipping upload.") - return - - try: - api = HfApi(token=token) - - # Check if file already exists - file_exists = check_hf_model_exists(repo_id, filename) - - if file_exists and not replace: - print(f"File {filename} already exists in {repo_id}. Skipping upload.") - print("Use --replace to overwrite existing files.") - return - - # Create repo if it doesn't exist - try: - api.create_repo(repo_id, exist_ok=True, repo_type="model") - print(f"Repository {repo_id} ready.") - except Exception as e: - print(f"Note: {e}") - - # Upload file - print(f"Uploading {filename} to {repo_id}...") - api.upload_file( - path_or_fileobj=str(local_path), - path_in_repo=filename, - repo_id=repo_id, - repo_type="model", - ) - print(f"✓ Successfully uploaded to https://huggingface.co/{repo_id}") - - except Exception as e: - print(f"Error uploading to HF Hub: {e}") - print("Make sure you have logged in: huggingface-cli login") - - -def train_xgboost(X_train, y_train, X_test, y_test, config, save_path, skip_training=False): - """ - Train XGBoost classifier. - - Args: - X_train, y_train: Training data - X_test, y_test: Test data (for evaluation) - config: XGBoost configuration - save_path: Path to save model - skip_training: If True, load existing model instead of training - - Returns: - Trained XGBoost model - """ - print("\n" + "="*50) - print("XGBoost Classifier") - print("="*50) - - if skip_training and save_path.exists(): - print("Loading existing model (skip_training=True)...") - model = xgb.XGBClassifier(**config['xgb'].get('params', {})) - model.load_model(str(save_path)) - else: - print("Training XGBoost Classifier...") - # Initialize model with config params - model = xgb.XGBClassifier(**config['xgb'].get('params', {})) - - # Train model - model.fit(X_train, y_train) - - # Save model - model.save_model(str(save_path)) - print(f"Model saved to: {save_path}") - - # Evaluate - y_pred = model.predict(X_test) - accuracy = np.mean(y_pred == y_test) - print(f"XGBoost Accuracy: {accuracy:.4f}") - - return model - - -def train_tabresnet(X_train, y_train, X_test, y_test, config, save_path, device='cpu', skip_training=False): - """ - Train TabResNet model. 
- - Args: - X_train, y_train: Training data - X_test, y_test: Test data (for evaluation) - config: TabResNet configuration - save_path: Path to save model - device: Device to train on ('cpu' or 'cuda') - skip_training: If True, load existing model instead of training - - Returns: - Trained TabResNet model - """ - print("\n" + "="*50) - print("TabResNet") - print("="*50) - - # Model configuration - input_dim = X_train.shape[1] - output_dim = config['common']['output_dim'] - num_blocks = config['tab_resnet']['num_blocks'] - - # Initialize model - model = TabResNet(input_dim, output_dim, num_blocks=num_blocks).to(device) - - if skip_training and save_path.exists(): - print("Loading existing model (skip_training=True)...") - model.load_state_dict(torch.load(save_path)) - model.eval() - else: - print("Training TabResNet...") - # Convert to tensors - X_train_t = torch.from_numpy(X_train).float().to(device) - y_train_t = torch.from_numpy(y_train).long().to(device) - - # Training configuration - train_config = config['tab_resnet']['training'] - loss_fn = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD( - model.parameters(), - lr=train_config['learning_rate'], - weight_decay=train_config['weight_decay'] - ) - - # Train model - train_model( - X_train_t, y_train_t, model, loss_fn, optimizer, - epochs=train_config['epochs'], - verbose=config['common'].get('verbose', True) - ) - - # Save model - torch.save(model.state_dict(), save_path) - print(f"Model saved to: {save_path}") - - # Evaluate - model.eval() - X_test_t = torch.from_numpy(X_test).float().to(device) - y_test_t = torch.from_numpy(y_test).long().to(device) - with torch.no_grad(): - y_pred = model(X_test_t).argmax(dim=1).cpu().numpy() - accuracy = np.mean(y_pred == y_test) - print(f"TabResNet Accuracy: {accuracy:.4f}") - - return model - - -def train_logistic_regression(X_train, y_train, X_test, y_test, config, save_path, device='cpu', skip_training=False): - """ - Train Logistic Regression model. 
- - Args: - X_train, y_train: Training data - X_test, y_test: Test data (for evaluation) - config: Logistic Regression configuration - save_path: Path to save model - device: Device to train on ('cpu' or 'cuda') - skip_training: If True, load existing model instead of training - - Returns: - Trained Logistic Regression model - """ - print("\n" + "="*50) - print("Logistic Regression") - print("="*50) - - # Model configuration - input_dim = X_train.shape[1] - output_dim = config['common']['output_dim'] - - # Initialize model - model = LogisticRegression(input_dim, output_dim).to(device) - - if skip_training and save_path.exists(): - print("Loading existing model (skip_training=True)...") - model.load_state_dict(torch.load(save_path)) - model.eval() - else: - print("Training Logistic Regression...") - # Convert to tensors - X_train_t = torch.from_numpy(X_train).float().to(device) - y_train_t = torch.from_numpy(y_train).long().to(device) - - # Training configuration - train_config = config['lr']['training'] - loss_fn = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD( - model.parameters(), - lr=train_config['learning_rate'], - weight_decay=train_config['weight_decay'] - ) - - # Train model - train_model( - X_train_t, y_train_t, model, loss_fn, optimizer, - epochs=train_config['epochs'], - verbose=config['common'].get('verbose', True) - ) - - # Save model - torch.save(model.state_dict(), save_path) - print(f"Model saved to: {save_path}") - - # Evaluate - model.eval() - X_test_t = torch.from_numpy(X_test).float().to(device) - y_test_t = torch.from_numpy(y_test).long().to(device) - with torch.no_grad(): - y_pred = model(X_test_t).argmax(dim=1).cpu().numpy() - accuracy = np.mean(y_pred == y_test) - print(f"Logistic Regression Accuracy: {accuracy:.4f}") - - return model - - -def main(): - parser = argparse.ArgumentParser( - description="Train models on Wine Quality dataset and optionally upload to HF Hub" - ) - parser.add_argument( - "--data-dir", - type=str, - default="../data/Wine Quality", - help="Directory containing preprocessed data" - ) - parser.add_argument( - "--config", - type=str, - default="../configs/model_config.yaml", - help="Path to model configuration file" - ) - parser.add_argument( - "--models", - type=str, - nargs='+', - default=["xgb", "tab_resnet"], - choices=["xgb", "tab_resnet", "lr", "all"], - help="Models to train (default: xgb tab_resnet)" - ) - parser.add_argument( - "--device", - type=str, - default="cpu", - choices=["cpu", "cuda"], - help="Device to use for training (default: cpu)" - ) - parser.add_argument( - "--seed", - type=int, - default=None, - help="Random seed (default: from config)" - ) - parser.add_argument( - "--hf-repo", - type=str, - default=None, - help="Hugging Face repository ID (e.g., username/wine-quality-models)" - ) - parser.add_argument( - "--replace", - action="store_true", - help="Replace models in HF Hub if they exist. If set and models exist in HF, skip training and use HF models." 
- ) - parser.add_argument( - "--hf-token", - type=str, - default=None, - help="Hugging Face API token (optional, uses HF_TOKEN env var if not provided)" - ) - - args = parser.parse_args() - - # Load configuration - config = load_config(args.config) - - # Set random seed - seed = args.seed if args.seed is not None else config['seeds']['model_training'] - set_seeds(seed) - print(f"Random seed set to: {seed}") - - # Load data - data_path = Path(args.data_dir) - print(f"\nLoading data from: {data_path}") - - X_train = np.load(data_path / "X_train.npy") - y_train = np.load(data_path / "y_train.npy") - X_test = np.load(data_path / "X_test.npy") - y_test = np.load(data_path / "y_test.npy") - - print(f" Train: {X_train.shape[0]} samples, {X_train.shape[1]} features") - print(f" Test: {X_test.shape[0]} samples") - - # Determine models to train - models_to_train = args.models - if "all" in models_to_train: - models_to_train = ["xgb", "tab_resnet", "lr"] - - # Check HF Hub if --replace is set - skip_training = False - if args.replace and args.hf_repo: - print(f"\n--replace flag set. Checking if models exist in {args.hf_repo}...") - all_exist = True - for model_name in models_to_train: - filename = config[model_name]['filename'] - exists = check_hf_model_exists(args.hf_repo, filename) - if exists: - print(f" ✓ {filename} exists in HF Hub") - else: - print(f" ✗ {filename} not found in HF Hub") - all_exist = False - - if all_exist: - print("\nAll models exist in HF Hub. Skipping training.") - skip_training = True - else: - print("\nSome models missing in HF Hub. Will train locally.") - - # Train models - trained_models = {} - - if "xgb" in models_to_train: - save_path = data_path / config['xgb']['filename'] - trained_models['xgb'] = train_xgboost( - X_train, y_train, X_test, y_test, config, save_path, - skip_training=skip_training - ) - - if "tab_resnet" in models_to_train: - save_path = data_path / config['tab_resnet']['filename'] - trained_models['tab_resnet'] = train_tabresnet( - X_train, y_train, X_test, y_test, config, save_path, - device=args.device, skip_training=skip_training - ) - - if "lr" in models_to_train: - save_path = data_path / config['lr']['filename'] - trained_models['lr'] = train_logistic_regression( - X_train, y_train, X_test, y_test, config, save_path, - device=args.device, skip_training=skip_training - ) - - print("\n" + "="*50) - print("Training/Loading completed!") - print("="*50) - print(f"\nProcessed models: {list(trained_models.keys())}") - print(f"Models saved in: {data_path}") - - # Upload to HF Hub if requested - if args.hf_repo: - print("\n" + "="*50) - print("Uploading to Hugging Face Hub") - print("="*50) - - token = args.hf_token or os.getenv('HF_TOKEN') - - for model_name in models_to_train: - save_path = data_path / config[model_name]['filename'] - filename = config[model_name]['filename'] - - upload_to_hf( - local_path=save_path, - repo_id=args.hf_repo, - filename=filename, - replace=args.replace, - token=token - ) - - print(f"\nAll models processed for HF Hub: {args.hf_repo}") - - -if __name__ == "__main__": - main() From 897c6720b036b8d1b65ccbc3c6ed9ce50737df74 Mon Sep 17 00:00:00 2001 From: shiningstone23 Date: Sat, 22 Nov 2025 13:52:51 +0900 Subject: [PATCH 16/20] feat/tabular:integrate codes --- docker-compose.wine_quality.yml | 2 +- experiments/scripts/analyze_wine_quality.py | 717 +++++++++- experiments/scripts/wine_quality/README.md | 465 ------- experiments/scripts/wine_quality/__init__.py | 6 - .../wine_quality/analyze_wine_quality.py | 370 ------ 
.../wine_quality/explain_wine_quality.py | 1175 ----------------- .../wine_quality/generate_latex_table.py | 235 ---- .../wine_quality/script_utils/__init__.py | 39 - .../wine_quality/script_utils/data_utils.py | 132 -- .../script_utils/explainer_factory.py | 187 --- .../wine_quality/script_utils/model_utils.py | 260 ---- .../scripts/wine_quality/visualize_results.py | 296 ----- 12 files changed, 706 insertions(+), 3178 deletions(-) delete mode 100644 experiments/scripts/wine_quality/README.md delete mode 100644 experiments/scripts/wine_quality/__init__.py delete mode 100755 experiments/scripts/wine_quality/analyze_wine_quality.py delete mode 100755 experiments/scripts/wine_quality/explain_wine_quality.py delete mode 100644 experiments/scripts/wine_quality/generate_latex_table.py delete mode 100644 experiments/scripts/wine_quality/script_utils/__init__.py delete mode 100644 experiments/scripts/wine_quality/script_utils/data_utils.py delete mode 100644 experiments/scripts/wine_quality/script_utils/explainer_factory.py delete mode 100644 experiments/scripts/wine_quality/script_utils/model_utils.py delete mode 100644 experiments/scripts/wine_quality/visualize_results.py diff --git a/docker-compose.wine_quality.yml b/docker-compose.wine_quality.yml index 6df85d9..24eeb76 100644 --- a/docker-compose.wine_quality.yml +++ b/docker-compose.wine_quality.yml @@ -54,7 +54,7 @@ services: echo 'Wine Quality XAI Experiment Starting...' && echo '========================================' && echo '' && - python experiments/scripts/analyze_wine_quality.py --n-samples 25 --seed 42 --verbose 2>&1 | tee results/Wine\ Quality/experiment.log && + python -m experiments.scripts.analyze_wine_quality --n-samples 25 --seed 42 --verbose 2>&1 | tee results/Wine\ Quality/experiment.log && echo '' && echo '========================================' && echo 'Experiment completed successfully!' && diff --git a/experiments/scripts/analyze_wine_quality.py b/experiments/scripts/analyze_wine_quality.py index 690addd..6f98251 100644 --- a/experiments/scripts/analyze_wine_quality.py +++ b/experiments/scripts/analyze_wine_quality.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 """ -Wine Quality XAI Experiment Runner +Wine Quality XAI Experiment Runner - Standalone Version This script runs a complete Wine Quality explanation experiment: 1. Loads data and models @@ -10,7 +10,7 @@ 5. 
Saves results to results/Wine Quality/ Usage: - python run_experiment.py [--n-samples N] [--seed SEED] [--verbose] + python analyze_wine_quality.py [--n-samples N] [--seed SEED] [--verbose] Author: Generated for Wine Quality XAI benchmarking """ @@ -25,10 +25,11 @@ import logging import warnings import subprocess +import functools from pathlib import Path from datetime import datetime from typing import Dict, Any, Tuple, List, Optional, Union -from collections import defaultdict +from collections import defaultdict, deque import numpy as np import pandas as pd @@ -40,6 +41,7 @@ # Suppress warnings warnings.filterwarnings('ignore') +warnings.filterwarnings('ignore', message='You are providing multiple inputs for Lime / Kernel SHAP attributions') # ============================================================================ # Data Loading Utilities @@ -98,8 +100,6 @@ def invert_transform(input_array: np.ndarray, feature_metadata: Dict[str, Any]) def find_idx(a: list, b: list) -> list: """Find permutation index where a[idx] = b.""" - from collections import defaultdict, deque - if sorted(a) != sorted(b): return None @@ -219,6 +219,706 @@ def set_seeds(seed: int = 42): torch.cuda.manual_seed_all(seed) +def explain_with_pnpxai( + model: nn.Module, + X_test: np.ndarray, + y_test: np.ndarray, + explainer_name: Optional[str], + config: Dict[str, Any], + logger: logging.Logger, + batch_size: int = 32, + n_samples: int = 25, + n_steps: int = 50, + model_type: str = "xgb", +) -> np.ndarray: + """Generate explanations using PnPXAI framework.""" + from torch.utils.data import DataLoader, Dataset + from pnpxai import Experiment, AutoExplanation + from pnpxai.core.modality.modality import Modality + from pnpxai.explainers import Lime, KernelShap + from pnpxai.evaluator.metrics import AbPC, Complexity, Metric + from sklearn.cluster import KMeans as SklearnKMeans + from pnpxai.explainers.utils.baselines import BaselineFunction + from pnpxai.explainers.utils.postprocess import NormalizationFunction, minmax + from pnpxai.explainers.base import Tunable + from pnpxai.explainers.types import TunableParameter + + logger.info("Setting up PnPXAI framework...") + + # Define custom dataset + class TabDataset(Dataset): + def __init__(self, inputs, labels): + super().__init__() + self.inputs = inputs + self.labels = labels + + def __len__(self): + return len(self.inputs) + + def __getitem__(self, idx): + return self.inputs[idx], self.labels[idx] + + def collate_fn(batch): + inputs = torch.stack([torch.from_numpy(d[0]) for d in batch]).to(torch.float) + labels = torch.tensor([d[1] for d in batch], dtype=torch.long) + return inputs, labels + + # Create dataset and dataloader + test_dataset = TabDataset(X_test, y_test) + test_loader = DataLoader( + test_dataset, + batch_size=batch_size, + collate_fn=collate_fn, + shuffle=False, + pin_memory=True, + ) + + # Create modality from sample batch + sample_batch = next(iter(test_loader)) + modality = Modality( + dtype=sample_batch[0].dtype, + ndims=sample_batch[0].dim(), + ) + + # Define custom baseline function (KMeans) + bg_data_idx = np.random.choice(len(X_test), size=50, replace=False) + X_bg = X_test[bg_data_idx] + + class KMeans(BaselineFunction, Tunable): + def __init__(self, background_data, n_clusters=8): + self.background_data = background_data + self.n_clusters = TunableParameter( + name='n_clusters', + current_value=n_clusters, + dtype=int, + is_leaf=True, + space={'low': 10, 'high': len(background_data), 'step': 10}, + ) + self.kmeans_ = 
SklearnKMeans(n_clusters).fit(background_data) + BaselineFunction.__init__(self) + Tunable.__init__(self) + self.register_tunable_params([self.n_clusters]) + + def __call__(self, inputs): + if inputs.ndim == 3: + inputs = inputs.squeeze(1) + cluster_ids = self.kmeans_.predict(inputs.to(torch.float64).numpy()) + cluster_centers = self.kmeans_.cluster_centers_[cluster_ids] + return torch.from_numpy(cluster_centers).float().to(inputs.device) + + # Define custom normalization functions + class Pos(NormalizationFunction): + def __init__(self): + super().__init__() + + def __call__(self, attrs): + return attrs.abs() + + class MinMax(NormalizationFunction): + def __init__(self): + super().__init__() + + def __call__(self, attrs): + return minmax(attrs) / 1000 + + # Define compound metric + class CompoundMetric(Metric): + def __init__( + self, + model, + cmpd_metrics, + weights, + explainer=None, + target_input_keys=None, + additional_input_keys=None, + output_modifier=None, + ): + super().__init__( + model, explainer, target_input_keys, + additional_input_keys, output_modifier, + ) + assert len(cmpd_metrics) == len(weights) + self.cmpd_metrics = cmpd_metrics + self.weights = weights + + def evaluate(self, inputs, targets, attrs): + values = torch.zeros(attrs.size(0)).to(attrs.device) + for weight, metric in zip(self.weights, self.cmpd_metrics): + values += weight * metric.set_explainer(self.explainer).evaluate(inputs, targets, attrs) + return values + + # Create experiment based on model type + logger.info("Creating PnPXAI experiment...") + + if model_type == "tab_resnet": + expr = AutoExplanation( + model=model, + data=test_loader, + modality=modality, + target_input_keys=[0], + target_class_extractor=lambda outputs: outputs.argmax(-1), + label_key='labels', + target_labels=False, + ) + + expr.metrics.delete('morf') + expr.metrics.delete('lerf') + + elif model_type == "xgb": + expr = Experiment( + model=model, + data=test_loader, + modality=modality, + target_input_keys=[0], + target_class_extractor=lambda outputs: outputs.argmax(-1), + label_key=-1, + ) + + # add explainers + expr.explainers.add('kernel_shap', KernelShap) + expr.explainers.add('lime', Lime) + + # add metrics + expr.metrics.add('abpc', AbPC) + + else: + raise ValueError("Invalid model type") + + # Add custom baseline function and default kwargs + expr.modality.util_functions['baseline_fn'].add('kmeans', KMeans) + expr.modality.util_functions['baseline_fn'].add_default_kwargs( + 'background_data', X_bg + ) + + # Add custom normalization functions and complexity metric + expr.modality.util_functions['normalization_fn'].add('pos', Pos) + expr.modality.util_functions['normalization_fn'].add('minmax', MinMax) + expr.metrics.add('cmpx', Complexity) + expr.metrics.add('cmpd', CompoundMetric) + + # Map explainer names + PNP_INV_MAP = { + "kernel_shap": "shap", + "lime": "lime", + "gradient": "grad", + "grad_x_input": "itg", + "integrated_gradients": "ig", + "smooth_grad": "sg", + "lrp_uniform_epsilon": "lrp", + "var_grad": "vg", + } + + explainer_map = { + 'shap': 'kernel_shap', + 'lime': 'lime', + 'grad': 'gradient', + 'itg': 'grad_x_input', + 'ig': 'integrated_gradients', + 'sg': 'smooth_grad', + 'vg': 'var_grad', + 'lrp': 'lrp_uniform_epsilon', + } + + if explainer_name: + pnp_explainer = explainer_map.get(explainer_name, explainer_name) + logger.info(f"Using explainer: {pnp_explainer}") + + # Setup metric options + metric_options = { + 'cmpd_metrics': [ + expr.create_metric('abpc'), + expr.create_metric('cmpx'), + ], + 'weights': 
[.7, -.3] + } + + # Set direction + direction = 'maximize' + + # Setup disable_tunable_params + disable_tunable_params = {} + if pnp_explainer in ['lime', 'kernel_shap']: + disable_tunable_params['n_samples'] = n_samples + if pnp_explainer in ['integrated_gradients']: + disable_tunable_params['n_steps'] = n_steps + + logger.info("Running hyperparameter optimization...") + opt_results = expr.optimize( + explainer_key=pnp_explainer, + metric_key='cmpd', + metric_options=metric_options, + direction=direction, + disable_tunable_params=disable_tunable_params, + **config['optuna'] + ) + + logger.info(f"Best value: {opt_results.study.best_trial.value:.4f}") + + # Re-set seeds before generating explanations for reproducibility + logger.info("Re-setting random seeds for reproducible explanation generation...") + set_seeds(config['optuna'].get('seed', 42)) + + # Generate explanations + opt_explainer = opt_results.explainer + th_test_input = torch.tensor(test_dataset.inputs, dtype=torch.float32) + targets = model(th_test_input).argmax(-1) + + exp_name = PNP_INV_MAP[pnp_explainer] + + if exp_name in ["shap", "lime"]: + explanations = opt_explainer.attribute(th_test_input, targets)[0].detach().cpu().numpy() + else: + explanations = opt_explainer.attribute(th_test_input, targets).squeeze(1).detach().cpu().numpy() + + else: + raise ValueError("PnPXAI requires explainer name") + + return explanations + + +def explain_with_captum( + model: nn.Module, + X_test: np.ndarray, + y_test: np.ndarray, + explainer_name: str, + config: Dict[str, Any], + logger: logging.Logger, + n_samples: int = 25, +) -> np.ndarray: + """Generate explanations using Captum framework.""" + from captum.attr import ( + KernelShap, Lime, IntegratedGradients, Saliency, + InputXGradient, NoiseTunnel, LRP + ) + from captum.attr._utils.lrp_rules import EpsilonRule + + logger.info(f"Setting up Captum framework with {explainer_name}...") + + # Create explainer + if explainer_name == "shap": + explainer = KernelShap(model) + elif explainer_name == "lime": + explainer = Lime(model, interpretable_model=None) + elif explainer_name == "grad": + explainer = Saliency(model) + elif explainer_name == "itg": + explainer = InputXGradient(model) + elif explainer_name == "ig": + explainer = IntegratedGradients(model, multiply_by_inputs=True) + elif explainer_name == "sg": + explainer = NoiseTunnel(Saliency(model)) + elif explainer_name == "lrp": + # Set LRP rules for batch norm layers + if hasattr(model, 'res_blocks'): + for block in model.res_blocks: + if hasattr(block, 'bn'): + block.bn.rule = EpsilonRule() + if hasattr(model, 'bn'): + model.bn.rule = EpsilonRule() + explainer = LRP(model) + else: + raise ValueError(f"Unknown Captum explainer: {explainer_name}") + + # Convert to tensor + X_test_t = torch.tensor(X_test, dtype=torch.float32) + targets = model(X_test_t).argmax(dim=1) + + # Generate explanations + logger.info("Generating explanations...") + + if explainer_name == "grad": + explanations = explainer.attribute(X_test_t, target=targets, abs=False) + explanations = explanations.detach().numpy() + + elif explainer_name == "sg": + explanations = explainer.attribute(X_test_t, target=targets, nt_type='smoothgrad') + explanations = explanations.detach().numpy() + + elif explainer_name in ("shap", "lime"): + # Process in batches to avoid memory issues + attrs_list = [] + for i in tqdm(range(len(X_test_t)), desc="Explaining"): + input_i = X_test_t[i].unsqueeze(0) + attr_i = explainer.attribute(input_i, target=targets[i], n_samples=n_samples) + 
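+            # attr_i holds the per-feature attributions for a single instance; n_samples above
+            # caps the perturbation samples Lime/KernelShap draw per instance, and results are
+            # collected on CPU as numpy arrays to be concatenated after the loop.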
attrs_list.append(attr_i.detach().cpu().numpy()) + explanations = np.concatenate(attrs_list, axis=0) + + else: + explanations = explainer.attribute(X_test_t, target=targets) + explanations = explanations.detach().numpy() + + return explanations + + +def explain_with_omnixai( + model: nn.Module, + X_test: np.ndarray, + y_test: np.ndarray, + explainer_name: str, + config: Dict[str, Any], + logger: logging.Logger, + feature_metadata: Dict[str, Any], + raw_data, + n_samples: int = 64, +) -> np.ndarray: + """Generate explanations using OmniXAI framework.""" + from omnixai.data.tabular import Tabular + from omnixai.explainers.tabular import TabularExplainer + + logger.info(f"Setting up OmniXAI framework with {explainer_name}...") + + NAME_MAP = { + "lime": "LimeTabular", + "shap": "ShapTabular" + } + + explainer_nm = NAME_MAP[explainer_name] + + # Prepare training data + raw_data = raw_data.fillna("missing") + categorical_columns = [c for c in raw_data.columns if feature_metadata[c]["type"] == "categorical"] + train_data = Tabular(raw_data, categorical_columns=categorical_columns) + + # Get target function from wrapped model + if hasattr(model, 'xgb_model'): + target_function = model.xgb_model.predict_proba + else: + raise ValueError("OmniXAI requires XGBoost model") + + # Create transformation functions + transform_fn = functools.partial(transform, feature_metadata=feature_metadata) + + def prep(z): + return transform_fn(z.data.fillna("missing")) + + # Create explainer + explainer = TabularExplainer( + explainers=[explainer_nm], + mode="classification", + data=train_data, + model=target_function, + preprocess=prep, + ) + + # Prepare test instances + test_instances = invert_transform(X_test, feature_metadata).fillna("missing") + + # Set parameters + params = { + "LimeTabular": {"num_features": raw_data.shape[1], "num_samples": n_samples}, + "ShapTabular": {"nsamples": n_samples} + } + + # Generate explanations + logger.info("Generating explanations...") + exp_obj = explainer.explain(test_instances, params=params) + + # Extract and reorder scores + scores = [] + for i in range(test_instances.shape[0]): + exp = exp_obj[explainer_nm].get_explanations(i) + sorted_idx = find_idx(exp['features'], exp['instance'].columns.tolist()) + scores.append([exp['scores'][j] for j in sorted_idx]) + + explanations = np.array(scores) + return explanations + + +def explain_with_openxai( + model: nn.Module, + X_test: np.ndarray, + y_test: np.ndarray, + X_train: np.ndarray, + explainer_name: str, + config: Dict[str, Any], + logger: logging.Logger, + feature_metadata: Dict[str, Any], + batch_size: int = 32, + n_samples: int = 64, +) -> np.ndarray: + """Generate explanations using OpenXAI framework.""" + from torch.utils.data import DataLoader, TensorDataset + from openxai import Explainer + from openxai.experiment_utils import fill_param_dict + + logger.info(f"Setting up OpenXAI framework with {explainer_name}...") + + # Convert to tensors + test_input = torch.tensor(X_test, dtype=torch.float32) + train_input = None + explainer_params = {} + + # Setup training data for LIME/IG + if explainer_name in ['lime', 'ig']: + train_input = torch.tensor(X_train, dtype=torch.float32) + explainer_params = fill_param_dict(explainer_name, {}, train_input) + + if explainer_name in ['lime', 'shap']: + explainer_params['n_samples'] = n_samples + + # Create explainer + explainer = Explainer(method=explainer_name, model=model, param_dict=explainer_params) + + # Get predictions + predicted_labels = model(test_input).detach().argmax(dim=1) 
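+    # The OpenXAI explanations below target the model's predicted class (predicted_labels),
+    # not the ground-truth label.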
+ + # Create data loader + dataset = TensorDataset(test_input, predicted_labels) + data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=False) + + # Generate explanations + logger.info("Generating explanations...") + all_explanations = [] + for batch_inputs, batch_labels in tqdm(data_loader, desc="Explaining batches"): + batch_explanations = explainer.get_explanations(batch_inputs, label=batch_labels) + all_explanations.append(batch_explanations) + + # Combine batches + combined_explanations = torch.cat(all_explanations, dim=0) + + # Aggregate categorical features + processed_explanations = [] + for feature_name, feature_info in feature_metadata.items(): + if feature_info['type'] == 'categorical': + feature_index = feature_info['index'] + onehot_encoded = test_input[:, feature_index] + explanation_values = combined_explanations[:, feature_index] + categorical_explanation = (onehot_encoded * explanation_values).sum(dim=1) + processed_explanations.append(categorical_explanation) + else: + feature_index = feature_info['index'] + processed_explanations.append(combined_explanations[:, feature_index]) + + explanations = torch.stack(processed_explanations, dim=1).detach().numpy() + return explanations + + +class PyTorchModelWrapper: + """Wrapper for PyTorch models to add predict and predict_proba methods for AutoXAI.""" + + def __init__(self, pytorch_model): + self.model = pytorch_model + self.model.eval() + + def predict(self, X): + """Predict class labels for samples.""" + if not isinstance(X, torch.Tensor): + X = torch.FloatTensor(X) + + with torch.no_grad(): + logits = self.model(X) + predictions = torch.argmax(logits, dim=1) + + return predictions.cpu().numpy() + + def predict_proba(self, X): + """Predict class probabilities for samples.""" + if not isinstance(X, torch.Tensor): + X = torch.FloatTensor(X) + + with torch.no_grad(): + logits = self.model(X) + probas = torch.softmax(logits, dim=1) + + return probas.cpu().numpy() + + +def explain_with_autoxai( + model: nn.Module, + X_test: np.ndarray, + y_test: np.ndarray, + explainer_name: str, + config: Dict[str, Any], + logger: logging.Logger, + raw_data, + batch_size: int = 32, + n_samples: int = 64, +) -> np.ndarray: + """Generate explanations using AutoXAI framework.""" + import glob + + # Add AutoXAI virtual environment to sys.path + autoxai_venv = "/opt/autoxai_venv" + if os.path.exists(autoxai_venv): + site_packages = glob.glob(f"{autoxai_venv}/lib/python*/site-packages") + if site_packages: + sys.path.insert(0, site_packages[0]) + + autoxai_path = os.path.join(os.path.dirname(__file__), "lib", "AutoXAI") + sys.path.insert(0, autoxai_path) + from hyperparameters_optimization import get_parameters + from XAI_solutions import set_up_explainer, get_local_exp + + logger.info(f"Setting up AutoXAI framework with {explainer_name}...") + + AUTOXAI_NAME_MAP = {"shap": "SHAP", "lime": "LIME"} + autoxai_nm = AUTOXAI_NAME_MAP[explainer_name] + + bg_size = min(50, len(X_test)) + + # Prepare model for AutoXAI + if isinstance(model, TorchModelForXGBoost): + unwrapped_model = model.xgb_model + elif isinstance(model, nn.Module): + unwrapped_model = PyTorchModelWrapper(model) + else: + unwrapped_model = model + + # Setup context + properties_list = ["robustness", "fidelity", "conciseness"] + context = {} + rand_idx = np.random.randint(0, X_test.shape[0], bg_size) + context["X"] = X_test[rand_idx] + context["y"] = y_test[rand_idx] + context["feature_names"] = list(raw_data.columns) + context["verbose"] = False + context["task"] = 
"classification" + context["question"] = "Why" + context["session_id"] = f"_{bg_size}_wine" + context["scaling"] = "Std" + context["weights"] = [1, 2, 0.5] + context["distance"] = "cosine" + context["explanations"] = [] + context["model"] = unwrapped_model + context["ES"] = True + context["IS"] = True + + score_hist = { + "xai_sol": [], "epoch": [], "aggregated_score": [], + "parameters": [], "robustness": [], "scaled_robustness": [], + "fidelity": [], "scaled_fidelity": [], + "conciseness": [], "scaled_conciseness": [] + } + + # Get default parameters + logger.info("Preparing AutoXAI explainer with default parameters...") + default_parameters = get_parameters( + autoxai_nm, score_hist, "default", properties_list, context) + + # Setup explainer + context['explainer'] = set_up_explainer(autoxai_nm, default_parameters, context) + + # Generate explanations for all test samples + logger.info("Generating explanations...") + explanations = np.zeros_like(X_test) + for i in tqdm(range(len(X_test)), desc="Explaining"): + e = get_local_exp(autoxai_nm, X_test[i], default_parameters, context) + idx = default_parameters["most_influent_features"] + explanations[i, idx] = e + + return explanations + + +def evaluate_explanations( + explanations: np.ndarray, + model: nn.Module, + X_test: np.ndarray, + y_test: np.ndarray, + logger: logging.Logger, + batch_size: int = 32, +) -> Dict[str, np.ndarray]: + """Evaluate explanations using PnPXAI metrics.""" + from torch.utils.data import DataLoader, TensorDataset + from pnpxai import Experiment + from pnpxai.core.modality.modality import Modality + from pnpxai.explainers import KernelShap + from pnpxai.evaluator.metrics import AbPC, Complexity, Metric + + logger.info("Evaluating explanations...") + + # Create dataloader + test_dataset = TensorDataset( + torch.tensor(X_test, dtype=torch.float32), + torch.tensor(y_test, dtype=torch.long) + ) + test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False) + + # Create experiment for metrics + sample_batch = next(iter(test_loader)) + modality = Modality( + dtype=sample_batch[0].dtype, + ndims=sample_batch[0].dim(), + ) + + expr = Experiment( + model=model, + data=test_loader, + modality=modality, + target_input_keys=[0], + target_class_extractor=lambda outputs: outputs.argmax(-1), + label_key=-1, + ) + + # Add explainers + expr.explainers.add('kernel_shap', KernelShap) + + # Add metrics + expr.metrics.add('abpc', AbPC) + expr.metrics.add('cmpx', Complexity) + + # Compound metric + class CompoundMetric(Metric): + def __init__(self, model, cmpd_metrics, weights, explainer=None, + target_input_keys=None, additional_input_keys=None, output_modifier=None): + super().__init__(model, explainer, target_input_keys, additional_input_keys, output_modifier) + self.cmpd_metrics = cmpd_metrics + self.weights = weights + + def evaluate(self, inputs, targets, attrs): + values = torch.zeros(attrs.size(0)).to(attrs.device) + for weight, metric in zip(self.weights, self.cmpd_metrics): + values += weight * metric.set_explainer(self.explainer).evaluate(inputs, targets, attrs) + return values + + expr.metrics.add('cmpd', CompoundMetric) + + # Create dummy explainer for evaluation + dummy_explainer = expr.create_explainer('kernel_shap') + + # Evaluate each metric + results = {} + X_test_t = torch.tensor(X_test, dtype=torch.float32) + explanations_t = torch.tensor(explanations, dtype=torch.float32) + + for metric_name in ['abpc', 'cmpx']: + metric = expr.create_metric(metric_name) + metric_values = [] + + for i in 
range(len(X_test)): + inputs = {0: X_test_t[i].unsqueeze(0)} + targets = model(inputs[0]).argmax(-1) + attrs = explanations_t[i].unsqueeze(0) + + value = metric.set_explainer(dummy_explainer).evaluate(inputs, targets, attrs) + metric_values.append(value.item()) + + results[metric_name] = np.array(metric_values) + + # Compound metric + metric_options = { + 'cmpd_metrics': [expr.create_metric('abpc'), expr.create_metric('cmpx')], + 'weights': [0.7, -0.3] + } + cmpd_metric = expr.create_metric('cmpd', **metric_options) + cmpd_values = [] + + for i in range(len(X_test)): + inputs = {0: X_test_t[i].unsqueeze(0)} + targets = model(inputs[0]).argmax(-1) + attrs = explanations_t[i].unsqueeze(0) + + value = cmpd_metric.set_explainer(dummy_explainer).evaluate(inputs, targets, attrs) + cmpd_values.append(value.item()) + + results['cmpd'] = np.array(cmpd_values) + + # Log summary + for metric_name, values in results.items(): + logger.info(f" {metric_name.upper()}: {values.mean():.4f} ± {values.std():.4f}") + + return results + + def run_single_explanation(framework: str, model_name: str, explainer: str, model: nn.Module, X_test: np.ndarray, y_test: np.ndarray, X_train: np.ndarray, feature_metadata: Dict, raw_data: pd.DataFrame, @@ -227,13 +927,6 @@ def run_single_explanation(framework: str, model_name: str, explainer: str, """Run a single explanation and evaluation.""" logger.info(f"Generating {framework}/{model_name}/{explainer} explanations...") - # Import explain_wine_quality module - sys.path.insert(0, str(Path(__file__).parent / "experiments" / "scripts" / "wine_quality")) - from explain_wine_quality import ( - explain_with_pnpxai, explain_with_captum, explain_with_omnixai, - explain_with_openxai, explain_with_autoxai, evaluate_explanations - ) - # Generate explanations based on framework if framework == "pnpxai": explanations = explain_with_pnpxai( diff --git a/experiments/scripts/wine_quality/README.md b/experiments/scripts/wine_quality/README.md deleted file mode 100644 index 8b1d337..0000000 --- a/experiments/scripts/wine_quality/README.md +++ /dev/null @@ -1,465 +0,0 @@ -# Wine Quality XAI Experiments - -This module provides a comprehensive framework for analyzing Wine Quality predictions using various Explainable AI (XAI) methods. The experiments compare multiple XAI frameworks across different models and explainers, with automated hyperparameter optimization. - -## 📋 Table of Contents - -- [Overview](#overview) -- [Wine Quality Dataset](#wine-quality-dataset) -- [Experimental Setup](#experimental-setup) -- [Usage](#usage) -- [Project Structure](#project-structure) -- [Results](#results) - ---- - -## Overview - -This project implements a benchmarking framework for comparing explainability methods on the Wine Quality dataset. It supports: - -- **5 XAI Frameworks**: PnPXAI, Captum, OmniXAI, OpenXAI, AutoXAI -- **2 Models**: XGBoost, TabResNet (Residual Network for Tabular Data) -- **8 Explainer Methods**: LIME, SHAP, Integrated Gradients, Gradient (Saliency), SmoothGrad, Input×Gradient, VarGrad, LRP -- **Automated Evaluation**: ABPC (Correctness), Complexity, Compounded metrics -- **Hyperparameter Optimization**: Optuna-based grid search with 25 trials - -The goal is to optimize explanations for various model and explainer combinations, finding the best hyperparameters that maximize explanation quality metrics. 
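The compounded objective that the hyperparameter optimization maximizes combines the correctness and complexity metrics described in the sections below. A minimal illustrative sketch, assuming per-sample ABPC and Complexity values are already available as NumPy arrays; the function name and signature here are not part of the codebase:

```python
import numpy as np

def compounded_score(abpc: np.ndarray, complexity: np.ndarray,
                     weights=(0.7, -0.3)) -> np.ndarray:
    """Per-sample compound objective: 0.7 * ABPC - 0.3 * Complexity (higher is better)."""
    return weights[0] * abpc + weights[1] * complexity

# The optimizer would then maximize, for example, compounded_score(abpc, cmpx).mean() across trials.
```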
- ---- - -## Wine Quality Dataset - -### Dataset Characteristics - -- **Source**: UCI Machine Learning Repository (ID: 186) -- **Samples**: 6,497 total (5,197 train, 1,300 test) -- **Features**: 11 numerical features - - Fixed acidity - - Volatile acidity - - Citric acid - - Residual sugar - - Chlorides - - Free sulfur dioxide - - Total sulfur dioxide - - Density - - pH - - Sulphates - - Alcohol -- **Target**: Binary classification - - Class 1: Quality scores 7, 8, or 9 (high quality) - - Class 0: Quality scores ≤ 6 (standard quality) -- **Preprocessing**: - - StandardScaler for numerical features - - Stratified train/test split (80/20) - - Random seed: 42 for reproducibility - -### Dataset Download - -The dataset is automatically downloaded and preprocessed using the UCI ML Repository API: - -```bash -# Inside the container or with proper Python environment -cd data/Wine\ Quality/ -python preprocess.py -``` - -This will: -1. Download the Wine Quality dataset from UCI (ID: 186) -2. Apply feature preprocessing (StandardScaler) -3. Create binary classification labels (quality >= 7 → 1) -4. Split into train/test sets (stratified, 80/20) -5. Save processed data as `.npy` files and metadata as `.pkl` - -**Output Files:** -- `raw_data.csv` - Original dataset -- `X_train.npy`, `X_test.npy` - Normalized features -- `y_train.npy`, `y_test.npy` - Binary labels -- `feature_metadata.pkl` - Feature encoders and metadata -- `xgb_model.json` - Trained XGBoost model -- `resnet_model.pth` - Trained TabResNet model - ---- - -## Experimental Setup - -### Supported Models - -1. **XGBoost Classifier** (`xgb`) - - Standard gradient boosting classifier - - Compatible with: PnPXAI, Captum, OmniXAI, AutoXAI - - Explainers: LIME, SHAP (model-agnostic methods) - -2. **TabResNet** (`tab_resnet`) - - Residual network architecture for tabular data - - Compatible with: PnPXAI, Captum, OpenXAI, AutoXAI - - Explainers: All 8 methods (including gradient-based) - -### Supported XAI Frameworks - -| Framework | XGBoost | TabResNet | Explainers | Special Features | -|-----------|---------|-----------|------------|------------------| -| **PnPXAI** | ✅ | ✅ | All 8 + HPO | Hyperparameter optimization with Optuna | -| **Captum** | ✅ | ✅ | 7 (no VG) | PyTorch native implementation | -| **OmniXAI** | ✅ | ❌ | LIME, SHAP | XGBoost only | -| **OpenXAI** | ❌ | ✅ | 6 (no LRP/VG) | TabResNet only | -| **AutoXAI** | ✅ | ✅ | LIME, SHAP | Legacy HPO framework (separate venv) | - -### Explainer Methods - -- **Model-Agnostic**: - - `lime`: Local Interpretable Model-agnostic Explanations - - `shap`: SHapley Additive exPlanations (KernelSHAP) - -- **Gradient-Based** (PyTorch models only): - - `grad`: Gradient (Saliency) - - `itg`: Input × Gradient - - `ig`: Integrated Gradients - - `sg`: SmoothGrad - - `vg`: VarGrad (PnPXAI only) - - `lrp`: Layer-wise Relevance Propagation - -### Experimental Objectives - -The experiments aim to: - -1. **Optimize Explanations**: Use hyperparameter optimization (HPO) to find the best explanation parameters for each model/explainer combination -2. **Compare Frameworks**: Evaluate explanation quality across different XAI frameworks -3. **Evaluate Quality**: Measure explanations using: - - **ABPC (Correctness)**: How accurately the explanation reflects the model's behavior - - **Complexity**: Sparsity and interpretability of explanations (lower is better) - - **Compounded**: Combined score (0.7 × ABPC - 0.3 × Complexity) -4. 
**Enable Reproducibility**: All experiments use fixed random seeds and Docker containers - -### Configuration Files - -All experimental settings are managed via YAML configuration files in `configs/tabular/`: - -- `dataset_config.yaml`: Dataset settings (UCI ID, split ratio, target info) -- `model_config.yaml`: Model hyperparameters and training seeds -- `explainer_config.yaml`: Framework-specific explainer parameters -- `optuna_config.yaml`: HPO settings (grid search sampler, 25 trials) - ---- - -## Usage - -### Docker Setup - -This module requires the Wine Quality Docker image. From the project root: - -#### Option 1: Using Docker Compose (Recommended) - -```bash -# Build the wine quality experiment image -docker build -t pnpxai_wine_quality:latest -f Dockerfile.wine_quality . - -# Start the container with all volumes mounted -docker-compose -f docker-compose.wine_quality.yml up -d - -# Enter the container -docker exec -it pnpxai-wine-quality bash - -# Stop the container when done -docker-compose -f docker-compose.wine_quality.yml down -``` - -#### Option 2: Using Docker Run - -```bash -# Build the wine quality experiment image -docker build -t pnpxai_wine_quality:latest -f Dockerfile.wine_quality . - -# Run with GPU support and volume mount -docker run --rm -it \ - --runtime=nvidia \ - --gpus all \ - --shm-size=8g \ - -v $(pwd):/root/pnpxai-experiments \ - --name pnpxai-wine-quality \ - pnpxai_wine_quality:latest \ - /bin/bash -``` - -### Quick Start - -The main entry point is `analyze_wine_quality.py`, which replaces the old shell scripts with a pure Python implementation: - -```bash -# Inside the Docker container, from /root/pnpxai-experiments/ - -# Run all experiments (all models, frameworks, explainers) -python -m experiments.scripts.wine_quality.analyze_wine_quality - -# Dry run to see what will be executed -python -m experiments.scripts.wine_quality.analyze_wine_quality --dry-run - -# Continue from where you left off (skip completed experiments) -python -m experiments.scripts.wine_quality.analyze_wine_quality --continue - -# Run with verbose logging -python -m experiments.scripts.wine_quality.analyze_wine_quality --verbose -``` - -### Running Specific Combinations - -```bash -# Run only PnPXAI framework -python -m experiments.scripts.wine_quality.analyze_wine_quality --framework pnpxai - -# Run only XGBoost model -python -m experiments.scripts.wine_quality.analyze_wine_quality --model xgb - -# Run only SHAP explainer -python -m experiments.scripts.wine_quality.analyze_wine_quality --explainer shap - -# Combine filters: PnPXAI + XGBoost + SHAP -python -m experiments.scripts.wine_quality.analyze_wine_quality \ - --framework pnpxai \ - --model xgb \ - --explainer shap \ - --verbose -``` - -### Running Individual Experiments - -You can also run individual experiments directly: - -```bash -# Run a single experiment with PnPXAI -python -m experiments.scripts.wine_quality.explain_wine_quality \ - --framework pnpxai \ - --model xgb \ - --explainer shap \ - --verbose - -# Run with Captum and TabResNet -python -m experiments.scripts.wine_quality.explain_wine_quality \ - --framework captum \ - --model tab_resnet \ - --explainer ig \ - --verbose -``` - -### Command-Line Options - -**For `analyze_wine_quality.py` (batch runner):** -- `--dry-run`: Show commands without executing -- `--continue`: Skip already completed experiments -- `--verbose`: Enable verbose logging -- `--framework [FRAMEWORKS...]`: Filter by framework(s) -- `--model [MODELS...]`: Filter by model(s) -- `--explainer 
[EXPLAINERS...]`: Filter by explainer(s) - -**For `explain_wine_quality.py` (individual experiments):** -- `--framework {pnpxai|captum|omnixai|openxai|autoxai}`: XAI framework (required) -- `--model {xgb|tab_resnet}`: Model type (required) -- `--explainer {lime|shap|ig|grad|sg|itg|vg|lrp}`: Explainer method (required) -- `--data-dir PATH`: Data directory (default: `data/Wine Quality`) -- `--config-dir PATH`: Config directory (default: `configs/tabular`) -- `--output PATH`: Custom output directory -- `--seed INT`: Random seed (default: 42) -- `--batch-size INT`: Batch size (default: 32) -- `--n-samples INT`: Samples for LIME/SHAP (default: 64) -- `--max-test-samples INT`: Limit test set size -- `--verbose`: Enable verbose logging - -### All Valid Combinations - -The `analyze_wine_quality.py` script automatically runs all valid framework/model/explainer combinations: - -**PnPXAI** (10 combinations): -- `xgb`: lime, shap -- `tab_resnet`: lime, shap, grad, itg, ig, sg, vg, lrp - -**Captum** (9 combinations): -- `xgb`: lime, shap -- `tab_resnet`: lime, shap, grad, itg, ig, sg, lrp - -**OmniXAI** (2 combinations): -- `xgb`: lime, shap - -**OpenXAI** (6 combinations): -- `tab_resnet`: lime, shap, grad, itg, ig, sg - -**AutoXAI** (4 combinations): -- `xgb`: lime, shap -- `tab_resnet`: lime, shap - -**Total: 31 experiment combinations** - ---- - -## Project Structure - -``` -wine_quality/ -├── analyze_wine_quality.py # Batch experiment runner -├── explain_wine_quality.py # Single experiment script -├── script_utils/ # Utility modules -│ ├── data_utils.py # Data loading and transformation -│ ├── model_utils.py # Model loading and wrapping -│ └── explainer_factory.py # Explainer validation -└── lib/ # AutoXAI library -``` - -### Utility Modules - -**`script_utils/data_utils.py`:** -- `load_wine_quality()`: Load preprocessed data and metadata -- `transform()`: Apply feature transformations -- `invert_transform()`: Reverse transformations for interpretation -- `find_idx()`: Find feature index mappings - -**`script_utils/model_utils.py`:** -- `TorchModelForXGBoost`: XGBoost to PyTorch wrapper -- `load_model()`: Priority-based model loading (local → HF Hub) -- `wrap_model_for_pytorch()`: Framework-agnostic model wrapping - -**`script_utils/explainer_factory.py`:** -- `ExplainerConfig`: Framework compatibility matrix -- `validate_explainer_args()`: Validate framework/model/explainer combinations -- `get_framework_specific_name()`: Map explainer names across frameworks - ---- - -## Results - -### Output Structure - -Each experiment generates outputs in `results/Wine Quality/{model}/{framework}/{explainer}/`: - -- **`explanations.npy`**: Attribution values (n_samples × n_features) - - Importance scores for each feature for each test sample - - Higher absolute values indicate more important features - -- **`abpc.npy`**: ABPC (Area Between Perturbation Curves) scores - - Measures correctness of explanations - - Higher values indicate better explanations - -- **`cmpx.npy`**: Complexity scores - - Measures sparsity/simplicity of explanations - - Lower values indicate simpler, more interpretable explanations - -- **`cmpd.npy`**: Compounded scores - - Combined metric: 0.7 × ABPC - 0.3 × Complexity - - Balances correctness and simplicity - -- **`metadata.json`**: Execution metadata - - Framework, model, explainer details - - Hyperparameters used - - Execution time - - Data shapes - - Metric summaries (mean, std) - -### Visualizing Results - -After running experiments, you can aggregate and visualize all results 
using the visualization script: - -```bash -# Inside the container, from /root/pnpxai-experiments/ - -# Display results as markdown table -python -m experiments.scripts.wine_quality.visualize_results - -# Save results to CSV file -python -m experiments.scripts.wine_quality.visualize_results --output csv > wine_quality_results.csv - -# Save results to JSON file -python -m experiments.scripts.wine_quality.visualize_results --output json > wine_quality_results.json - -# Use custom results directory -python -m experiments.scripts.wine_quality.visualize_results --results-dir "path/to/results" - -# Enable verbose output with detailed statistics -python -m experiments.scripts.wine_quality.visualize_results --verbose - -# Customize decimal precision (default: 4) -python -m experiments.scripts.wine_quality.visualize_results --round 3 -``` - -**Output Format:** - -The visualization script creates a comprehensive table showing: -- **Models**: XGBoost and Resnet (TabResNet) -- **Metrics**: Correctness (ABPC), Complexity, Compounded -- **Explainers**: All tested explanation methods (LIME, SHAP, Gradient-based, etc.) -- **Frameworks**: Results for each framework (PnPXAI, Captum, OmniXAI, OpenXAI, AutoXAI) - -Example output: -``` -| | model | metric | explainer | PnPXAI | Captum | AutoXAI | OmniXAI | OpenXAI | -|---:|:--------|:------------|:---------------------|---------:|---------:|----------:|----------:|----------:| -| 0 | XGBoost | Correctness | KernelSHAP | 0.1424 | 0.0546 | -0.0023 | 0.0749 | nan | -| 1 | XGBoost | Correctness | LIME | 0.1290 | 0.0557 | -0.0130 | 0.0461 | nan | -... -``` - -**Visualization Options:** -- `--results-dir PATH`: Specify custom results directory (default: `results/Wine Quality`) -- `--output {markdown|csv|json}`: Choose output format (default: markdown) -- `--round N`: Number of decimal places (default: 4) -- `--verbose`: Show detailed collection statistics - -### Checking Progress - -```bash -# Inside the container -ls -R results/Wine\ Quality/ - -# Count completed experiments -find results/Wine\ Quality/ -name "explanations.npy" | wc -l - -# Quick summary of all results -python -m experiments.scripts.wine_quality.visualize_results -``` - -### Interpreting Results - -- **Best Explanation**: Highest compounded score (balances correctness and simplicity) -- **Most Accurate**: Highest ABPC score (may be complex) -- **Simplest**: Lowest complexity score (may sacrifice accuracy) - -Compare metrics across different framework/model/explainer combinations to find the optimal explanation method for your use case. - ---- - -## Troubleshooting - -### Common Issues - -1. **Module not found errors**: - - Ensure you're running commands from `/root/pnpxai-experiments` in the Docker container - - Check that volume mount is working: `ls -la /root/pnpxai-experiments` - -2. **AutoXAI import errors**: - - AutoXAI uses a separate virtual environment due to numpy version conflicts - - The `analyze_wine_quality.py` script automatically handles this - -3. **CUDA out of memory**: - - Reduce batch size: `--batch-size 16` - - Limit test samples: `--max-test-samples 100` - -4. **Invalid framework/model/explainer combination**: - - Check the compatibility matrix in [Supported XAI Frameworks](#supported-xai-frameworks) - - The script validates combinations and raises errors for invalid inputs - -5. 
**Results not saving**: - - Check Docker volume mount permissions - - Ensure sufficient disk space - - Verify results directory exists: `mkdir -p results/Wine\ Quality` - ---- - -## Citation - -### Dataset Citation - -```bibtex -@misc{cortez2009wine, - author={Cortez, Paulo and Cerdeira, A. and Almeida, F. and Matos, T. and Reis, J.}, - title={Wine Quality}, - year={2009}, - howpublished={UCI Machine Learning Repository}, - note={DOI: 10.24432/C56S3T} -} -``` diff --git a/experiments/scripts/wine_quality/__init__.py b/experiments/scripts/wine_quality/__init__.py deleted file mode 100644 index e98d8c6..0000000 --- a/experiments/scripts/wine_quality/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -""" -Wine Quality Explanation Experiment Module - -This module provides tools for generating and analyzing explanations -for Wine Quality predictions using various XAI frameworks. -""" diff --git a/experiments/scripts/wine_quality/analyze_wine_quality.py b/experiments/scripts/wine_quality/analyze_wine_quality.py deleted file mode 100755 index 8074f0a..0000000 --- a/experiments/scripts/wine_quality/analyze_wine_quality.py +++ /dev/null @@ -1,370 +0,0 @@ -#!/usr/bin/env python3 -""" -Wine Quality Analysis Script - -Run all experiments for Wine Quality dataset with various model/framework/explainer combinations. -This script replaces the shell scripts (run_all_experiments.sh and run_explain.sh) with a pure Python implementation. - -Usage: - python -m experiments.scripts.wine_quality.analyze_wine_quality [OPTIONS] - -Options: - --dry-run: Show commands without executing - --continue: Skip already completed experiments - --framework: Run only specific framework(s) - --model: Run only specific model(s) - --explainer: Run only specific explainer(s) - --verbose: Enable verbose logging - -Examples: - # Run all experiments - python -m experiments.scripts.wine_quality.analyze_wine_quality - - # Dry run to see what will be executed - python -m experiments.scripts.wine_quality.analyze_wine_quality --dry-run - - # Continue from where you left off - python -m experiments.scripts.wine_quality.analyze_wine_quality --continue - - # Run specific combination - python -m experiments.scripts.wine_quality.analyze_wine_quality --framework pnpxai --model xgb --explainer shap -""" - -import os -import sys -import argparse -import subprocess -from pathlib import Path -from datetime import datetime -from typing import Dict, List, Tuple -import time - -# Color codes for terminal output -class Colors: - RED = '\033[0;31m' - GREEN = '\033[0;32m' - YELLOW = '\033[1;33m' - BLUE = '\033[0;34m' - NC = '\033[0m' # No Color - - -# Define experiment combinations -# Framework -> Model -> Explainers -EXPERIMENTS: Dict[str, Dict[str, List[str]]] = { - "pnpxai": { - "xgb": ["lime", "shap"], - "tab_resnet": ["lime", "shap", "grad", "itg", "ig", "sg", "vg", "lrp"], - }, - "captum": { - "xgb": ["lime", "shap"], - "tab_resnet": ["lime", "shap", "grad", "itg", "ig", "sg", "lrp"], - }, - "omnixai": { - "xgb": ["lime", "shap"], - }, - "openxai": { - "tab_resnet": ["lime", "shap", "grad", "itg", "ig", "sg"], - }, - "autoxai": { - "xgb": ["lime", "shap"], - "tab_resnet": ["lime", "shap"], - }, -} - - -def print_colored(message: str, color: str = Colors.NC): - """Print colored message to terminal.""" - print(f"{color}{message}{Colors.NC}") - - -def check_if_completed(model: str, framework: str, explainer: str, results_dir: str = "results/Wine Quality") -> bool: - """ - Check if an experiment has already been completed. 
- - Args: - model: Model name (xgb, tab_resnet) - framework: Framework name (pnpxai, captum, etc.) - explainer: Explainer name (lime, shap, etc.) - results_dir: Base results directory - - Returns: - True if experiment is completed, False otherwise - """ - result_path = Path(results_dir) / model / framework / explainer / "explanations.npy" - return result_path.exists() - - -def run_single_experiment( - model: str, - framework: str, - explainer: str, - dry_run: bool = False, - verbose: bool = False, - continue_mode: bool = False, - n_samples: int = 25, -) -> Tuple[bool, float]: - """ - Run a single experiment with specified model/framework/explainer combination. - - Args: - model: Model name - framework: Framework name - explainer: Explainer name - dry_run: If True, only print command without executing - verbose: Enable verbose logging - continue_mode: Skip if already completed - - Returns: - Tuple of (success: bool, elapsed_time: float) - """ - print_colored("="*50, Colors.BLUE) - print_colored(f"Experiment: Model={model}, Framework={framework}, Explainer={explainer}", Colors.BLUE) - print_colored("="*50, Colors.BLUE) - - # Check if already completed in continue mode - if continue_mode and check_if_completed(model, framework, explainer): - print_colored("⊗ Already completed. Skipping.", Colors.YELLOW) - print() - return True, 0.0 - - # Build command - # Use explain_wine_quality directly since we're already in the wine_quality module - cmd = [ - sys.executable, "-m", "experiments.scripts.wine_quality.explain_wine_quality", - "--framework", framework, - "--model", model, - "--explainer", explainer, - "--n-samples", str(n_samples), - ] - - if verbose: - cmd.append("--verbose") - - if dry_run: - print_colored(f"[DRY RUN] {' '.join(cmd)}", Colors.YELLOW) - print() - return True, 0.0 - - # Execute command - start_time = time.time() - - # For autoxai, we need to use the special virtual environment - env = os.environ.copy() - if framework == "autoxai": - print_colored("Running AutoXAI with dedicated virtual environment...", Colors.YELLOW) - # Set the virtual environment's Python interpreter - venv_python = "/opt/autoxai_venv/bin/python" - if os.path.exists(venv_python): - cmd[0] = venv_python - else: - print_colored(f"Warning: AutoXAI venv not found at {venv_python}, using default Python", Colors.YELLOW) - - try: - result = subprocess.run( - cmd, - check=True, - env=env, - cwd=Path.cwd(), - ) - elapsed_time = time.time() - start_time - print_colored("✓ Experiment completed", Colors.GREEN) - print() - return True, elapsed_time - - except subprocess.CalledProcessError as e: - elapsed_time = time.time() - start_time - print_colored(f"✗ Experiment failed: {e}", Colors.RED) - print_colored("Continuing with next experiment...", Colors.YELLOW) - print() - return False, elapsed_time - - -def count_total_experiments( - experiments: Dict[str, Dict[str, List[str]]], - framework_filter: List[str] = None, - model_filter: List[str] = None, - explainer_filter: List[str] = None, -) -> int: - """Count total number of experiments to run.""" - total = 0 - for framework, models in experiments.items(): - if framework_filter and framework not in framework_filter: - continue - for model, explainers in models.items(): - if model_filter and model not in model_filter: - continue - for explainer in explainers: - if explainer_filter and explainer not in explainer_filter: - continue - total += 1 - return total - - -def run_all_experiments( - dry_run: bool = False, - continue_mode: bool = False, - verbose: bool = False, - 
framework_filter: List[str] = None, - model_filter: List[str] = None, - explainer_filter: List[str] = None, - n_samples: int = 25, -): - """ - Run all experiment combinations. - - Args: - dry_run: If True, only print commands without executing - continue_mode: Skip already completed experiments - verbose: Enable verbose logging - framework_filter: Only run these frameworks (None = all) - model_filter: Only run these models (None = all) - explainer_filter: Only run these explainers (None = all) - """ - print() - print_colored("="*70, Colors.BLUE) - print_colored("Wine Quality Analysis - Running All Experiments", Colors.BLUE) - print_colored("="*70, Colors.BLUE) - print() - - if dry_run: - print_colored("⚠ DRY RUN MODE: Commands will be printed but not executed", Colors.YELLOW) - print() - - if continue_mode: - print_colored("⚠ CONTINUE MODE: Already completed experiments will be skipped", Colors.YELLOW) - print() - - # Count total experiments - total_experiments = count_total_experiments( - EXPERIMENTS, framework_filter, model_filter, explainer_filter - ) - print_colored(f"Total experiments to run: {total_experiments}", Colors.GREEN) - print() - - # Track progress - completed = 0 - succeeded = 0 - failed = 0 - skipped = 0 - total_time = 0.0 - - start_time = time.time() - - # Run experiments - for framework, models in EXPERIMENTS.items(): - if framework_filter and framework not in framework_filter: - continue - - for model, explainers in models.items(): - if model_filter and model not in model_filter: - continue - - for explainer in explainers: - if explainer_filter and explainer not in explainer_filter: - continue - - # Check if skipping in continue mode - if continue_mode and check_if_completed(model, framework, explainer): - skipped += 1 - - # Run experiment - success, elapsed = run_single_experiment( - model, framework, explainer, - dry_run=dry_run, - verbose=verbose, - continue_mode=continue_mode, - n_samples=n_samples, - ) - - completed += 1 - total_time += elapsed - - if success: - succeeded += 1 - else: - failed += 1 - - # Print progress - if not dry_run: - print_colored(f"Progress: {completed}/{total_experiments} completed", Colors.GREEN) - if continue_mode and skipped > 0: - print_colored(f"({skipped} skipped)", Colors.YELLOW) - print() - - # Print summary - total_elapsed = time.time() - start_time - minutes = int(total_elapsed // 60) - seconds = int(total_elapsed % 60) - - print() - print_colored("="*70, Colors.BLUE) - print_colored("All Experiments Completed!", Colors.BLUE) - print_colored("="*70, Colors.BLUE) - print_colored(f"Total execution time: {minutes}m {seconds}s", Colors.GREEN) - - if not dry_run: - print_colored(f"Succeeded: {succeeded}", Colors.GREEN) - if failed > 0: - print_colored(f"Failed: {failed}", Colors.RED) - if skipped > 0: - print_colored(f"Skipped: {skipped}", Colors.YELLOW) - - print() - print_colored("Results are saved in: results/Wine Quality/", Colors.BLUE) - print() - - -def main(): - """Main entry point.""" - parser = argparse.ArgumentParser( - description="Run Wine Quality explanation experiments", - formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=""" -Examples: - # Run all experiments - python -m experiments.scripts.wine_quality.analyze_wine_quality - - # Dry run to see what will be executed - python -m experiments.scripts.wine_quality.analyze_wine_quality --dry-run - - # Continue from where you left off - python -m experiments.scripts.wine_quality.analyze_wine_quality --continue - - # Run specific framework - python -m 
experiments.scripts.wine_quality.analyze_wine_quality --framework pnpxai - - # Run specific combination - python -m experiments.scripts.wine_quality.analyze_wine_quality --framework pnpxai --model xgb --explainer shap - """ - ) - - parser.add_argument("--dry-run", action="store_true", - help="Show commands without executing") - parser.add_argument("--continue", dest="continue_mode", action="store_true", - help="Skip already completed experiments") - parser.add_argument("--verbose", action="store_true", - help="Enable verbose logging") - parser.add_argument("--framework", nargs="+", - choices=list(EXPERIMENTS.keys()), - help="Run only specific framework(s)") - parser.add_argument("--model", nargs="+", - choices=["xgb", "tab_resnet"], - help="Run only specific model(s)") - parser.add_argument("--explainer", nargs="+", - choices=["lime", "shap", "grad", "itg", "ig", "sg", "vg", "lrp"], - help="Run only specific explainer(s)") - - args = parser.parse_args() - - run_all_experiments( - dry_run=args.dry_run, - continue_mode=args.continue_mode, - verbose=args.verbose, - framework_filter=args.framework, - model_filter=args.model, - explainer_filter=args.explainer, - ) - - -if __name__ == "__main__": - main() diff --git a/experiments/scripts/wine_quality/explain_wine_quality.py b/experiments/scripts/wine_quality/explain_wine_quality.py deleted file mode 100755 index 546bdfa..0000000 --- a/experiments/scripts/wine_quality/explain_wine_quality.py +++ /dev/null @@ -1,1175 +0,0 @@ -#!/usr/bin/env python3 -""" -Wine Quality Explanation Script - -Generate explanations for Wine Quality predictions using various XAI frameworks. - -Supported frameworks: - - pnpxai: PnP XAI with hyperparameter optimization - - captum: PyTorch-based explainability library - - omnixai: OmniXAI framework (XGBoost only, LIME/SHAP only) - - openxai: OpenXAI framework (TabResNet only, no LRP/VG) - - autoxai: AutoXAI framework with hyperparameter optimization (TabResNet only, LIME/SHAP only) - -Supported models: - - xgb: XGBoost classifier - - tab_resnet: TabResNet (residual network for tabular data) - -Supported explainers: - - lime: Local Interpretable Model-agnostic Explanations - - shap: SHapley Additive exPlanations (KernelSHAP) - - ig: Integrated Gradients - - grad: Gradient (Saliency) - - sg: SmoothGrad - - itg: Input × Gradient - - vg: VarGrad (PnPXAI only) - - lrp: Layer-wise Relevance Propagation -""" -import os -import sys -import json -import argparse -import logging -import random -import warnings -from pathlib import Path -from datetime import datetime -from typing import Dict, Any, Optional - -# Suppress Captum LIME/KernelShap batch warning -warnings.filterwarnings('ignore', message='You are providing multiple inputs for Lime / Kernel SHAP attributions') - -import numpy as np -import torch -import torch.nn as nn -import yaml -from tqdm import tqdm - -# Add parent directory to path for imports -sys.path.append(str(Path(__file__).parent)) - -from script_utils import ( - load_wine_quality, - validate_explainer_args, - load_model, - wrap_model_for_pytorch, -) - - -def set_seeds(seed: int = 42): - """Set random seeds for reproducibility.""" - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - if torch.cuda.is_available(): - torch.cuda.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - - -def setup_logging(verbose: bool = True): - """Setup logging configuration.""" - level = logging.INFO if verbose else logging.WARNING - logging.basicConfig( - level=level, - format='%(asctime)s - %(levelname)s - 
%(message)s', - datefmt='%Y-%m-%d %H:%M:%S' - ) - return logging.getLogger(__name__) - - -def load_configs(config_dir: str = "configs/tabular") -> Dict[str, Any]: - """Load all configuration files.""" - config_path = Path(config_dir) - configs = {} - - # Load explainer config - with open(config_path / "explainer_config.yaml", 'r') as f: - configs['explainer'] = yaml.safe_load(f) - - # Load optuna config - with open(config_path / "optuna_config.yaml", 'r') as f: - configs['optuna'] = yaml.safe_load(f) - - return configs - - -def explain_with_pnpxai( - model: nn.Module, - X_test: np.ndarray, - y_test: np.ndarray, - explainer_name: Optional[str], - config: Dict[str, Any], - logger: logging.Logger, - batch_size: int = 32, - n_samples: int = 25, - n_steps: int = 50, - model_type: str = "xgb", -) -> np.ndarray: - """ - Generate explanations using PnPXAI framework. - - Args: - model: PyTorch model (wrapped if XGBoost) - X_test: Test features - y_test: Test labels - explainer_name: Explainer type (None for auto-optimization) - config: Configuration dictionary - logger: Logger instance - batch_size: Batch size for processing - n_samples: Number of samples for LIME/SHAP - n_steps: Number of steps for Integrated Gradients - model_type: Model type ('xgb' or 'tab_resnet') - - Returns: - Explanation array of shape (n_samples, n_features) - """ - import pandas as pd - import itertools - from collections import defaultdict - from torch.utils.data import DataLoader, Dataset - from pnpxai import Experiment, AutoExplanation - from pnpxai.core.modality.modality import Modality - from pnpxai.explainers import Lime, KernelShap - from pnpxai.evaluator.metrics import AbPC, Complexity, Metric - from sklearn.cluster import KMeans as SklearnKMeans - from pnpxai.explainers.utils.baselines import BaselineFunction - from pnpxai.explainers.utils.postprocess import NormalizationFunction, minmax - from pnpxai.explainers.base import Tunable - from pnpxai.explainers.types import TunableParameter - - logger.info("Setting up PnPXAI framework...") - - # Define custom dataset - class TabDataset(Dataset): - def __init__(self, inputs, labels): - super().__init__() - self.inputs = inputs - self.labels = labels - - def __len__(self): - return len(self.inputs) - - def __getitem__(self, idx): - return self.inputs[idx], self.labels[idx] - - def collate_fn(batch): - inputs = torch.stack([torch.from_numpy(d[0]) for d in batch]).to(torch.float) - labels = torch.tensor([d[1] for d in batch], dtype=torch.long) - return inputs, labels - - # Create dataset and dataloader - test_dataset = TabDataset(X_test, y_test) - test_loader = DataLoader( - test_dataset, - batch_size=batch_size, - collate_fn=collate_fn, - shuffle=False, - pin_memory=True, - ) - - # Create modality from sample batch - sample_batch = next(iter(test_loader)) - modality = Modality( - dtype=sample_batch[0].dtype, - ndims=sample_batch[0].dim(), - ) - - # Define custom baseline function (KMeans) - bg_data_idx = np.random.choice(len(X_test), size=50, replace=False) - X_bg = X_test[bg_data_idx] - - class KMeans(BaselineFunction, Tunable): - def __init__(self, background_data, n_clusters=8): - self.background_data = background_data - self.n_clusters = TunableParameter( - name='n_clusters', - current_value=n_clusters, - dtype=int, - is_leaf=True, - space={'low': 10, 'high': len(background_data), 'step': 10}, - ) - self.kmeans_ = SklearnKMeans(n_clusters).fit(background_data) - BaselineFunction.__init__(self) - Tunable.__init__(self) - self.register_tunable_params([self.n_clusters]) 
- - def __call__(self, inputs): - if inputs.ndim == 3: - inputs = inputs.squeeze(1) - cluster_ids = self.kmeans_.predict(inputs.to(torch.float64).numpy()) - cluster_centers = self.kmeans_.cluster_centers_[cluster_ids] - return torch.from_numpy(cluster_centers).float().to(inputs.device) - - # Define custom normalization functions - class Pos(NormalizationFunction): - def __init__(self): - super().__init__() - - def __call__(self, attrs): - return attrs.abs() - - class MinMax(NormalizationFunction): - def __init__(self): - super().__init__() - - def __call__(self, attrs): - return minmax(attrs) / 1000 - - # Define compound metric - class CompoundMetric(Metric): - def __init__( - self, - model, - cmpd_metrics, - weights, - explainer=None, - target_input_keys=None, - additional_input_keys=None, - output_modifier=None, - ): - super().__init__( - model, explainer, target_input_keys, - additional_input_keys, output_modifier, - ) - assert len(cmpd_metrics) == len(weights) - self.cmpd_metrics = cmpd_metrics - self.weights = weights - - def evaluate(self, inputs, targets, attrs): - values = torch.zeros(attrs.size(0)).to(attrs.device) - for weight, metric in zip(self.weights, self.cmpd_metrics): - values += weight * metric.set_explainer(self.explainer).evaluate(inputs, targets, attrs) - return values - - # Create experiment based on model type - logger.info("Creating PnPXAI experiment...") - - if model_type == "tab_resnet": - expr = AutoExplanation( - model=model, - data=test_loader, - modality=modality, - target_input_keys=[0], # Current test_loader batches data as tuple of (inputs, targets). 0 means the location of inputs in the tuple - target_class_extractor=lambda outputs: outputs.argmax(-1), - label_key='labels', - target_labels=False, # Gets attributions on the prediction for all explainer if False. 
- ) - - expr.metrics.delete('morf') - expr.metrics.delete('lerf') - - elif model_type == "xgb": - expr = Experiment( - model=model, - data=test_loader, - modality=modality, - target_input_keys=[0], # feature location in batch from dataloader - target_class_extractor=lambda outputs: outputs.argmax(-1), # extract target class from output batch - label_key=-1, # label location in input batch from dataloader - ) - - # add explainers - expr.explainers.add('kernel_shap', KernelShap) - expr.explainers.add('lime', Lime) - - # add metrics - expr.metrics.add('abpc', AbPC) - - else: - raise ValueError("Invalid model type") - - # Add custom baseline function and default kwargs - expr.modality.util_functions['baseline_fn'].add('kmeans', KMeans) - expr.modality.util_functions['baseline_fn'].add_default_kwargs( - 'background_data', X_bg - ) - - # Add custom normalization functions and complexity metric - expr.modality.util_functions['normalization_fn'].add('pos', Pos) - expr.modality.util_functions['normalization_fn'].add('minmax', MinMax) - expr.metrics.add('cmpx', Complexity) - expr.metrics.add('cmpd', CompoundMetric) - - # Map explainer names - PNP_INV_MAP = { - "kernel_shap": "shap", - "lime": "lime", - "gradient": "grad", - "grad_x_input": "itg", - "integrated_gradients": "ig", - "smooth_grad": "sg", - "lrp_uniform_epsilon": "lrp", - "var_grad": "vg", - } - - explainer_map = { - 'shap': 'kernel_shap', - 'lime': 'lime', - 'grad': 'gradient', - 'itg': 'grad_x_input', - 'ig': 'integrated_gradients', - 'sg': 'smooth_grad', - 'vg': 'var_grad', - 'lrp': 'lrp_uniform_epsilon', - } - - if explainer_name: - pnp_explainer = explainer_map.get(explainer_name, explainer_name) - logger.info(f"Using explainer: {pnp_explainer}") - - # Setup metric options - metric_options = { - 'cmpd_metrics': [ - expr.create_metric('abpc'), - expr.create_metric('cmpx'), - ], - 'weights': [.7, -.3] - } - - # Set direction - direction = 'maximize' - - # Setup disable_tunable_params - disable_tunable_params = {} - if pnp_explainer in ['lime', 'kernel_shap']: - disable_tunable_params['n_samples'] = n_samples - if pnp_explainer in ['integrated_gradients']: - disable_tunable_params['n_steps'] = n_steps - - logger.info("Running hyperparameter optimization...") - opt_results = expr.optimize( - explainer_key=pnp_explainer, - metric_key='cmpd', - metric_options=metric_options, - direction=direction, - disable_tunable_params=disable_tunable_params, - **config['optuna'] - ) - - logger.info(f"Best value: {opt_results.study.best_trial.value:.4f}") - - # Re-set seeds before generating explanations for reproducibility - # LIME/SHAP are stochastic, so we need to fix the seed again - logger.info("Re-setting random seeds for reproducible explanation generation...") - set_seeds(config['optuna'].get('seed', 42)) - - # Generate explanations - opt_explainer = opt_results.explainer - th_test_input = torch.tensor(test_dataset.inputs, dtype=torch.float32) - targets = model(th_test_input).argmax(-1) - - exp_name = PNP_INV_MAP[pnp_explainer] - - if exp_name in ["shap", "lime"]: - explanations = opt_explainer.attribute(th_test_input, targets)[0].detach().cpu().numpy() - else: - explanations = opt_explainer.attribute(th_test_input, targets).squeeze(1).detach().cpu().numpy() - - else: - raise ValueError("PnPXAI requires explainer name") - - return explanations - - -def explain_with_captum( - model: nn.Module, - X_test: np.ndarray, - y_test: np.ndarray, - explainer_name: str, - config: Dict[str, Any], - logger: logging.Logger, - n_samples: int = 25, -) -> 
np.ndarray: - """ - Generate explanations using Captum framework. - - Args: - model: PyTorch model (wrapped if XGBoost) - X_test: Test features - y_test: Test labels - explainer_name: Explainer type - config: Configuration dictionary - logger: Logger instance - n_samples: Number of samples for LIME/SHAP (default: 25) - - Returns: - Explanation array of shape (n_samples, n_features) - """ - from captum.attr import ( - KernelShap, Lime, IntegratedGradients, Saliency, - InputXGradient, NoiseTunnel, LRP - ) - from captum.attr._utils.lrp_rules import EpsilonRule - - logger.info(f"Setting up Captum framework with {explainer_name}...") - - # Create explainer - if explainer_name == "shap": - explainer = KernelShap(model) - elif explainer_name == "lime": - explainer = Lime(model, interpretable_model=None) - elif explainer_name == "grad": - explainer = Saliency(model) - elif explainer_name == "itg": - explainer = InputXGradient(model) - elif explainer_name == "ig": - explainer = IntegratedGradients(model, multiply_by_inputs=True) - elif explainer_name == "sg": - explainer = NoiseTunnel(Saliency(model)) - elif explainer_name == "lrp": - # Set LRP rules for batch norm layers - if hasattr(model, 'res_blocks'): - for block in model.res_blocks: - if hasattr(block, 'bn'): - block.bn.rule = EpsilonRule() - if hasattr(model, 'bn'): - model.bn.rule = EpsilonRule() - explainer = LRP(model) - else: - raise ValueError(f"Unknown Captum explainer: {explainer_name}") - - # Convert to tensor - X_test_t = torch.tensor(X_test, dtype=torch.float32) - targets = model(X_test_t).argmax(dim=1) - - # Generate explanations - logger.info("Generating explanations...") - - if explainer_name == "grad": - explanations = explainer.attribute(X_test_t, target=targets, abs=False) - explanations = explanations.detach().numpy() - - elif explainer_name == "sg": - explanations = explainer.attribute(X_test_t, target=targets, nt_type='smoothgrad') - explanations = explanations.detach().numpy() - - elif explainer_name in ("shap", "lime"): - # Process in batches to avoid memory issues - attrs_list = [] - for i in tqdm(range(len(X_test_t)), desc="Explaining"): - input_i = X_test_t[i].unsqueeze(0) - attr_i = explainer.attribute(input_i, target=targets[i], n_samples=n_samples) - attrs_list.append(attr_i.detach().cpu().numpy()) - explanations = np.concatenate(attrs_list, axis=0) - - else: - explanations = explainer.attribute(X_test_t, target=targets) - explanations = explanations.detach().numpy() - - return explanations - - -def explain_with_omnixai( - model: nn.Module, - X_test: np.ndarray, - y_test: np.ndarray, - explainer_name: str, - config: Dict[str, Any], - logger: logging.Logger, - feature_metadata: Dict[str, Any], - raw_data, - n_samples: int = 64, -) -> np.ndarray: - """ - Generate explanations using OmniXAI framework. - - OmniXAI only supports XGBoost models and LIME/SHAP explainers. 
- - Args: - model: PyTorch model (wrapped XGBoost) - X_test: Test features - y_test: Test labels - explainer_name: Explainer type (lime or shap) - config: Configuration dictionary - logger: Logger instance - feature_metadata: Feature metadata for transformations - raw_data: Raw data for training - n_samples: Number of samples for LIME/SHAP - - Returns: - Explanation array of shape (n_samples, n_features) - """ - import functools - import pandas as pd - from omnixai.data.tabular import Tabular - from omnixai.explainers.tabular import TabularExplainer - from script_utils.data_utils import transform, invert_transform, find_idx - - logger.info(f"Setting up OmniXAI framework with {explainer_name}...") - - NAME_MAP = { - "lime": "LimeTabular", - "shap": "ShapTabular" - } - - explainer_nm = NAME_MAP[explainer_name] - - # Prepare training data - raw_data = raw_data.fillna("missing") - categorical_columns = [c for c in raw_data.columns if feature_metadata[c]["type"] == "categorical"] - train_data = Tabular(raw_data, categorical_columns=categorical_columns) - - # Get target function from wrapped model - if hasattr(model, 'xgb_model'): - target_function = model.xgb_model.predict_proba - else: - raise ValueError("OmniXAI requires XGBoost model") - - # Create transformation functions - transform_fn = functools.partial(transform, feature_metadata=feature_metadata) - - def prep(z): - return transform_fn(z.data.fillna("missing")) - - # Create explainer - explainer = TabularExplainer( - explainers=[explainer_nm], - mode="classification", - data=train_data, - model=target_function, - preprocess=prep, - ) - - # Prepare test instances - test_instances = invert_transform(X_test, feature_metadata).fillna("missing") - - # Set parameters - params = { - "LimeTabular": {"num_features": raw_data.shape[1], "num_samples": n_samples}, - "ShapTabular": {"nsamples": n_samples} - } - - # Generate explanations - logger.info("Generating explanations...") - exp_obj = explainer.explain(test_instances, params=params) - - # Extract and reorder scores - scores = [] - for i in range(test_instances.shape[0]): - exp = exp_obj[explainer_nm].get_explanations(i) - sorted_idx = find_idx(exp['features'], exp['instance'].columns.tolist()) - scores.append([exp['scores'][j] for j in sorted_idx]) - - explanations = np.array(scores) - return explanations - - -def explain_with_openxai( - model: nn.Module, - X_test: np.ndarray, - y_test: np.ndarray, - X_train: np.ndarray, - explainer_name: str, - config: Dict[str, Any], - logger: logging.Logger, - feature_metadata: Dict[str, Any], - batch_size: int = 32, - n_samples: int = 64, -) -> np.ndarray: - """ - Generate explanations using OpenXAI framework. - - OpenXAI only supports TabResNet models (not XGBoost) and does not support LRP/VG. 
- - Args: - model: PyTorch model (TabResNet) - X_test: Test features - y_test: Test labels - X_train: Training features (required for LIME/IG) - explainer_name: Explainer type - config: Configuration dictionary - logger: Logger instance - feature_metadata: Feature metadata for categorical aggregation - batch_size: Batch size for processing - n_samples: Number of samples for LIME/SHAP - - Returns: - Explanation array of shape (n_samples, n_features_original) - """ - from torch.utils.data import DataLoader, TensorDataset - from openxai import Explainer - from openxai.experiment_utils import fill_param_dict - - logger.info(f"Setting up OpenXAI framework with {explainer_name}...") - - # Convert to tensors - test_input = torch.tensor(X_test, dtype=torch.float32) - train_input = None - explainer_params = {} - - # Setup training data for LIME/IG - if explainer_name in ['lime', 'ig']: - train_input = torch.tensor(X_train, dtype=torch.float32) - explainer_params = fill_param_dict(explainer_name, {}, train_input) - - if explainer_name in ['lime', 'shap']: - explainer_params['n_samples'] = n_samples - - # Create explainer - explainer = Explainer(method=explainer_name, model=model, param_dict=explainer_params) - - # Get predictions - predicted_labels = model(test_input).detach().argmax(dim=1) - - # Create data loader - dataset = TensorDataset(test_input, predicted_labels) - data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=False) - - # Generate explanations - logger.info("Generating explanations...") - all_explanations = [] - for batch_inputs, batch_labels in tqdm(data_loader, desc="Explaining batches"): - batch_explanations = explainer.get_explanations(batch_inputs, label=batch_labels) - all_explanations.append(batch_explanations) - - # Combine batches - combined_explanations = torch.cat(all_explanations, dim=0) - - # Aggregate categorical features - processed_explanations = [] - for feature_name, feature_info in feature_metadata.items(): - if feature_info['type'] == 'categorical': - feature_index = feature_info['index'] - onehot_encoded = test_input[:, feature_index] - explanation_values = combined_explanations[:, feature_index] - categorical_explanation = (onehot_encoded * explanation_values).sum(dim=1) - processed_explanations.append(categorical_explanation) - else: - feature_index = feature_info['index'] - processed_explanations.append(combined_explanations[:, feature_index]) - - explanations = torch.stack(processed_explanations, dim=1).detach().numpy() - return explanations - - -class PyTorchModelWrapper: - """ - Wrapper for PyTorch models to add predict and predict_proba methods for AutoXAI. - - AutoXAI expects models to have predict() and predict_proba() methods like sklearn models. - This wrapper adds these methods for PyTorch neural network models. - """ - def __init__(self, pytorch_model): - self.model = pytorch_model - self.model.eval() - - def predict(self, X): - """ - Predict class labels for samples. - - Args: - X: Input array of shape (n_samples, n_features) - - Returns: - Predicted class labels of shape (n_samples,) - """ - import torch - if not isinstance(X, torch.Tensor): - X = torch.FloatTensor(X) - - with torch.no_grad(): - logits = self.model(X) - predictions = torch.argmax(logits, dim=1) - - return predictions.cpu().numpy() - - def predict_proba(self, X): - """ - Predict class probabilities for samples. 
- - Args: - X: Input array of shape (n_samples, n_features) - - Returns: - Predicted class probabilities of shape (n_samples, n_classes) - """ - import torch - if not isinstance(X, torch.Tensor): - X = torch.FloatTensor(X) - - with torch.no_grad(): - logits = self.model(X) - probas = torch.softmax(logits, dim=1) - - return probas.cpu().numpy() - - -def explain_with_autoxai( - model: nn.Module, - X_test: np.ndarray, - y_test: np.ndarray, - explainer_name: str, - config: Dict[str, Any], - logger: logging.Logger, - raw_data, - batch_size: int = 32, - n_samples: int = 64, -) -> np.ndarray: - """ - Generate explanations using AutoXAI framework. - - AutoXAI only supports TabResNet models and LIME/SHAP explainers. - - Args: - model: PyTorch model (TabResNet) - X_test: Test features - y_test: Test labels - explainer_name: Explainer type (lime or shap) - config: Configuration dictionary - logger: Logger instance - raw_data: Raw data for feature names - batch_size: Batch size for processing - n_samples: Number of samples to explain - - Returns: - Explanation array of shape (n_samples, n_features) - """ - import sys - import os - import glob - - # Add AutoXAI virtual environment to sys.path - autoxai_venv = "/opt/autoxai_venv" - if os.path.exists(autoxai_venv): - # Find site-packages directory in the virtual environment - site_packages = glob.glob(f"{autoxai_venv}/lib/python*/site-packages") - if site_packages: - sys.path.insert(0, site_packages[0]) - - autoxai_path = os.path.join(os.path.dirname(__file__), "lib", "AutoXAI") - sys.path.insert(0, autoxai_path) - from hyperparameters_optimization import get_parameters - from XAI_solutions import set_up_explainer, get_local_exp - - logger.info(f"Setting up AutoXAI framework with {explainer_name}...") - - AUTOXAI_NAME_MAP = {"shap": "SHAP", "lime": "LIME"} - autoxai_nm = AUTOXAI_NAME_MAP[explainer_name] - - bg_size = min(50, len(X_test)) - - # Prepare model for AutoXAI - # - For XGBoost models wrapped in TorchModelForXGBoost, unwrap to get the original XGBoost model - # - For PyTorch models (tab_resnet, lr), wrap them to add predict/predict_proba methods - from script_utils.model_utils import TorchModelForXGBoost - if isinstance(model, TorchModelForXGBoost): - unwrapped_model = model.xgb_model - elif isinstance(model, nn.Module): - # Wrap PyTorch models to add predict() and predict_proba() methods - unwrapped_model = PyTorchModelWrapper(model) - else: - unwrapped_model = model - - # Setup context - properties_list = ["robustness", "fidelity", "conciseness"] - context = {} - # Use background samples for explainer setup, but explain all test samples - rand_idx = np.random.randint(0, X_test.shape[0], bg_size) - context["X"] = X_test[rand_idx] # Background samples for SHAP explainer - context["y"] = y_test[rand_idx] - context["feature_names"] = list(raw_data.columns) - context["verbose"] = False - context["task"] = "classification" - context["question"] = "Why" - context["session_id"] = f"_{bg_size}_wine" - context["scaling"] = "Std" - context["weights"] = [1, 2, 0.5] - context["distance"] = "cosine" - context["explanations"] = [] - context["model"] = unwrapped_model - context["ES"] = True - context["IS"] = True - - score_hist = { - "xai_sol": [], "epoch": [], "aggregated_score": [], - "parameters": [], "robustness": [], "scaled_robustness": [], - "fidelity": [], "scaled_fidelity": [], - "conciseness": [], "scaled_conciseness": [] - } - - # Get default parameters - logger.info("Preparing AutoXAI explainer with default parameters...") - default_parameters = 
get_parameters( - autoxai_nm, score_hist, "default", properties_list, context) - - # Setup explainer - context['explainer'] = set_up_explainer(autoxai_nm, default_parameters, context) - - # Generate explanations for all test samples - logger.info("Generating explanations...") - explanations = np.zeros_like(X_test) - for i in tqdm(range(len(X_test)), desc="Explaining"): - e = get_local_exp(autoxai_nm, X_test[i], default_parameters, context) - idx = default_parameters["most_influent_features"] - explanations[i, idx] = e - - return explanations - - -def evaluate_explanations( - explanations: np.ndarray, - model: nn.Module, - X_test: np.ndarray, - y_test: np.ndarray, - logger: logging.Logger, - batch_size: int = 32, -) -> Dict[str, np.ndarray]: - """ - Evaluate explanations using PnPXAI metrics. - - Args: - explanations: Explanation array - model: PyTorch model - X_test: Test features - y_test: Test labels - logger: Logger instance - batch_size: Batch size for evaluation - - Returns: - Dictionary with metric arrays - """ - from torch.utils.data import DataLoader, TensorDataset - from pnpxai import Experiment - from pnpxai.core.modality.modality import Modality - from pnpxai.explainers import KernelShap - from pnpxai.evaluator.metrics import AbPC, Complexity, Metric - - logger.info("Evaluating explanations...") - - # Create dataloader - test_dataset = TensorDataset( - torch.tensor(X_test, dtype=torch.float32), - torch.tensor(y_test, dtype=torch.long) - ) - test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False) - - # Create experiment for metrics - sample_batch = next(iter(test_loader)) - modality = Modality( - dtype=sample_batch[0].dtype, - ndims=sample_batch[0].dim(), - ) - - expr = Experiment( - model=model, - data=test_loader, - modality=modality, - target_input_keys=[0], - target_class_extractor=lambda outputs: outputs.argmax(-1), - label_key=-1, - ) - - # Add explainers - expr.explainers.add('kernel_shap', KernelShap) - - # Add metrics - expr.metrics.add('abpc', AbPC) - expr.metrics.add('cmpx', Complexity) - - # Compound metric - class CompoundMetric(Metric): - def __init__(self, model, cmpd_metrics, weights, explainer=None, - target_input_keys=None, additional_input_keys=None, output_modifier=None): - super().__init__(model, explainer, target_input_keys, additional_input_keys, output_modifier) - self.cmpd_metrics = cmpd_metrics - self.weights = weights - - def evaluate(self, inputs, targets, attrs): - values = torch.zeros(attrs.size(0)).to(attrs.device) - for weight, metric in zip(self.weights, self.cmpd_metrics): - values += weight * metric.set_explainer(self.explainer).evaluate(inputs, targets, attrs) - return values - - expr.metrics.add('cmpd', CompoundMetric) - - # Create dummy explainer for evaluation - dummy_explainer = expr.create_explainer('kernel_shap') - - # Evaluate each metric - results = {} - X_test_t = torch.tensor(X_test, dtype=torch.float32) - explanations_t = torch.tensor(explanations, dtype=torch.float32) - - for metric_name in ['abpc', 'cmpx']: - metric = expr.create_metric(metric_name) - metric_values = [] - - for i in range(len(X_test)): - inputs = {0: X_test_t[i].unsqueeze(0)} - targets = model(inputs[0]).argmax(-1) - attrs = explanations_t[i].unsqueeze(0) - - value = metric.set_explainer(dummy_explainer).evaluate(inputs, targets, attrs) - metric_values.append(value.item()) - - results[metric_name] = np.array(metric_values) - - # Compound metric - metric_options = { - 'cmpd_metrics': [expr.create_metric('abpc'), expr.create_metric('cmpx')], - 
'weights': [0.7, -0.3] - } - cmpd_metric = expr.create_metric('cmpd', **metric_options) - cmpd_values = [] - - for i in range(len(X_test)): - inputs = {0: X_test_t[i].unsqueeze(0)} - targets = model(inputs[0]).argmax(-1) - attrs = explanations_t[i].unsqueeze(0) - - value = cmpd_metric.set_explainer(dummy_explainer).evaluate(inputs, targets, attrs) - cmpd_values.append(value.item()) - - results['cmpd'] = np.array(cmpd_values) - - # Log summary - for metric_name, values in results.items(): - logger.info(f" {metric_name.upper()}: {values.mean():.4f} ± {values.std():.4f}") - - return results - - -def save_results( - explanations: np.ndarray, - metrics: Dict[str, np.ndarray], - output_dir: Path, - metadata: Dict[str, Any], - logger: logging.Logger, -): - """Save explanations, metrics, and metadata.""" - output_dir.mkdir(parents=True, exist_ok=True) - - # Save explanations - np.save(output_dir / "explanations.npy", explanations) - logger.info(f"Saved explanations: {output_dir / 'explanations.npy'}") - - # Save metrics - for metric_name, values in metrics.items(): - np.save(output_dir / f"{metric_name}.npy", values) - logger.info(f"Saved {metric_name}: {output_dir / f'{metric_name}.npy'}") - - # Save metadata - with open(output_dir / "metadata.json", 'w') as f: - json.dump(metadata, f, indent=2) - logger.info(f"Saved metadata: {output_dir / 'metadata.json'}") - - -def main(): - parser = argparse.ArgumentParser( - description="Generate explanations for Wine Quality predictions", - formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=""" -Examples: - # PnPXAI with SHAP - python explain_wine_quality.py --framework pnpxai --model xgb --explainer shap - - # Captum with Integrated Gradients - python explain_wine_quality.py --framework captum --model tab_resnet --explainer ig - - # Save to custom directory - python explain_wine_quality.py --framework pnpxai --model xgb --explainer lime --output results/custom - """ - ) - - parser.add_argument( - "--framework", - type=str, - required=True, - choices=["pnpxai", "captum", "omnixai", "openxai", "autoxai"], - help="XAI framework to use" - ) - parser.add_argument( - "--model", - type=str, - required=True, - choices=["xgb", "tab_resnet"], - help="Model type" - ) - parser.add_argument( - "--explainer", - type=str, - required=True, - choices=["lime", "shap", "ig", "grad", "sg", "itg", "vg", "lrp"], - help="Explainer method" - ) - parser.add_argument( - "--data-dir", - type=str, - default="data/Wine Quality", - help="Directory containing preprocessed data" - ) - parser.add_argument( - "--config-dir", - type=str, - default="configs/tabular", - help="Directory containing config files" - ) - parser.add_argument( - "--output", - type=str, - default=None, - help="Output directory (default: results/Wine Quality/{model}/{framework}/{explainer})" - ) - parser.add_argument( - "--seed", - type=int, - default=42, - help="Random seed" - ) - parser.add_argument( - "--batch-size", - type=int, - default=32, - help="Batch size for processing" - ) - parser.add_argument( - "--n-samples", - type=int, - default=64, - help="Number of samples for LIME/SHAP" - ) - parser.add_argument( - "--verbose", - action="store_true", - help="Enable verbose logging" - ) - parser.add_argument( - "--max-test-samples", - type=int, - default=None, - help="Maximum number of test samples to use (default: use all)" - ) - - args = parser.parse_args() - - # Setup - logger = setup_logging(args.verbose) - logger.info("Wine Quality Explanation Script") - logger.info("="*50) - - # Validate arguments - 
try: - validate_explainer_args(args.framework, args.model, args.explainer) - except ValueError as e: - logger.error(f"Invalid arguments: {e}") - sys.exit(1) - - # Set seeds - set_seeds(args.seed) - logger.info(f"Random seed: {args.seed}") - - # Load configurations - configs = load_configs(args.config_dir) - - # Load data - logger.info(f"Loading data from: {args.data_dir}") - X_train, X_test, y_train, y_test, feature_metadata, raw_data = load_wine_quality(args.data_dir) - - # Limit test samples if specified - if args.max_test_samples is not None and args.max_test_samples < len(X_test): - logger.info(f"Limiting test samples from {len(X_test)} to {args.max_test_samples}") - X_test = X_test[:args.max_test_samples] - y_test = y_test[:args.max_test_samples] - - logger.info(f" Train: {len(X_train)} samples") - logger.info(f" Test: {len(X_test)} samples, {X_test.shape[1]} features") - - # Load model - logger.info(f"Loading {args.model} model...") - data_path = Path(args.data_dir) - - if args.model == "xgb": - model_path = data_path / "xgb_model.json" - elif args.model == "tab_resnet": - model_path = data_path / "resnet_model.pth" - - model = load_model( - args.model, - model_path, - input_dim=X_train.shape[1], - output_dim=2, - num_blocks=1 - ) - - # Wrap model for PyTorch - model = wrap_model_for_pytorch(model, args.model) - logger.info(f"Model loaded: {model_path}") - - # Generate explanations - start_time = datetime.now() - - if args.framework == "pnpxai": - explanations = explain_with_pnpxai( - model, X_test, y_test, args.explainer, - configs, logger, args.batch_size, args.n_samples, n_steps=50, model_type=args.model - ) - elif args.framework == "captum": - explanations = explain_with_captum( - model, X_test, y_test, args.explainer, - configs, logger, args.n_samples - ) - elif args.framework == "omnixai": - explanations = explain_with_omnixai( - model, X_test, y_test, args.explainer, - configs, logger, feature_metadata, raw_data, args.n_samples - ) - elif args.framework == "openxai": - explanations = explain_with_openxai( - model, X_test, y_test, X_train, args.explainer, - configs, logger, feature_metadata, args.batch_size, args.n_samples - ) - elif args.framework == "autoxai": - explanations = explain_with_autoxai( - model, X_test, y_test, args.explainer, - configs, logger, raw_data, args.batch_size, args.n_samples - ) - else: - raise ValueError(f"Unknown framework: {args.framework}") - - elapsed_time = (datetime.now() - start_time).total_seconds() - logger.info(f"Explanation generation completed in {elapsed_time:.2f}s") - logger.info(f"Explanation shape: {explanations.shape}") - - # Evaluate explanations - metrics = evaluate_explanations( - explanations, model, X_test, y_test, - logger, args.batch_size - ) - - # Prepare output directory - if args.output: - output_dir = Path(args.output) - else: - output_dir = Path(f"results/Wine Quality/{args.model}/{args.framework}/{args.explainer}") - - # Prepare metadata - metadata = { - "framework": args.framework, - "model": args.model, - "explainer": args.explainer, - "seed": args.seed, - "n_samples": args.n_samples, - "batch_size": args.batch_size, - "data_shape": { - "train": list(X_train.shape), - "test": list(X_test.shape), - }, - "explanation_shape": list(explanations.shape), - "timestamp": datetime.now().isoformat(), - "elapsed_time": elapsed_time, - "metrics_summary": { - k: {"mean": float(v.mean()), "std": float(v.std())} - for k, v in metrics.items() - } - } - - # Save results - save_results(explanations, metrics, output_dir, metadata, logger) 
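# A note on the composite ("cmpd") score summarized in the metadata above: the
# CompoundMetric combines faithfulness (AbPC) and Complexity with weights
# [0.7, -0.3], i.e. cmpd = 0.7 * abpc - 0.3 * cmpx per sample. A minimal
# standalone sketch of that weighting (the score values below are hypothetical,
# not taken from any result files):
import numpy as np

abpc = np.array([0.12, 0.10, 0.08])  # hypothetical per-sample AbPC scores
cmpx = np.array([0.50, 0.45, 0.60])  # hypothetical per-sample Complexity scores
cmpd = 0.7 * abpc - 0.3 * cmpx       # same weights passed to CompoundMetric above
print(cmpd.mean(), cmpd.std())       # what metrics_summary reports for "cmpd"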
- - logger.info("="*50) - logger.info(f"Results saved to: {output_dir.absolute()}") - logger.info("Done!") - - -if __name__ == "__main__": - main() diff --git a/experiments/scripts/wine_quality/generate_latex_table.py b/experiments/scripts/wine_quality/generate_latex_table.py deleted file mode 100644 index 7347d12..0000000 --- a/experiments/scripts/wine_quality/generate_latex_table.py +++ /dev/null @@ -1,235 +0,0 @@ -#!/usr/bin/env python3 -""" -Generate LaTeX table from Wine Quality experiment results -""" - -import os -import json -from pathlib import Path -from collections import defaultdict - -def load_metric(path): - """Load a numpy metric file and return mean value""" - import numpy as np - return np.load(path).mean() - -def collect_results(results_dir="results/Wine Quality"): - """ - Collect all experiment results. - Returns dict: {model: {framework: {explainer: {metric: value}}}} - """ - results_path = Path(results_dir) - data = defaultdict(lambda: defaultdict(lambda: defaultdict(dict))) - - for root, dirs, files in os.walk(results_path): - if "explanations.npy" in files: - parts = Path(root).relative_to(results_path).parts - if len(parts) != 3: - continue - - model, framework, explainer = parts - - try: - import numpy as np - abpc = np.load(os.path.join(root, "abpc.npy")).mean() - cmpx = np.load(os.path.join(root, "cmpx.npy")).mean() - cmpd = np.load(os.path.join(root, "cmpd.npy")).mean() - - data[model][framework][explainer] = { - 'faithfulness': abpc, - 'complexity': cmpx, - 'composite': cmpd - } - except Exception as e: - print(f"Error loading {root}: {e}") - continue - - return data - -def format_value(value, best_value, is_complexity=False): - """Format value with bold if it's the best""" - if value is None: - return "-" - - # For complexity, lower is better - if is_complexity: - is_best = abs(value - best_value) < 1e-6 if best_value is not None else False - else: - is_best = abs(value - best_value) < 1e-6 if best_value is not None else False - - formatted = f"{value:.4f}" - return f"\\textbf{{{formatted}}}" if is_best else formatted - -def get_best_value(values, is_complexity=False): - """Get the best value from a list (considering None values)""" - valid_values = [v for v in values if v is not None] - if not valid_values: - return None - - if is_complexity: - return min(valid_values) - else: - return max(valid_values) - -def generate_latex_table(data): - """Generate LaTeX table matching the original format""" - - # Define the structure - EXPLAINER_MAP = { - "shap": "KernelSHAP", - "lime": "LIME", - "grad": "Gradient", - "itg": "Grad.$\\times$Input", - "ig": "Integrated Gradients", - "sg": "SmoothGrad", - "vg": "VarGrad", - "lrp": "LRP" - } - - MODEL_MAP = { - "xgb": "XGBoost", - "tab_resnet": "ResNet" - } - - FRAMEWORK_ORDER = ["captum", "omnixai", "autoxai", "openxai", "pnpxai"] - FRAMEWORK_NAMES = { - "captum": "Captum", - "omnixai": "OmniXAI", - "autoxai": "AutoXAI", - "openxai": "OpenXAI", - "pnpxai": "PnPXAI" - } - - # XGBoost explainers - XGB_EXPLAINERS = ["shap", "lime"] - - # ResNet explainers - RESNET_EXPLAINERS = ["shap", "lime", "grad", "itg", "ig", "sg", "vg", "lrp"] - - # Build LaTeX table - lines = [] - lines.append("\\begin{table}[!th]") - lines.append(" \\caption{\\textbf{Comparison of explanation performance on Wine Quality dataset.}") - lines.append(" Evaluation of XGBoost and TabResNet models across three key metrics:") - lines.append(" Faithfulness (higher is better $\\uparrow$), Complexity (lower is better $\\downarrow$), and a Composite [Faithfulness, 
Simplicity] score ($\\uparrow$).") - lines.append(" The table compares PnP-XAI against Captum, AutoXAI, OmniXAI, and OpenXAI.") - lines.append(" Bold values indicate the best score per row; dashes (-) denote unsupported combinations.}") - lines.append(" \\label{tab:wine_performance}") - lines.append(" \\centering") - lines.append(" \\resizebox{\\textwidth}{!}{%") - lines.append(" \\begin{tabular}{lll||cccc|c}") - lines.append(" \\toprule") - lines.append(" \\textbf{Model} & \\textbf{Metric} & \\textbf{Explainer} & \\textbf{Captum} & \\textbf{OmniXAI} & \\textbf{AutoXAI} & \\textbf{OpenXAI} & \\textbf{PnPXAI} \\\\") - lines.append(" \\midrule") - - # Process XGBoost - model_key = "xgb" - model_name = MODEL_MAP[model_key] - - for metric_idx, (metric_key, metric_name, is_complexity) in enumerate([ - ('faithfulness', 'Faithfulness ($\\uparrow$)', False), - ('complexity', 'Complexity ($\\downarrow$)', True), - ('composite', 'Composite [Faithfulness, Simplicity] ($\\uparrow$)', False) - ]): - if metric_idx == 0: - lines.append(f" {model_name} & \\multirow[t]{{2}}{{*}}{{{metric_name}}}") - else: - lines.append(f" & \\multirow[t]{{2}}{{*}}{{{metric_name}}}") - - for exp_idx, exp_key in enumerate(XGB_EXPLAINERS): - exp_name = EXPLAINER_MAP[exp_key] - - # Get values for all frameworks - values = [] - for fw in FRAMEWORK_ORDER: - if fw in data[model_key] and exp_key in data[model_key][fw]: - values.append(data[model_key][fw][exp_key][metric_key]) - else: - values.append(None) - - # Find best value - best_val = get_best_value(values, is_complexity) - - # Format row - formatted_values = [format_value(v, best_val, is_complexity) for v in values] - - if exp_idx == 0: - lines.append(f" & {exp_name} & {' & '.join(formatted_values)} \\\\") - else: - lines.append(f" & & {exp_name} & {' & '.join(formatted_values)} \\\\") - - if metric_idx < 2: - lines.append(" \\cmidrule{2-8}") - - lines.append(" \\midrule") - - # Process ResNet - model_key = "tab_resnet" - model_name = MODEL_MAP[model_key] - - for metric_idx, (metric_key, metric_name, is_complexity) in enumerate([ - ('faithfulness', 'Faithfulness ($\\uparrow$)', False), - ('complexity', 'Complexity ($\\downarrow$)', True), - ('composite', 'Composite [Faithfulness, Simplicity] ($\\uparrow$)', False) - ]): - if metric_idx == 0: - lines.append(f" {model_name} & \\multirow[t]{{{len(RESNET_EXPLAINERS)}}}{{*}}{{{metric_name}}}") - else: - lines.append(f" & \\multirow[t]{{{len(RESNET_EXPLAINERS)}}}{{*}}{{{metric_name}}}") - - for exp_idx, exp_key in enumerate(RESNET_EXPLAINERS): - exp_name = EXPLAINER_MAP[exp_key] - - # Get values for all frameworks - values = [] - for fw in FRAMEWORK_ORDER: - if fw in data[model_key] and exp_key in data[model_key][fw]: - values.append(data[model_key][fw][exp_key][metric_key]) - else: - values.append(None) - - # Find best value - best_val = get_best_value(values, is_complexity) - - # Format row - formatted_values = [format_value(v, best_val, is_complexity) for v in values] - - if exp_idx == 0: - lines.append(f" & {exp_name:20s} & {' & '.join(formatted_values)} \\\\") - else: - lines.append(f" & & {exp_name:20s} & {' & '.join(formatted_values)} \\\\") - - if metric_idx < 2: - lines.append(" \\cmidrule{2-8}") - - lines.append(" \\bottomrule") - lines.append(" \\end{tabular}%") - lines.append(" }") - lines.append("\\end{table}") - - return "\n".join(lines) - -if __name__ == "__main__": - import numpy as np - - print("Collecting experiment results...") - data = collect_results() - - print(f"\nFound results for models: 
{list(data.keys())}") - for model in data: - print(f" {model}: frameworks = {list(data[model].keys())}") - - print("\nGenerating LaTeX table...") - latex_table = generate_latex_table(data) - - print("\n" + "="*70) - print("Generated LaTeX Table:") - print("="*70) - print(latex_table) - - # Save to file - output_file = "new_experiment_result.md" - with open(output_file, 'w') as f: - f.write(latex_table + "\n") - - print(f"\n✓ Table saved to: {output_file}") diff --git a/experiments/scripts/wine_quality/script_utils/__init__.py b/experiments/scripts/wine_quality/script_utils/__init__.py deleted file mode 100644 index 7c169a4..0000000 --- a/experiments/scripts/wine_quality/script_utils/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -""" -Utility modules for Wine Quality XAI experiments. -""" -from .data_utils import ( - load_wine_quality, - transform, - invert_transform, - find_idx, -) - -from .model_utils import ( - TorchModelForXGBoost, - load_model, - wrap_model_for_pytorch, -) - -from .explainer_factory import ( - ExplainerConfig, - validate_explainer_args, - get_framework_specific_name, -) - -__all__ = [ - # Data utils - "load_wine_quality", - "transform", - "invert_transform", - "find_idx", - - # Model utils - "TorchModelForXGBoost", - "load_model", - "wrap_model_for_pytorch", - - # Explainer factory - "ExplainerConfig", - "validate_explainer_args", - "get_framework_specific_name", -] diff --git a/experiments/scripts/wine_quality/script_utils/data_utils.py b/experiments/scripts/wine_quality/script_utils/data_utils.py deleted file mode 100644 index f18e2d6..0000000 --- a/experiments/scripts/wine_quality/script_utils/data_utils.py +++ /dev/null @@ -1,132 +0,0 @@ -""" -Data loading and transformation utilities for Wine Quality dataset. -""" -import pickle -from pathlib import Path -from typing import Dict, Any, Tuple - -import numpy as np -import pandas as pd - - -def load_wine_quality( - data_dir: str = "data/Wine Quality" -) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, Dict[str, Any], pd.DataFrame]: - """ - Load Wine Quality dataset. - - Args: - data_dir: Directory containing the dataset files - - Returns: - X_train: Training features (preprocessed) - X_test: Test features (preprocessed) - y_train: Training labels - y_test: Test labels - feature_metadata: Feature metadata and encoders - raw_data: Original raw data - """ - data_path = Path(data_dir) - - X_train = np.load(data_path / "X_train.npy") - X_test = np.load(data_path / "X_test.npy") - y_train = np.load(data_path / "y_train.npy") - y_test = np.load(data_path / "y_test.npy") - - with open(data_path / "feature_metadata.pkl", "rb") as f: - feature_metadata = pickle.load(f) - - raw_data = pd.read_csv(data_path / "raw_data.csv") - - return X_train, X_test, y_train, y_test, feature_metadata, raw_data - - -def transform(X: pd.DataFrame, feature_metadata: Dict[str, Any]) -> np.ndarray: - """ - Transform raw data using feature metadata encoders. 
- - Args: - X: Raw feature DataFrame - feature_metadata: Feature metadata with encoders - - Returns: - Transformed feature array - """ - input_data = [] - for k, v in feature_metadata.items(): - if np.isin('missing', X[[k]].values): - X[[k]] = X[[k]].replace("missing", v['encoder'].categories_[0][-1]) - preprocessed = v['encoder'].transform(X[[k]].values) - if v['type'] == 'categorical': - preprocessed = preprocessed.toarray() - input_data.append(preprocessed) - - input_array = np.concatenate(input_data, axis=1) - return input_array - - -def invert_transform( - input_array: np.ndarray, - feature_metadata: Dict[str, Any] -) -> pd.DataFrame: - """ - Invert transformed data back to original feature space. - - Args: - input_array: Transformed feature array - feature_metadata: Feature metadata with encoders - - Returns: - DataFrame with original features - """ - inverted_data = {} - - for col, meta in feature_metadata.items(): - if meta['type'] == 'categorical': - # Extract one-hot encoded portion - start_idx, end_idx = meta['index'][0], meta['index'][-1] + 1 - cat_data = input_array[:, start_idx:end_idx] - # Inverse transform using OneHotEncoder - inverted_col = meta['encoder'].inverse_transform(cat_data) - inverted_data[col] = inverted_col.flatten() - else: - # Inverse transform numerical data - idx = meta['index'] - num_data = input_array[:, idx].reshape(-1, 1) - inverted_col = meta['encoder'].inverse_transform(num_data) - inverted_data[col] = inverted_col.flatten() - - # Convert to DataFrame - inverted_df = pd.DataFrame(inverted_data) - - return inverted_df - - -def find_idx(a: list, b: list) -> list: - """ - Find permutation index where a[idx] = b. - - Args: - a: Source list - b: Target list - - Returns: - Index permutation, or None if impossible - """ - from collections import defaultdict, deque - - # Check if a and b have the same multiset - if sorted(a) != sorted(b): - return None - - # Create mapping from value to indices - pos_map = defaultdict(deque) - for i, val in enumerate(a): - pos_map[val].append(i) - - # Build index by matching b elements - idx = [] - for val in b: - idx.append(pos_map[val].popleft()) - - return idx diff --git a/experiments/scripts/wine_quality/script_utils/explainer_factory.py b/experiments/scripts/wine_quality/script_utils/explainer_factory.py deleted file mode 100644 index 1132281..0000000 --- a/experiments/scripts/wine_quality/script_utils/explainer_factory.py +++ /dev/null @@ -1,187 +0,0 @@ -""" -Explainer factory for creating explainers across different frameworks. 
-""" -from typing import Dict, Any, Tuple, Optional -import warnings - - -class ExplainerConfig: - """Configuration and validation for explainer frameworks.""" - - # Framework compatibility matrix - FRAMEWORK_MODEL_SUPPORT = { - "pnpxai": ["xgb", "tab_resnet"], - "captum": ["xgb", "tab_resnet"], - "omnixai": ["xgb"], # Only XGBoost - "openxai": ["tab_resnet"], # Only TabResNet - "autoxai": ["xgb", "tab_resnet"], # Deprecated - } - - FRAMEWORK_EXPLAINER_SUPPORT = { - "pnpxai": ["lime", "shap", "ig", "grad", "sg", "itg", "vg", "lrp"], - "captum": ["lime", "shap", "ig", "grad", "sg", "itg", "lrp"], # no vg - "omnixai": ["lime", "shap"], # Only model-agnostic - "openxai": ["lime", "shap", "ig", "grad", "sg", "itg"], # no lrp, vg - "autoxai": ["lime", "shap"], # Limited support - } - - # Explainer name mappings - EXPLAINER_NAME_MAP = { - "pnpxai": { - "shap": "kernel_shap", - "lime": "lime", - "grad": "gradient", - "itg": "grad_x_input", - "ig": "integrated_gradients", - "sg": "smooth_grad", - "vg": "var_grad", - "lrp": "lrp_uniform_epsilon", - }, - "captum": { - "shap": "KernelShap", - "lime": "Lime", - "grad": "Saliency", - "itg": "InputXGradient", - "ig": "IntegratedGradients", - "sg": "NoiseTunnel", - "lrp": "LRP", - }, - "omnixai": { - "lime": "LimeTabular", - "shap": "ShapTabular", - }, - "openxai": { - "lime": "lime", - "shap": "shap", - "ig": "ig", - "grad": "grad", - "sg": "sg", - "itg": "itg", - }, - "autoxai": { - "lime": "LIME", - "shap": "SHAP", - } - } - - @classmethod - def validate( - cls, - framework: str, - model: str, - explainer: Optional[str] = None - ) -> Tuple[bool, Optional[str]]: - """ - Validate framework/model/explainer combination. - - Args: - framework: Framework name - model: Model type - explainer: Explainer type (optional for pnpxai auto mode) - - Returns: - (is_valid, error_message) - """ - # Check framework - if framework not in cls.FRAMEWORK_MODEL_SUPPORT: - return False, f"Invalid framework: {framework}. Choose from {list(cls.FRAMEWORK_MODEL_SUPPORT.keys())}" - - # Check model support - if model not in cls.FRAMEWORK_MODEL_SUPPORT[framework]: - return False, f"Framework {framework} does not support model {model}" - - # Check explainer if provided - if explainer is not None: - if explainer not in cls.FRAMEWORK_EXPLAINER_SUPPORT[framework]: - return False, f"Framework {framework} does not support explainer {explainer}" - - return True, None - - @classmethod - def get_explainer_name(cls, framework: str, explainer: str) -> str: - """ - Get framework-specific explainer name. - - Args: - framework: Framework name - explainer: Standard explainer name - - Returns: - Framework-specific explainer name - """ - return cls.EXPLAINER_NAME_MAP.get(framework, {}).get(explainer, explainer) - - @classmethod - def get_default_params(cls, framework: str, explainer: str, n_samples: int = 64) -> Dict[str, Any]: - """ - Get default parameters for explainer. 
- - Args: - framework: Framework name - explainer: Explainer type - n_samples: Number of samples for sampling-based methods - - Returns: - Default parameters dictionary - """ - params = {} - - # Sampling-based explainers - if explainer in ["lime", "shap"]: - if framework == "omnixai": - if explainer == "lime": - params["num_samples"] = n_samples - elif explainer == "shap": - params["nsamples"] = n_samples - elif framework == "openxai": - params["n_samples"] = n_samples - elif framework == "pnpxai": - params["n_samples"] = n_samples - elif framework == "captum": - params["n_samples"] = n_samples - - return params - - -def validate_explainer_args( - framework: str, - model: str, - explainer: Optional[str] = None -) -> None: - """ - Validate explainer arguments and raise error if invalid. - - Args: - framework: Framework name - model: Model type - explainer: Explainer type (optional) - - Raises: - ValueError: If combination is invalid - """ - is_valid, error_msg = ExplainerConfig.validate(framework, model, explainer) - - if not is_valid: - raise ValueError(error_msg) - - # Warn about deprecated frameworks - if framework == "autoxai": - warnings.warn( - "AutoXAI is deprecated and may have compatibility issues with recent dependencies. " - "Consider using PnPXAI or Captum instead.", - DeprecationWarning - ) - - -def get_framework_specific_name(framework: str, explainer: str) -> str: - """ - Get framework-specific explainer name. - - Args: - framework: Framework name - explainer: Standard explainer name - - Returns: - Framework-specific name - """ - return ExplainerConfig.get_explainer_name(framework, explainer) diff --git a/experiments/scripts/wine_quality/script_utils/model_utils.py b/experiments/scripts/wine_quality/script_utils/model_utils.py deleted file mode 100644 index 61e8c28..0000000 --- a/experiments/scripts/wine_quality/script_utils/model_utils.py +++ /dev/null @@ -1,260 +0,0 @@ -""" -Model loading and wrapper utilities. -""" -from pathlib import Path -from typing import Union, Optional - -import torch -import torch.nn as nn -import xgboost as xgb - -try: - from huggingface_hub import hf_hub_download - HF_AVAILABLE = True -except ImportError: - HF_AVAILABLE = False - - -class TorchModelForXGBoost(nn.Module): - """ - PyTorch wrapper for XGBoost models. - - This wrapper allows XGBoost models to be used with PyTorch-based - explainability frameworks like Captum and PnPXAI. - """ - - def __init__(self, xgb_model: xgb.XGBClassifier): - """ - Args: - xgb_model: Trained XGBoost classifier - """ - super().__init__() - self.xgb_model = xgb_model - # Dummy layer to make this a proper PyTorch module - self._dummy_layer = nn.Linear(1, 1) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - """ - Forward pass through XGBoost model. 
- - Args: - x: Input tensor of shape (batch_size, n_features) or (1, batch_size, n_features) - - Returns: - Probability predictions of shape (batch_size, n_classes) - """ - # Handle 3D input (squeeze batch dimension) - if x.ndim >= 3: - x = x.squeeze(0) - - # Get predictions from XGBoost - # Detach if tensor requires grad (for gradient-based explainers) - if x.requires_grad: - out = self.xgb_model.predict_proba(x.detach().cpu().numpy()) - else: - out = self.xgb_model.predict_proba(x.cpu().numpy()) - - # Convert back to tensor - return torch.from_numpy(out) - - -def load_model_from_hf( - model_type: str, - repo_id: str, - filename: str, - input_dim: int = None, - output_dim: int = 2, - cache_dir: str = None, - **kwargs -) -> Union[xgb.XGBClassifier, nn.Module]: - """ - Load a model from Hugging Face Hub. - - Args: - model_type: Type of model ('xgb', 'tab_resnet', 'lr') - repo_id: Hugging Face repository ID (e.g., 'username/repo-name') - filename: Model filename in the repository - input_dim: Input dimension (required for neural network models) - output_dim: Output dimension (default: 2 for binary classification) - cache_dir: Cache directory for downloaded models (default: HF default cache) - **kwargs: Additional arguments for model initialization - - Returns: - Loaded model - - Raises: - ImportError: If huggingface_hub is not installed - ValueError: If model_type is invalid or input_dim not provided for neural networks - """ - if not HF_AVAILABLE: - raise ImportError( - "huggingface_hub is not installed. " - "Install it with: pip install huggingface_hub" - ) - - # Download model from HF Hub - print(f"Downloading {filename} from {repo_id}...") - model_path = hf_hub_download( - repo_id=repo_id, - filename=filename, - cache_dir=cache_dir, - repo_type="model" - ) - print(f"Downloaded to: {model_path}") - - # Load the model using the existing load_model function - return load_model( - model_type=model_type, - model_path=model_path, - input_dim=input_dim, - output_dim=output_dim, - **kwargs - ) - - -def load_model( - model_type: str, - model_path: Union[str, Path] = None, - input_dim: int = None, - output_dim: int = 2, - hf_repo: str = None, - hf_filename: str = None, - local_dir: str = None, - **kwargs -) -> Union[xgb.XGBClassifier, nn.Module]: - """ - Load a trained model with priority: local file → local directory → HF Hub. - - Priority order: - 1. If model_path is provided and exists, load from that path - 2. If local_dir + hf_filename exists, load from there - 3. 
If hf_repo + hf_filename provided, download from HF Hub - - Args: - model_type: Type of model ('xgb', 'tab_resnet', 'lr') - model_path: Path to model file (Priority 1) - input_dim: Input dimension (required for neural network models) - output_dim: Output dimension (default: 2 for binary classification) - hf_repo: Hugging Face repository ID (e.g., 'username/repo-name') - hf_filename: Model filename in HF repository - local_dir: Local directory to check for model (default: data/Wine Quality) - **kwargs: Additional arguments for model initialization - - Returns: - Loaded model - - Raises: - ValueError: If model_type is invalid, input_dim not provided for neural networks, - or no valid model path found - ImportError: If trying to load from HF Hub without huggingface_hub installed - - Examples: - # Load from explicit path - model = load_model('xgb', model_path='data/Wine Quality/xgb_model.json') - - # Load with local fallback to HF Hub - model = load_model('xgb', hf_repo='username/wine-models', - hf_filename='xgb_model.json', - local_dir='data/Wine Quality') - """ - # Priority 1: Explicit model_path - if model_path is not None: - model_path = Path(model_path) - if model_path.exists(): - print(f"Loading model from: {model_path}") - else: - raise ValueError(f"Model path does not exist: {model_path}") - - # Priority 2: Check local_dir + hf_filename - elif local_dir is not None and hf_filename is not None: - local_path = Path(local_dir) / hf_filename - if local_path.exists(): - print(f"Loading model from local directory: {local_path}") - model_path = local_path - # Priority 3: Fall back to HF Hub - elif hf_repo is not None: - print(f"Model not found locally. Downloading from HF Hub...") - return load_model_from_hf( - model_type=model_type, - repo_id=hf_repo, - filename=hf_filename, - input_dim=input_dim, - output_dim=output_dim, - **kwargs - ) - else: - raise ValueError( - f"Model not found in local directory: {local_path}\n" - f"Provide hf_repo to download from Hugging Face Hub." - ) - - # Priority 3: Only HF Hub specified - elif hf_repo is not None and hf_filename is not None: - print(f"Loading model from HF Hub...") - return load_model_from_hf( - model_type=model_type, - repo_id=hf_repo, - filename=hf_filename, - input_dim=input_dim, - output_dim=output_dim, - **kwargs - ) - - else: - raise ValueError( - "Must provide either:\n" - " - model_path, or\n" - " - local_dir + hf_filename, or\n" - " - hf_repo + hf_filename" - ) - - if model_type == "xgb": - model = xgb.XGBClassifier() - model.load_model(str(model_path)) - return model - - elif model_type == "tab_resnet": - if input_dim is None: - raise ValueError("input_dim is required for tab_resnet model") - - from experiments.models.tab_resnet import TabResNet - - num_blocks = kwargs.get('num_blocks', 1) - model = TabResNet(input_dim, output_dim, num_blocks=num_blocks) - model.load_state_dict(torch.load(model_path)) - model.eval() - return model - - elif model_type == "lr": - if input_dim is None: - raise ValueError("input_dim is required for lr model") - - from experiments.models.tab_resnet import LogisticRegression - - model = LogisticRegression(input_dim, output_dim) - model.load_state_dict(torch.load(model_path)) - model.eval() - return model - - else: - raise ValueError(f"Unknown model type: {model_type}. Choose from ['xgb', 'tab_resnet', 'lr']") - - -def wrap_model_for_pytorch( - model: Union[xgb.XGBClassifier, nn.Module], - model_type: str -) -> nn.Module: - """ - Wrap model as PyTorch module if needed. 
- - Args: - model: Model to wrap - model_type: Type of model ('xgb', 'tab_resnet', 'lr') - - Returns: - PyTorch module - """ - if model_type == "xgb": - return TorchModelForXGBoost(model) - else: - return model diff --git a/experiments/scripts/wine_quality/visualize_results.py b/experiments/scripts/wine_quality/visualize_results.py deleted file mode 100644 index 715337c..0000000 --- a/experiments/scripts/wine_quality/visualize_results.py +++ /dev/null @@ -1,296 +0,0 @@ -#!/usr/bin/env python3 -""" -Wine Quality Results Visualization Script - -Aggregates and displays all experiment results from Wine Quality analysis in a markdown table format. -This script scans the results directory and creates a comprehensive summary similar to benchmark/notebooks/vis.ipynb. - -Usage: - python -m experiments.scripts.wine_quality.visualize_results [OPTIONS] - -Options: - --results-dir: Path to results directory (default: results/Wine Quality) - --output: Output format (markdown, csv, json) (default: markdown) - --round: Number of decimal places to round (default: 4) - --verbose: Enable verbose logging - -Examples: - # Display results as markdown table - python -m experiments.scripts.wine_quality.visualize_results - - # Save to CSV - python -m experiments.scripts.wine_quality.visualize_results --output csv > results.csv - - # Custom results directory - python -m experiments.scripts.wine_quality.visualize_results --results-dir "path/to/results" -""" - -import os -import sys -import argparse -import numpy as np -import pandas as pd -from pathlib import Path -from typing import Dict, List, Optional -from pandas.api.types import CategoricalDtype - - -# Mapping dictionaries for display names -EXP_MAP = { - "shap": "KernelSHAP", - "kernel_shap": "KernelSHAP", - "lime": "LIME", - "integrated_gradients": "Integrated Gradients", - "itg": "Gradient x Input", - "grad": "Gradient", - "gradient": "Gradient", - "lrp_uniform_epsilon": "LRP", - "lrp": "LRP", - "smooth_grad": "SmoothGrad", - "sg": "SmoothGrad", - "var_grad": "VarGrad", - "vg": "VarGrad", - "grad_x_input": "Gradient x Input", - "ig": "Integrated Gradients", -} - -MODEL_MAP = { - "xgb": "XGBoost", - "tab_resnet": "Resnet", -} - -FRAMEWORK_MAP = { - "pnpxai": "PnPXAI", - "captum": "Captum", - "autoxai": "AutoXAI", - "omnixai": "OmniXAI", - "openxai": "OpenXAI", -} - -METRIC_MAP = { - "abpc": "Correctness", - "cmpx": "Complexity", - "cmpd": "Compounded" -} - -# Ordering for categorical data -EXP_ORDER = [ - 'KernelSHAP', - 'LIME', - 'Gradient', - 'Gradient x Input', - 'Integrated Gradients', - 'SmoothGrad', - 'VarGrad', - 'LRP', -] - - -def collect_results(results_dir: str = "results/Wine Quality", verbose: bool = False) -> pd.DataFrame: - """ - Collect all experiment results from the results directory. 
- - Args: - results_dir: Path to the results directory - verbose: Enable verbose logging - - Returns: - DataFrame with columns: model, framework, explainer, abpc, cmpx, cmpd - """ - results_path = Path(results_dir) - if not results_path.exists(): - raise FileNotFoundError(f"Results directory not found: {results_dir}") - - records = [] - - # Walk through the directory structure: results/Wine Quality/{model}/{framework}/{explainer}/ - for root, dirs, files in os.walk(results_path): - # Check if this directory contains result files - if "explanations.npy" in files: - parts = Path(root).relative_to(results_path).parts - - if len(parts) != 3: - if verbose: - print(f"Skipping unexpected directory structure: {root}") - continue - - model, framework, explainer = parts - - # Load metrics - try: - abpc = np.load(os.path.join(root, "abpc.npy")).mean() - cmpx = np.load(os.path.join(root, "cmpx.npy")).mean() - cmpd = np.load(os.path.join(root, "cmpd.npy")).mean() - - records.append({ - "model": model, - "framework": framework, - "explainer": explainer, - "abpc": abpc, - "cmpx": cmpx, - "cmpd": cmpd, - }) - - if verbose: - print(f"Loaded: Model={model}, Framework={framework}, Explainer={explainer}") - print(f" ABPC={abpc:.4f}, CMPX={cmpx:.4f}, CMPD={cmpd:.4f}") - - except Exception as e: - if verbose: - print(f"Error loading metrics from {root}: {e}") - continue - - if not records: - raise ValueError(f"No experiment results found in {results_dir}") - - return pd.DataFrame(records) - - -def create_pivot_table(df: pd.DataFrame, round_decimals: int = 4) -> pd.DataFrame: - """ - Transform the results DataFrame into a pivot table format for display. - - Args: - df: DataFrame with columns: model, framework, explainer, abpc, cmpx, cmpd - round_decimals: Number of decimal places to round - - Returns: - Pivot table with metrics as rows and frameworks as columns - """ - # Replace codes with display names - name_map = {} - name_map.update(EXP_MAP) - name_map.update(MODEL_MAP) - name_map.update(FRAMEWORK_MAP) - name_map.update(METRIC_MAP) - - # Reshape data: unpivot metrics - performance = ( - df - .replace(name_map) - .melt( - id_vars=["model", "framework", "explainer"], - value_vars=["abpc", "cmpx", "cmpd"], - var_name="metric", - value_name="value" - ) - .replace(name_map) - .assign( - model=lambda d: d["model"].astype( - CategoricalDtype(list(MODEL_MAP.values()), ordered=True) - ), - metric=lambda d: d["metric"].astype( - CategoricalDtype(list(METRIC_MAP.values()), ordered=True) - ), - explainer=lambda d: d["explainer"].astype( - CategoricalDtype(EXP_ORDER, ordered=True) - ), - framework=lambda d: d["framework"].astype( - CategoricalDtype(list(FRAMEWORK_MAP.values()), ordered=True) - ) - ) - [['model', 'metric', 'explainer', 'framework', 'value']] - .pivot( - index=["model", "metric", "explainer"], - columns="framework", - values="value" - ) - .reset_index() - .sort_values(["model", "metric", "explainer"]) - .round(round_decimals) - ) - - return performance - - -def print_summary(df: pd.DataFrame, verbose: bool = False): - """Print summary statistics about the collected results.""" - print("\n" + "="*70) - print("Wine Quality Experiment Results Summary") - print("="*70) - print(f"\nTotal experiments: {len(df)}") - print(f"Models: {sorted(df['model'].unique())}") - print(f"Frameworks: {sorted(df['framework'].unique())}") - print(f"Explainers: {sorted(df['explainer'].unique())}") - print() - - if verbose: - # Count experiments by combination - print("Experiments by model:") - 
print(df.groupby('model').size().to_string()) - print("\nExperiments by framework:") - print(df.groupby('framework').size().to_string()) - print("\nExperiments by explainer:") - print(df.groupby('explainer').size().to_string()) - print() - - -def main(): - """Main entry point.""" - parser = argparse.ArgumentParser( - description="Visualize Wine Quality experiment results", - formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=""" -Examples: - # Display results as markdown table - python -m experiments.scripts.wine_quality.visualize_results - - # Save to CSV - python -m experiments.scripts.wine_quality.visualize_results --output csv > results.csv - - # Custom results directory with verbose output - python -m experiments.scripts.wine_quality.visualize_results --results-dir "path/to/results" --verbose - """ - ) - - parser.add_argument("--results-dir", type=str, default="results/Wine Quality", - help="Path to results directory (default: results/Wine Quality)") - parser.add_argument("--output", type=str, default="markdown", - choices=["markdown", "csv", "json"], - help="Output format (default: markdown)") - parser.add_argument("--round", type=int, default=4, dest="round_decimals", - help="Number of decimal places to round (default: 4)") - parser.add_argument("--verbose", action="store_true", - help="Enable verbose logging") - - args = parser.parse_args() - - try: - # Collect results - if args.verbose: - print(f"Scanning results directory: {args.results_dir}") - print() - - df = collect_results(args.results_dir, verbose=args.verbose) - - # Print summary - print_summary(df, verbose=args.verbose) - - # Create pivot table - pivot_table = create_pivot_table(df, round_decimals=args.round_decimals) - - # Output results - print("="*70) - print("Results Table") - print("="*70) - print() - - if args.output == "markdown": - print(pivot_table.to_markdown(index=False)) - elif args.output == "csv": - print(pivot_table.to_csv(index=False)) - elif args.output == "json": - print(pivot_table.to_json(orient="records", indent=2)) - - print() - - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - if args.verbose: - import traceback - traceback.print_exc() - sys.exit(1) - - -if __name__ == "__main__": - main() From c728289872cdc1c43d490b37b2d78ee71e58a7ff Mon Sep 17 00:00:00 2001 From: shiningstone23 Date: Sat, 22 Nov 2025 14:06:05 +0900 Subject: [PATCH 17/20] feat/tabulr:mis --- docker-compose.wine_quality.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/docker-compose.wine_quality.yml b/docker-compose.wine_quality.yml index 24eeb76..65c83f9 100644 --- a/docker-compose.wine_quality.yml +++ b/docker-compose.wine_quality.yml @@ -26,6 +26,9 @@ services: # Persistent results directory - ./results:/root/pnpxai-experiments/results + # Persistent logs directory + - ./logs:/root/pnpxai-experiments/logs + # Data directory with Wine Quality dataset - ./data:/root/pnpxai-experiments/data @@ -54,12 +57,13 @@ services: echo 'Wine Quality XAI Experiment Starting...' && echo '========================================' && echo '' && - python -m experiments.scripts.analyze_wine_quality --n-samples 25 --seed 42 --verbose 2>&1 | tee results/Wine\ Quality/experiment.log && + mkdir -p logs && + python -m experiments.scripts.analyze_wine_quality --n-samples 25 --seed 42 --verbose 2>&1 | tee 'logs/experiment.log' && echo '' && echo '========================================' && echo 'Experiment completed successfully!' 
&& echo 'Results saved to: results/Wine Quality/' && - echo 'Log saved to: results/Wine Quality/experiment.log' && + echo 'Log saved to: logs/experiment.log' && echo '========================================' && tail -f /dev/null " From 0eabacbda7a9a786eb176ea71e9a053d7ce530e5 Mon Sep 17 00:00:00 2001 From: seongun-kim Date: Tue, 25 Nov 2025 14:33:36 +0900 Subject: [PATCH 18/20] refactor: standardize tabular experiment structure and utils --- README.md | 67 ++++- README_wine_quality.md | 280 ------------------ .../{Wine Quality => wine_quality}/X_test.npy | Bin .../X_train.npy | Bin .../download.sh | 0 .../feature_metadata.pkl | Bin .../preprocess.py | 0 .../raw_data.csv | 0 .../resnet_model.pth | Bin .../xgb_model.json | 0 .../{Wine Quality => wine_quality}/y_test.npy | Bin .../y_train.npy | Bin docker-compose.wine_quality.yml | 69 ----- .../configs}/tabular/dataset_config.yaml | 0 .../configs}/tabular/explainer_config.yaml | 0 .../configs}/tabular/model_config.yaml | 0 .../configs}/tabular/optuna_config.yaml | 0 experiments/models/__init__.py | 3 +- experiments/scripts/analyze_wine_quality.py | 196 +++--------- experiments/utils/__init__.py | 10 +- experiments/utils/datasets.py | 73 ++++- experiments/utils/models.py | 58 +++- 22 files changed, 226 insertions(+), 530 deletions(-) delete mode 100644 README_wine_quality.md rename data/{Wine Quality => wine_quality}/X_test.npy (100%) rename data/{Wine Quality => wine_quality}/X_train.npy (100%) rename data/{Wine Quality => wine_quality}/download.sh (100%) rename data/{Wine Quality => wine_quality}/feature_metadata.pkl (100%) rename data/{Wine Quality => wine_quality}/preprocess.py (100%) rename data/{Wine Quality => wine_quality}/raw_data.csv (100%) rename data/{Wine Quality => wine_quality}/resnet_model.pth (100%) rename data/{Wine Quality => wine_quality}/xgb_model.json (100%) rename data/{Wine Quality => wine_quality}/y_test.npy (100%) rename data/{Wine Quality => wine_quality}/y_train.npy (100%) delete mode 100644 docker-compose.wine_quality.yml rename {configs => experiments/configs}/tabular/dataset_config.yaml (100%) rename {configs => experiments/configs}/tabular/explainer_config.yaml (100%) rename {configs => experiments/configs}/tabular/model_config.yaml (100%) rename {configs => experiments/configs}/tabular/optuna_config.yaml (100%) diff --git a/README.md b/README.md index 62e8c2d..0a8c225 100644 --- a/README.md +++ b/README.md @@ -267,27 +267,68 @@ Results will be saved to the file path specified in the `FILENAME` of `--out_fil ### Experiment 6: Wine Quality Explanation -This experiment compares multiple XAI frameworks (PnPXAI, Captum, OmniXAI, OpenXAI, AutoXAI) on the Wine Quality dataset using various models and explainer methods. +This experiment compares multiple XAI frameworks (PnPXAI, Captum, OmniXAI, OpenXAI, AutoXAI) on the Wine Quality dataset using various models and explainer methods. It evaluates explanations using Faithfulness, Complexity, and their Composite score. -#### Usage +#### Data and Model + * **Data (Wine Quality):** The **Wine Quality dataset** containing ~6,497 samples (white and red wine combined) for binary classification (good vs. bad quality). + + * **Model (XGBoost & TabResNet):** + * **XGBoost:** A gradient boosting classifier trained on the tabular features. + * **TabResNet:** A ResNet-like architecture adapted for tabular data. + Pre-trained weights for both models are included in the `data/wine_quality/` directory. 
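For reference, the bundled checkpoints can also be loaded directly (run from the repository root so the relative paths resolve). A minimal sketch, assuming the `TabResNet` constructor exported by `experiments.models` takes the input dimension, output dimension, and `num_blocks` as in the original loader:

```python
import numpy as np
import torch
import xgboost as xgb

from experiments.models import TabResNet

X_train = np.load("data/wine_quality/X_train.npy")

# Pre-trained XGBoost classifier
xgb_model = xgb.XGBClassifier()
xgb_model.load_model("data/wine_quality/xgb_model.json")

# Pre-trained TabResNet (binary classification head: 2 classes, 1 residual block)
resnet = TabResNet(X_train.shape[1], 2, num_blocks=1)
resnet.load_state_dict(torch.load("data/wine_quality/resnet_model.pth"))
resnet.eval()
```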
+ +#### Setup for Wine Quality Experiment + +Due to dependency conflicts between frameworks, this experiment requires a dedicated Docker environment separate from the main setup. + +Please build the specific Docker image using the provided [`Dockerfile.wine_quality`](`./Dockerfile.wine_quality`): ```bash # Build the Wine Quality Docker image docker build -t pnpxai_wine_quality:latest -f Dockerfile.wine_quality . +``` -# Run the container -docker run --rm -it \ - --runtime=nvidia \ - --gpus all \ - --shm-size=8g \ - -v $(pwd):/root/pnpxai-experiments \ - pnpxai_wine_quality:latest +#### Usage -# Inside the container, run all experiments -python -m experiments.scripts.wine_quality.analyze_wine_quality -``` +1. **Run the container:** + Start the interactive container with GPU support and volume mounting. + ```bash + docker run --rm -it \ + --runtime=nvidia \ + --gpus all \ + --shm-size=8g \ + -v $(pwd):/root/pnpxai-experiments \ + pnpxai_wine_quality:latest + ``` + +2. **Run the experiment:** + Inside the container, execute the analysis script: + ```bash + python -m experiments.scripts.analyze_wine_quality \ + --n_samples 25 \ + --seed 42 \ + --verbose \ + --data_dir data/wine_quality \ + --config_dir experiments/configs/tabular \ + --results_dir results/wine_quality + ``` + +#### Arguments + + * `--n-samples `: Number of samples for sampling-based explainers (LIME/SHAP). Defaults to `25`. + * `--seed `: Random seed for reproducibility. Defaults to `42`. + * `--verbose`: Enable detailed logging. + * `--data-dir `: Path to data directory. Defaults to `data/wine_quality`. + * `--config-dir `: Path to config directory. Defaults to `experiments/configs/tabular`. + * `--results-dir `: Path to results directory. Defaults to `results/wine_quality`. + +#### Output + +The experiment will generate the following in the `results/wine_quality/` directory: -For detailed documentation, see [experiments/scripts/wine_quality/README.md](experiments/scripts/wine_quality/README.md). + * **Individual explanations:** Saved in `results/wine_quality/{model}/{framework}/{explainer}/` as `.npy` files (explanations and metric scores). + * **Summary table:** `experiment_result.md` containing a LaTeX table comparing Faithfulness, Complexity, and Composite scores across all frameworks. + * **Execution log:** `experiment.log` (if verbose logging is enabled or configured). ## Citation diff --git a/README_wine_quality.md b/README_wine_quality.md deleted file mode 100644 index 603bbe4..0000000 --- a/README_wine_quality.md +++ /dev/null @@ -1,280 +0,0 @@ -# Wine Quality XAI Benchmark - -[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) - -This repository provides a comprehensive benchmark for comparing explainability frameworks (PnPXAI, Captum, OmniXAI, OpenXAI, and AutoXAI) on the Wine Quality dataset using XGBoost and TabResNet models. - -## Wine Quality Dataset - -The **Wine Quality Dataset** is a widely-used dataset for classification tasks in machine learning. - -### Dataset Information - -- **Source**: UCI Machine Learning Repository -- **Reference**: P. Cortez, A. Cerdeira, F. Almeida, T. Matos and J. Reis. "Modeling wine preferences by data mining from physicochemical properties." Decision Support Systems, Elsevier, 47(4):547-553, 2009. -- **URL**: https://archive.ics.uci.edu/ml/datasets/wine+quality -- **Task**: Binary classification (good vs. 
bad wine quality) -- **Samples**: ~6,497 wine samples (white and red wine combined) - -## Quick Start with Docker Compose - -### Prerequisites - -- Docker -- Docker Compose -- NVIDIA GPU with CUDA support -- nvidia-docker runtime - -### Running the Experiment - -Simply run: - -```bash -docker compose -f docker-compose.wine_quality.yml up -``` - -This single command will: - -1. ✅ Start the Docker container with all dependencies -2. ✅ Load the Wine Quality dataset and pre-trained models -3. ✅ Run explanations across all framework/model/explainer combinations -4. ✅ Evaluate explanations using Faithfulness, Complexity, and Composite metrics -5. ✅ Generate a LaTeX table in markdown format -6. ✅ Save all results to `results/Wine Quality/` -7. ✅ Create an execution log at `results/Wine Quality/experiment.log` - -### Expected Output - -The experiment will generate: - -- **Individual explanations**: `results/Wine Quality/{model}/{framework}/{explainer}/` - - `explanations.npy` - Attribution values for each sample - - `abpc.npy` - Faithfulness (AbPC) metric scores - - `cmpx.npy` - Complexity metric scores - - `cmpd.npy` - Composite metric scores - - `metadata.json` - Experiment configuration and summary - -- **Summary table**: `results/Wine Quality/experiment_result.md` - - LaTeX table comparing all frameworks and explainers - - Format matching academic paper standards - -- **Execution log**: `results/Wine Quality/experiment.log` - - Complete log of the experiment execution - - Useful for debugging and tracking progress - -### Viewing Results - -After the experiment completes, you can view the generated LaTeX table: - -```bash -cat results/Wine\ Quality/experiment_result.md -``` - -Example output format: - -```latex -\begin{table}[!th] - \caption{\textbf{Comparison of explanation performance on Wine Quality dataset.} - Evaluation of XGBoost and TabResNet models across three key metrics: - Faithfulness (higher is better $\uparrow$), Complexity (lower is better $\downarrow$), - and a Composite [Faithfulness, Simplicity] score ($\uparrow$). - The table compares PnP-XAI against Captum, AutoXAI, OmniXAI, and OpenXAI. - Bold values indicate the best score per row; dashes (-) denote unsupported combinations.} - \label{tab:wine_performance} - \centering - \resizebox{\textwidth}{!}{% - \begin{tabular}{lll||cccc|c} - \toprule - \textbf{Model} & \textbf{Metric} & \textbf{Explainer} & \textbf{Captum} & \textbf{OmniXAI} & \textbf{AutoXAI} & \textbf{OpenXAI} & \textbf{PnPXAI} \\ - \midrule - XGBoost & \multirow[t]{2}{*}{Faithfulness ($\uparrow$)} - & KernelSHAP & 0.1189 & 0.1079 & 0.0117 & - & \textbf{0.1205} \\ - & & LIME & \textbf{0.1176} & 0.0448 & 0.0025 & - & 0.1126 \\ - ... - \end{tabular}% - } -\end{table} -``` - -## Experiment Configuration - -The experiment tests the following combinations: - -### Frameworks Tested - -1. **PnPXAI** - Plug-and-Play XAI framework with hyperparameter optimization -2. **Captum** - PyTorch-based explainability library -3. **OmniXAI** - Unified framework for explainable AI (XGBoost only) -4. **OpenXAI** - Open-source explainability framework (TabResNet only) -5. 
**AutoXAI** - Automated XAI with hyperparameter tuning - -### Models Used - -- **XGBoost** - Gradient boosting classifier -- **TabResNet** - Residual neural network for tabular data - -### Explanation Methods - -- **LIME** - Local Interpretable Model-agnostic Explanations -- **SHAP (KernelSHAP)** - SHapley Additive exPlanations -- **Gradient** - Vanilla gradient (saliency) -- **Gradient × Input** - Input times gradient -- **Integrated Gradients** - Path integral of gradients -- **SmoothGrad** - Averaged gradients with noise -- **VarGrad** - Variance-based gradients (PnPXAI only) -- **LRP** - Layer-wise Relevance Propagation - -### Evaluation Metrics - -- **Faithfulness (AbPC)**: Measures how well the explanation reflects the model's actual decision-making process -- **Complexity**: Measures the simplicity/sparsity of the explanation -- **Composite Score**: Weighted combination (0.7 × Faithfulness - 0.3 × Complexity) - -## Manual Execution - -If you prefer to run the experiment manually without Docker: - -```bash -python experiments/scripts/analyze_wine_quality.py \ - --n-samples 25 \ - --seed 42 \ - --verbose \ - --data-dir data/Wine\ Quality \ - --results-dir results/Wine\ Quality -``` - -### Parameters - -- `--n-samples`: Number of samples for LIME/SHAP (default: 25) -- `--seed`: Random seed for reproducibility (default: 42) -- `--verbose`: Enable detailed logging -- `--data-dir`: Path to data directory (default: data/Wine Quality) -- `--config-dir`: Path to config directory (default: configs/tabular) -- `--results-dir`: Path to results directory (default: results/Wine Quality) - -## Repository Structure - -``` -pnpxai-experiments/ -├── experiments/ -│ ├── scripts/ -│ │ ├── analyze_wine_quality.py # Main experiment runner -│ │ ├── wine_quality/ # Original modular scripts (deprecated) -│ │ └── lib/ # External libraries -│ │ └── AutoXAI/ # AutoXAI framework implementation -│ └── models/ -│ └── tab_resnet.py # TabResNet model definition -├── data/ -│ └── Wine Quality/ -│ ├── X_train.npy # Training features -│ ├── X_test.npy # Test features -│ ├── y_train.npy # Training labels -│ ├── y_test.npy # Test labels -│ ├── feature_metadata.pkl # Feature preprocessing info -│ ├── raw_data.csv # Original raw data -│ ├── xgb_model.json # Pre-trained XGBoost model -│ └── resnet_model.pth # Pre-trained TabResNet model -├── configs/ -│ └── tabular/ -│ ├── explainer_config.yaml # Explainer configurations -│ └── optuna_config.yaml # Optimization configurations -├── results/ -│ └── Wine Quality/ -│ ├── xgb/ # XGBoost model results -│ │ ├── pnpxai/ -│ │ ├── captum/ -│ │ ├── omnixai/ -│ │ └── autoxai/ -│ ├── tab_resnet/ # TabResNet model results -│ │ ├── pnpxai/ -│ │ ├── captum/ -│ │ ├── openxai/ -│ │ └── autoxai/ -│ ├── experiment_result.md # Generated LaTeX table -│ └── experiment.log # Execution log -├── docker-compose.wine_quality.yml # Docker Compose configuration -├── Dockerfile.wine_quality # Docker image definition -├── README.md # Main project README -└── README_wine_quality.md # This file -``` - -## Citation - -If you use this benchmark in your research, please cite: - -```bibtex -@misc{wine_quality_xai_benchmark, - title={Wine Quality XAI Benchmark: Comparing Explainability Frameworks}, - author={Your Name}, - year={2025}, - howpublished={\url{https://github.com/yourusername/pnpxai-experiments}} -} -``` - -### Original Dataset Citation - -```bibtex -@article{cortez2009modeling, - title={Modeling wine preferences by data mining from physicochemical properties}, - author={Cortez, Paulo and Cerdeira, 
Antonio and Almeida, Fernando and Matos, Telmo and Reis, Jose}, - journal={Decision Support Systems}, - volume={47}, - number={4}, - pages={547--553}, - year={2009}, - publisher={Elsevier} -} -``` - -## Requirements - -See [docker-compose.wine_quality.yml](docker-compose.wine_quality.yml) for the complete list of dependencies and environment setup. - -Key dependencies: -- Python 3.8+ -- PyTorch -- PnPXAI -- Captum -- OmniXAI -- OpenXAI -- XGBoost -- scikit-learn -- pandas, numpy -- CUDA-enabled GPU (recommended) - -## License - -This project is licensed under the MIT License. - -## Troubleshooting - -### Docker Issues - -If you encounter Docker-related issues: - -```bash -# Rebuild the Docker image -docker compose -f docker-compose.wine_quality.yml build --no-cache - -# Check GPU availability -docker run --rm --gpus all nvidia/cuda:11.8.0-base-ubuntu22.04 nvidia-smi -``` - -### CUDA/GPU Issues - -Ensure you have: -- NVIDIA drivers installed -- nvidia-docker2 installed -- Docker daemon configured to use nvidia runtime - -### Permission Issues - -If you encounter permission errors with the results directory: - -```bash -chmod -R 777 results/ -``` - -## Contact - -For questions or issues, please open an issue on GitHub or contact the maintainers. diff --git a/data/Wine Quality/X_test.npy b/data/wine_quality/X_test.npy similarity index 100% rename from data/Wine Quality/X_test.npy rename to data/wine_quality/X_test.npy diff --git a/data/Wine Quality/X_train.npy b/data/wine_quality/X_train.npy similarity index 100% rename from data/Wine Quality/X_train.npy rename to data/wine_quality/X_train.npy diff --git a/data/Wine Quality/download.sh b/data/wine_quality/download.sh similarity index 100% rename from data/Wine Quality/download.sh rename to data/wine_quality/download.sh diff --git a/data/Wine Quality/feature_metadata.pkl b/data/wine_quality/feature_metadata.pkl similarity index 100% rename from data/Wine Quality/feature_metadata.pkl rename to data/wine_quality/feature_metadata.pkl diff --git a/data/Wine Quality/preprocess.py b/data/wine_quality/preprocess.py similarity index 100% rename from data/Wine Quality/preprocess.py rename to data/wine_quality/preprocess.py diff --git a/data/Wine Quality/raw_data.csv b/data/wine_quality/raw_data.csv similarity index 100% rename from data/Wine Quality/raw_data.csv rename to data/wine_quality/raw_data.csv diff --git a/data/Wine Quality/resnet_model.pth b/data/wine_quality/resnet_model.pth similarity index 100% rename from data/Wine Quality/resnet_model.pth rename to data/wine_quality/resnet_model.pth diff --git a/data/Wine Quality/xgb_model.json b/data/wine_quality/xgb_model.json similarity index 100% rename from data/Wine Quality/xgb_model.json rename to data/wine_quality/xgb_model.json diff --git a/data/Wine Quality/y_test.npy b/data/wine_quality/y_test.npy similarity index 100% rename from data/Wine Quality/y_test.npy rename to data/wine_quality/y_test.npy diff --git a/data/Wine Quality/y_train.npy b/data/wine_quality/y_train.npy similarity index 100% rename from data/Wine Quality/y_train.npy rename to data/wine_quality/y_train.npy diff --git a/docker-compose.wine_quality.yml b/docker-compose.wine_quality.yml deleted file mode 100644 index 65c83f9..0000000 --- a/docker-compose.wine_quality.yml +++ /dev/null @@ -1,69 +0,0 @@ -version: '3.8' - -services: - wine_quality: - image: pnpxai_wine_quality:latest - container_name: pnpxai-wine-quality - runtime: nvidia - - # GPU configuration - deploy: - resources: - reservations: - devices: - - driver: nvidia - 
count: all - capabilities: [gpu] - - # Shared memory size for PyTorch DataLoader - shm_size: '8gb' - - # Volume mounts - mount all necessary directories - volumes: - # Mount entire project for code access - - ./:/root/pnpxai-experiments - - # Persistent results directory - - ./results:/root/pnpxai-experiments/results - - # Persistent logs directory - - ./logs:/root/pnpxai-experiments/logs - - # Data directory with Wine Quality dataset - - ./data:/root/pnpxai-experiments/data - - # Config files - - ./configs:/root/pnpxai-experiments/configs - - # Experiments code - - ./experiments:/root/pnpxai-experiments/experiments - - # Working directory - working_dir: /root/pnpxai-experiments - - # Environment variables - environment: - - PYTHONUNBUFFERED=1 - - CUDA_VISIBLE_DEVICES=0 - - # Keep container running - stdin_open: true - tty: true - - # Run experiment automatically with logging - command: > - bash -c " - echo '========================================' && - echo 'Wine Quality XAI Experiment Starting...' && - echo '========================================' && - echo '' && - mkdir -p logs && - python -m experiments.scripts.analyze_wine_quality --n-samples 25 --seed 42 --verbose 2>&1 | tee 'logs/experiment.log' && - echo '' && - echo '========================================' && - echo 'Experiment completed successfully!' && - echo 'Results saved to: results/Wine Quality/' && - echo 'Log saved to: logs/experiment.log' && - echo '========================================' && - tail -f /dev/null - " diff --git a/configs/tabular/dataset_config.yaml b/experiments/configs/tabular/dataset_config.yaml similarity index 100% rename from configs/tabular/dataset_config.yaml rename to experiments/configs/tabular/dataset_config.yaml diff --git a/configs/tabular/explainer_config.yaml b/experiments/configs/tabular/explainer_config.yaml similarity index 100% rename from configs/tabular/explainer_config.yaml rename to experiments/configs/tabular/explainer_config.yaml diff --git a/configs/tabular/model_config.yaml b/experiments/configs/tabular/model_config.yaml similarity index 100% rename from configs/tabular/model_config.yaml rename to experiments/configs/tabular/model_config.yaml diff --git a/configs/tabular/optuna_config.yaml b/experiments/configs/tabular/optuna_config.yaml similarity index 100% rename from configs/tabular/optuna_config.yaml rename to experiments/configs/tabular/optuna_config.yaml diff --git a/experiments/models/__init__.py b/experiments/models/__init__.py index fe5d623..432d58e 100644 --- a/experiments/models/__init__.py +++ b/experiments/models/__init__.py @@ -1,3 +1,4 @@ from .liver_tumor import ResNet50LiverTumor from .aki import AKIClassifier -from .ecg import ResNetPlus, PatchTST \ No newline at end of file +from .ecg import ResNetPlus, PatchTST +from .tab_resnet import TabResNet \ No newline at end of file diff --git a/experiments/scripts/analyze_wine_quality.py b/experiments/scripts/analyze_wine_quality.py index 6f98251..2308e25 100644 --- a/experiments/scripts/analyze_wine_quality.py +++ b/experiments/scripts/analyze_wine_quality.py @@ -7,7 +7,7 @@ 2. Generates explanations using multiple frameworks (PnPXAI, Captum, OmniXAI, OpenXAI, AutoXAI) 3. Evaluates explanations 4. Generates a LaTeX table in markdown format -5. Saves results to results/Wine Quality/ +5. 
Saves results to results/wine_quality/ Usage: python analyze_wine_quality.py [--n-samples N] [--seed SEED] [--verbose] @@ -29,150 +29,29 @@ from pathlib import Path from datetime import datetime from typing import Dict, Any, Tuple, List, Optional, Union -from collections import defaultdict, deque +from collections import defaultdict import numpy as np import pandas as pd import torch import torch.nn as nn -import xgboost as xgb import yaml from tqdm import tqdm +from experiments.utils import ( + set_seed, + get_winequality_dataset, + winequality_transform, + winequality_invert_transform, + winequality_find_idx, + get_winequality_model, + TorchModelForXGBoost, +) + # Suppress warnings warnings.filterwarnings('ignore') warnings.filterwarnings('ignore', message='You are providing multiple inputs for Lime / Kernel SHAP attributions') -# ============================================================================ -# Data Loading Utilities -# ============================================================================ - -def load_wine_quality(data_dir: str = "data/Wine Quality") -> Tuple: - """Load Wine Quality dataset and feature metadata.""" - data_path = Path(data_dir) - - X_train = np.load(data_path / "X_train.npy") - X_test = np.load(data_path / "X_test.npy") - y_train = np.load(data_path / "y_train.npy") - y_test = np.load(data_path / "y_test.npy") - - with open(data_path / "feature_metadata.pkl", "rb") as f: - feature_metadata = pickle.load(f) - - raw_data = pd.read_csv(data_path / "raw_data.csv") - - return X_train, X_test, y_train, y_test, feature_metadata, raw_data - - -def transform(X: pd.DataFrame, feature_metadata: Dict[str, Any]) -> np.ndarray: - """Transform raw data using feature metadata encoders.""" - input_data = [] - for k, v in feature_metadata.items(): - if np.isin('missing', X[[k]].values): - X[[k]] = X[[k]].replace("missing", v['encoder'].categories_[0][-1]) - preprocessed = v['encoder'].transform(X[[k]].values) - if v['type'] == 'categorical': - preprocessed = preprocessed.toarray() - input_data.append(preprocessed) - - input_array = np.concatenate(input_data, axis=1) - return input_array - - -def invert_transform(input_array: np.ndarray, feature_metadata: Dict[str, Any]) -> pd.DataFrame: - """Invert transformed data back to original feature space.""" - inverted_data = {} - - for col, meta in feature_metadata.items(): - if meta['type'] == 'categorical': - start_idx, end_idx = meta['index'][0], meta['index'][-1] + 1 - cat_data = input_array[:, start_idx:end_idx] - inverted_col = meta['encoder'].inverse_transform(cat_data) - inverted_data[col] = inverted_col.flatten() - else: - idx = meta['index'] - num_data = input_array[:, idx].reshape(-1, 1) - inverted_col = meta['encoder'].inverse_transform(num_data) - inverted_data[col] = inverted_col.flatten() - - return pd.DataFrame(inverted_data) - - -def find_idx(a: list, b: list) -> list: - """Find permutation index where a[idx] = b.""" - if sorted(a) != sorted(b): - return None - - pos_map = defaultdict(deque) - for i, val in enumerate(a): - pos_map[val].append(i) - - idx = [] - for val in b: - idx.append(pos_map[val].popleft()) - - return idx - - -# ============================================================================ -# Model Loading Utilities -# ============================================================================ - -class TorchModelForXGBoost(nn.Module): - """PyTorch wrapper for XGBoost models.""" - - def __init__(self, xgb_model: xgb.XGBClassifier): - super().__init__() - self.xgb_model = xgb_model - 
self._dummy_layer = nn.Linear(1, 1) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - if x.ndim >= 3: - x = x.squeeze(0) - - if x.requires_grad: - out = self.xgb_model.predict_proba(x.detach().cpu().numpy()) - else: - out = self.xgb_model.predict_proba(x.cpu().numpy()) - - return torch.from_numpy(out) - - -def load_model(model_type: str, model_path: Union[str, Path], input_dim: int = None, - output_dim: int = 2, **kwargs) -> Union[xgb.XGBClassifier, nn.Module]: - """Load a trained model.""" - model_path = Path(model_path) - - if model_type == "xgb": - model = xgb.XGBClassifier() - model.load_model(str(model_path)) - return model - - elif model_type == "tab_resnet": - if input_dim is None: - raise ValueError("input_dim is required for tab_resnet model") - - from experiments.models.tab_resnet import TabResNet - - num_blocks = kwargs.get('num_blocks', 1) - model = TabResNet(input_dim, output_dim, num_blocks=num_blocks) - model.load_state_dict(torch.load(model_path)) - model.eval() - return model - - else: - raise ValueError(f"Unknown model type: {model_type}") - - -def wrap_model_for_pytorch(model: Union[xgb.XGBClassifier, nn.Module], - model_type: str) -> nn.Module: - """Wrap model as PyTorch module if needed.""" - if model_type == "xgb": - return TorchModelForXGBoost(model) - else: - return model - - # ============================================================================ # Explainer Validation # ============================================================================ @@ -210,15 +89,6 @@ def validate_explainer_args(framework: str, model: str, explainer: str) -> None: # Explanation Generation Functions # ============================================================================ -def set_seeds(seed: int = 42): - """Set random seeds for reproducibility.""" - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(seed) - - def explain_with_pnpxai( model: nn.Module, X_test: np.ndarray, @@ -456,7 +326,7 @@ def evaluate(self, inputs, targets, attrs): # Re-set seeds before generating explanations for reproducibility logger.info("Re-setting random seeds for reproducible explanation generation...") - set_seeds(config['optuna'].get('seed', 42)) + set_seed(config['optuna'].get('seed', 42)) # Generate explanations opt_explainer = opt_results.explainer @@ -586,7 +456,7 @@ def explain_with_omnixai( raise ValueError("OmniXAI requires XGBoost model") # Create transformation functions - transform_fn = functools.partial(transform, feature_metadata=feature_metadata) + transform_fn = functools.partial(winequality_transform, feature_metadata=feature_metadata) def prep(z): return transform_fn(z.data.fillna("missing")) @@ -601,7 +471,7 @@ def prep(z): ) # Prepare test instances - test_instances = invert_transform(X_test, feature_metadata).fillna("missing") + test_instances = winequality_invert_transform(X_test, feature_metadata).fillna("missing") # Set parameters params = { @@ -617,7 +487,7 @@ def prep(z): scores = [] for i in range(test_instances.shape[0]): exp = exp_obj[explainer_nm].get_explanations(i) - sorted_idx = find_idx(exp['features'], exp['instance'].columns.tolist()) + sorted_idx = winequality_find_idx(exp['features'], exp['instance'].columns.tolist()) scores.append([exp['scores'][j] for j in sorted_idx]) explanations = np.array(scores) @@ -987,9 +857,9 @@ def run_single_explanation(framework: str, model_name: str, explainer: str, } -def run_all_experiments(data_dir: str = "data/Wine Quality", - config_dir: str 
= "configs/tabular", - results_dir: str = "results/Wine Quality", +def run_all_experiments(data_dir: str = "data/wine_quality", + config_dir: str = "experiments/configs/tabular", + results_dir: str = "results/wine_quality", n_samples: int = 25, seed: int = 42, verbose: bool = False): @@ -1005,12 +875,12 @@ def run_all_experiments(data_dir: str = "data/Wine Quality", logger.info("="*70) # Set seeds - set_seeds(seed) + set_seed(seed) logger.info(f"Random seed: {seed}") # Load data logger.info(f"Loading data from: {data_dir}") - X_train, X_test, y_train, y_test, feature_metadata, raw_data = load_wine_quality(data_dir) + X_train, X_test, y_train, y_test, feature_metadata, raw_data = get_winequality_dataset(data_dir) logger.info(f"Data loaded: Train={len(X_train)}, Test={len(X_test)}, Features={X_test.shape[1]}") # Load configs @@ -1026,10 +896,10 @@ def run_all_experiments(data_dir: str = "data/Wine Quality", models = {} logger.info("Loading models...") - xgb_model = load_model("xgb", Path(data_dir) / "xgb_model.json") - models["xgb"] = wrap_model_for_pytorch(xgb_model, "xgb") + xgb_model = get_winequality_model("xgb", Path(data_dir) / "xgb_model.json") + models["xgb"] = xgb_model - resnet_model = load_model("tab_resnet", Path(data_dir) / "resnet_model.pth", + resnet_model = get_winequality_model("tab_resnet", Path(data_dir) / "resnet_model.pth", input_dim=X_train.shape[1], output_dim=2, num_blocks=1) models["tab_resnet"] = resnet_model @@ -1052,7 +922,7 @@ def run_all_experiments(data_dir: str = "data/Wine Quality", validate_explainer_args(framework, model_name, explainer) # Reset seeds for each experiment - set_seeds(seed) + set_seed(seed) # Run explanation model = models[model_name] @@ -1089,7 +959,7 @@ def run_all_experiments(data_dir: str = "data/Wine Quality", # LaTeX Table Generation # ============================================================================ -def collect_results(results_dir="results/Wine Quality"): +def collect_results(results_dir="results/wine_quality"): """Collect all experiment results.""" results_path = Path(results_dir) data = defaultdict(lambda: defaultdict(lambda: defaultdict(dict))) @@ -1267,18 +1137,18 @@ def main(): formatter_class=argparse.RawDescriptionHelpFormatter ) - parser.add_argument("--n-samples", type=int, default=25, + parser.add_argument("--n_samples", type=int, default=25, help="Number of samples for LIME/SHAP (default: 25)") parser.add_argument("--seed", type=int, default=42, help="Random seed (default: 42)") parser.add_argument("--verbose", action="store_true", help="Enable verbose logging") - parser.add_argument("--data-dir", type=str, default="data/Wine Quality", - help="Data directory (default: data/Wine Quality)") - parser.add_argument("--config-dir", type=str, default="configs/tabular", - help="Config directory (default: configs/tabular)") - parser.add_argument("--results-dir", type=str, default="results/Wine Quality", - help="Results directory (default: results/Wine Quality)") + parser.add_argument("--data_dir", type=str, default="data/wine_quality", + help="Data directory (default: data/wine_quality)") + parser.add_argument("--config_dir", type=str, default="experiments/configs/tabular", + help="Config directory (default: experiments/configs/tabular)") + parser.add_argument("--results_dir", type=str, default="results/wine_quality", + help="Results directory (default: results/wine_quality)") args = parser.parse_args() diff --git a/experiments/utils/__init__.py b/experiments/utils/__init__.py index 4c8ead1..33a1c64 100644 --- 
a/experiments/utils/__init__.py +++ b/experiments/utils/__init__.py @@ -24,6 +24,10 @@ get_imagenet_sample_from_hf, get_aki_dataset, get_ecg_dataset_from_hf, + get_winequality_dataset, + winequality_transform, + winequality_invert_transform, + winequality_find_idx, ) from .models import ( @@ -40,7 +44,9 @@ vilt_collate_fn, get_aki_model_from_hf, get_ecg_resnet_from_hf, - get_ecg_patchtst_from_hf + get_ecg_patchtst_from_hf, + TorchModelForXGBoost, + get_winequality_model, ) __all__ = [ @@ -56,10 +62,12 @@ 'get_livertumor_dataset', 'get_livertumor_dataset_from_hf', 'get_imagenet_sample_from_hf', 'get_aki_dataset', + 'get_winequality_dataset', 'winequality_transform', 'winequality_invert_transform', 'winequality_find_idx', # models 'get_torchvision_model', 'get_livertumor_model', 'get_livertumor_model_from_hf', 'Bert', 'get_bert_model', 'bert_collate_fn', 'get_bert_tokenizer', 'Vilt', 'get_vilt_model', 'get_vilt_processor', 'vilt_collate_fn', 'get_aki_model_from_hf', + 'TorchModelForXGBoost', 'get_winequality_model', ] \ No newline at end of file diff --git a/experiments/utils/datasets.py b/experiments/utils/datasets.py index ed0b07c..4e66b96 100644 --- a/experiments/utils/datasets.py +++ b/experiments/utils/datasets.py @@ -1,7 +1,9 @@ -from typing import Optional, List, Tuple import os +import pickle import json import requests +from collections import defaultdict, deque +from typing import Optional, List, Tuple, Dict, Any from tqdm import tqdm from io import BytesIO from PIL import Image @@ -433,4 +435,71 @@ def get_ecg_dataset_from_hf(repo_id: str = "enver1323/ucr-twoleadecg") -> Tensor return TensorDataset( torch.from_numpy(x_data), torch.from_numpy(y_data) - ) \ No newline at end of file + ) + + +def get_winequality_dataset(data_dir: str = "data/wine_quality") -> Tuple: + """Load Wine Quality dataset and feature metadata.""" + data_path = Path(data_dir) + + X_train = np.load(data_path / "X_train.npy") + X_test = np.load(data_path / "X_test.npy") + y_train = np.load(data_path / "y_train.npy") + y_test = np.load(data_path / "y_test.npy") + + with open(data_path / "feature_metadata.pkl", "rb") as f: + feature_metadata = pickle.load(f) + + raw_data = pd.read_csv(data_path / "raw_data.csv") + + return X_train, X_test, y_train, y_test, feature_metadata, raw_data + + +def winequality_transform(X: pd.DataFrame, feature_metadata: Dict[str, Any]) -> np.ndarray: + """Transform raw data using feature metadata encoders.""" + input_data = [] + for k, v in feature_metadata.items(): + if np.isin('missing', X[[k]].values): + X[[k]] = X[[k]].replace("missing", v['encoder'].categories_[0][-1]) + preprocessed = v['encoder'].transform(X[[k]].values) + if v['type'] == 'categorical': + preprocessed = preprocessed.toarray() + input_data.append(preprocessed) + + input_array = np.concatenate(input_data, axis=1) + return input_array + + +def winequality_invert_transform(input_array: np.ndarray, feature_metadata: Dict[str, Any]) -> pd.DataFrame: + """Invert transformed data back to original feature space.""" + inverted_data = {} + + for col, meta in feature_metadata.items(): + if meta['type'] == 'categorical': + start_idx, end_idx = meta['index'][0], meta['index'][-1] + 1 + cat_data = input_array[:, start_idx:end_idx] + inverted_col = meta['encoder'].inverse_transform(cat_data) + inverted_data[col] = inverted_col.flatten() + else: + idx = meta['index'] + num_data = input_array[:, idx].reshape(-1, 1) + inverted_col = meta['encoder'].inverse_transform(num_data) + inverted_data[col] = inverted_col.flatten() + + return 
pd.DataFrame(inverted_data) + + +def winequality_find_idx(a: list, b: list) -> list: + """Find permutation index where a[idx] = b.""" + if sorted(a) != sorted(b): + return None + + pos_map = defaultdict(deque) + for i, val in enumerate(a): + pos_map[val].append(i) + + idx = [] + for val in b: + idx.append(pos_map[val].popleft()) + + return idx \ No newline at end of file diff --git a/experiments/utils/models.py b/experiments/utils/models.py index 31e597b..2e5fb08 100644 --- a/experiments/utils/models.py +++ b/experiments/utils/models.py @@ -1,10 +1,15 @@ import torch import torchvision +import torch.nn as nn from torchvision import transforms from transformers import BertTokenizer, BertForSequenceClassification from transformers import ViltForQuestionAnswering, ViltProcessor -from experiments.models import ResNet50LiverTumor, AKIClassifier, ResNetPlus, PatchTST +import xgboost as xgb +from typing import Union +from pathlib import Path + +from experiments.models import ResNet50LiverTumor, AKIClassifier, ResNetPlus, PatchTST, TabResNet def get_torchvision_model(model_name): @@ -135,8 +140,59 @@ def vilt_collate_fn(batch, processor=None, label2id=None): def get_aki_model_from_hf(repo_id: str = "enver1323/aki-classifier") -> AKIClassifier: return AKIClassifier.from_pretrained(repo_id) + def get_ecg_resnet_from_hf(repo_id: str = "enver1323/resnetplus-classification-ecg") -> ResNetPlus: return ResNetPlus.from_pretrained(repo_id) + def get_ecg_patchtst_from_hf(repo_id: str = "enver1323/patchtst-classification-ecg") -> PatchTST: return PatchTST.from_pretrained(repo_id) + + +class TorchModelForXGBoost(nn.Module): + """PyTorch wrapper for XGBoost models.""" + + def __init__(self, xgb_model: xgb.XGBClassifier): + super().__init__() + self.xgb_model = xgb_model + self._dummy_layer = nn.Linear(1, 1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim >= 3: + x = x.squeeze(0) + + if x.requires_grad: + out = self.xgb_model.predict_proba(x.detach().cpu().numpy()) + else: + out = self.xgb_model.predict_proba(x.cpu().numpy()) + + return torch.from_numpy(out) + + +def get_winequality_model( + model_type: str, + model_path: Union[str, Path], + input_dim: int = None, + output_dim: int = 2, + **kwargs +) -> Union[xgb.XGBClassifier, nn.Module]: + """Load a trained model.""" + model_path = Path(model_path) + + if model_type == "xgb": + model = xgb.XGBClassifier() + model.load_model(str(model_path)) + return TorchModelForXGBoost(model) + + elif model_type == "tab_resnet": + if input_dim is None: + raise ValueError("input_dim is required for tab_resnet model") + + num_blocks = kwargs.get('num_blocks', 1) + model = TabResNet(input_dim, output_dim, num_blocks=num_blocks) + model.load_state_dict(torch.load(model_path)) + model.eval() + return model + + else: + raise ValueError(f"Unknown model type: {model_type}") \ No newline at end of file From a5829c214f65b4c3e973dbf3cc4638c590de5a45 Mon Sep 17 00:00:00 2001 From: seongun-kim Date: Tue, 25 Nov 2025 14:45:09 +0900 Subject: [PATCH 19/20] fix: export missing utils for ECG experiment --- experiments/utils/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/experiments/utils/__init__.py b/experiments/utils/__init__.py index 33a1c64..10315c1 100644 --- a/experiments/utils/__init__.py +++ b/experiments/utils/__init__.py @@ -61,13 +61,13 @@ 'VQADataset', 'get_vqa_dataset', 'get_livertumor_dataset', 'get_livertumor_dataset_from_hf', 'get_imagenet_sample_from_hf', - 'get_aki_dataset', + 'get_aki_dataset', 
'get_ecg_dataset_from_hf', 'get_winequality_dataset', 'winequality_transform', 'winequality_invert_transform', 'winequality_find_idx', # models 'get_torchvision_model', 'get_livertumor_model', 'get_livertumor_model_from_hf', 'Bert', 'get_bert_model', 'bert_collate_fn', 'get_bert_tokenizer', 'Vilt', 'get_vilt_model', 'get_vilt_processor', 'vilt_collate_fn', - 'get_aki_model_from_hf', + 'get_aki_model_from_hf', 'get_ecg_resnet_from_hf', 'get_ecg_patchtst_from_hf', 'TorchModelForXGBoost', 'get_winequality_model', ] \ No newline at end of file From 53f021faa1aadc6ce094b83eb3f8a1a80a36f372 Mon Sep 17 00:00:00 2001 From: seongun-kim Date: Tue, 25 Nov 2025 15:20:55 +0900 Subject: [PATCH 20/20] docs: update README and contributors --- README.md | 33 ++++++++++++++++++++++++++------- setup.py | 2 +- 2 files changed, 27 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 0a8c225..cd09b00 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,17 @@ This repository contains the official implementation and experimental code for t **PnPXAI** is a framework designed to overcome the challenges in generating reliable explanations for complex AI models through an end-to-end automated pipeline. This pipeline includes model architecture detection, applicable explainer recommendation, objective-driven hyperparameter optimization (HPO), and evaluation. This repository provides the necessary code to set up the environment and reproduce the key experiments presented in the paper, demonstrating PnPXAI's effectiveness across various tasks and modalities. +## Table of Contents +- [Setup](#setup) +- [Running Experiments](#running-experiments) + - [Experiment 1: ImageNet Explanation](#experiment-1-imagenet-explanation) + - [Experiment 2: Hyperparameter Impact Analysis](#experiment-2-hyperparameter-impact-analysis) + - [Experiment 3: Liver Tumor Explanation](#experiment-3-liver-tumor-explanation) + - [Experiment 4: Acute Kidney Injury (AKI) Explanation](#experiment-4-acute-kidney-injury-aki-explanation) + - [Experiment 5: ECG Explanation](#experiment-5-ecg-explanation) + - [Experiment 6: Wine Quality Explanation](#experiment-6-wine-quality-explanation) +- [License](#license) + ## Setup We provide two ways to set up the environment: using Docker (recommended for exact reproducibility) or manual installation. @@ -89,7 +100,7 @@ This experiment qualitatively analyzes the effect of HPO (optimizing for AbPC) o #### Data and Model - * **Data (ImageNet)**: The subset of ImageNet1k for this experiment, one sample per label, a totle of 1,000 samples, is hosted on Hugging Face Hub: [➡️ geonhyeongkim/imagenet-samples-for-pnpxai-experiments](https://huggingface.co/datasets/geonhyeongkim/imagenet-samples-for-pnpxai-experiments). The script **automatically downloads** the necessary files when first executed. For more details on the data loading process, refer to the `get_imagenet_samples_from_hf` function within [`experiments/utils/datasets.py`](./experiments/utils/datasets.py). + * **Data (ImageNet)**: The subset of ImageNet1k for this experiment, one sample per label, a total of 1,000 samples, is hosted on Hugging Face Hub: [➡️ geonhyeongkim/imagenet-samples-for-pnpxai-experiments](https://huggingface.co/datasets/geonhyeongkim/imagenet-samples-for-pnpxai-experiments). The script **automatically downloads** the necessary files when first executed. For more details on the data loading process, refer to the `get_imagenet_samples_from_hf` function within [`experiments/utils/datasets.py`](./experiments/utils/datasets.py). 
* **Model (ResNet-18):** This script uses a standard `ResNet-18` model pre-trained on ImageNet, loaded directly from `torchvision.models`. @@ -209,7 +220,7 @@ This experiment analyzes the effect of HPO (optimizing for AbPC) on explanations #### Data and Model - * **Data (MIMIC III):** The **MIMIC III dataset** used in this experiment is hosted on PhysioNet: [➡️ MIMIC-III Clinical Database](https://physionet.org/content/mimiciii/1.4/). This work utilizes the latest version (1.4) of [MIMIC III dataset](https://doi.org/10.13026/C2XW26). To use the analysis script, the dataset needs to be downloaded, built and formatted. Having downloaded the dataset from the official source [MIMIC III dataset](https://doi.org/10.13026/C2XW26), users are prompted to build the PostgreSQL version of the dataset with the official [Github code](https://github.com/MIT-LCP/mimic-code/tree/main/mimic-iii/buildmimic/postgres). Subsequently, the built dataset can be formatted with the set of scripts listed in [`/data/mimiciii`](./data/mimiciii/) directory. Thorough instructions on data transformation are provided in [`README.md`](./data/mimiciii/README.md). Provided that formatted data is generated, the analysis script **loads** the necessary files when first executed. For more details on the data loading process, refer to the `get_aki_dataset` function within [`experiments/utils/datasets.py`](./experiments/utils/datasets.py). + * **Data (MIMIC III):** The **MIMIC III dataset** used in this experiment is hosted on PhysioNet: [➡️ MIMIC-III Clinical Database](https://physionet.org/content/mimiciii/1.4/). This work utilizes the latest version of [MIMIC III dataset](https://doi.org/10.13026/C2XW26). To use the analysis script, the dataset needs to be downloaded, built and formatted. Having downloaded the dataset from the official source [MIMIC III dataset](https://doi.org/10.13026/C2XW26), users are prompted to build the PostgreSQL version of the dataset with the official [Github code](https://github.com/MIT-LCP/mimic-code/tree/main/mimic-iii/buildmimic/postgres). Subsequently, the built dataset can be formatted with the set of scripts listed in [`/data/mimiciii`](./data/mimiciii/) directory. Thorough instructions on data transformation are provided in [`README.md`](./data/mimiciii/README.md). Provided that formatted data is generated, the analysis script **loads** the necessary files when first executed. For more details on the data loading process, refer to the `get_aki_dataset` function within [`experiments/utils/datasets.py`](./experiments/utils/datasets.py). * **Model (AKI Classifier):** The pre-trained **Linear model** adapted for this task is hosted on Hugging Face Hub: [➡️ enver1323/aki-classifier](https://huggingface.co/enver1323/aki-classifier). Similar to the dataset, the script **automatically downloads** the model weights. The model architecture is defined in [`experiments/models/aki.py`](./experiments/models/aki.py). For more details on model loading, refer to the `get_aki_model_from_hf` function within [`experiments/utils/models.py`](./experiments/utils/models.py). @@ -315,12 +326,12 @@ docker build -t pnpxai_wine_quality:latest -f Dockerfile.wine_quality . #### Arguments - * `--n-samples `: Number of samples for sampling-based explainers (LIME/SHAP). Defaults to `25`. + * `--n_samples `: Number of samples for sampling-based explainers (LIME/SHAP). Defaults to `25`. * `--seed `: Random seed for reproducibility. Defaults to `42`. * `--verbose`: Enable detailed logging. 
- * `--data-dir `: Path to data directory. Defaults to `data/wine_quality`. - * `--config-dir `: Path to config directory. Defaults to `experiments/configs/tabular`. - * `--results-dir `: Path to results directory. Defaults to `results/wine_quality`. + * `--data_dir `: Path to data directory. Defaults to `data/wine_quality`. + * `--config_dir `: Path to config directory. Defaults to `experiments/configs/tabular`. + * `--results_dir `: Path to results directory. Defaults to `results/wine_quality`. #### Output @@ -337,4 +348,12 @@ Will be updated later. ## License -This project's code is licensed under the [MIT License](https://www.google.com/search?q=LICENSE). The used dataset is derived from LiTS and retains its original [CC-BY-NC-SA-4.0 License](https://creativecommons.org/licenses/by-nc-sa/4.0/). +This project's code is licensed under the [MIT License](https://www.google.com/search?q=LICENSE). + +The datasets used in the experiments are derived from existing benchmarks and are subject to their original licenses: + +* **ImageNet:** Subject to the [ImageNet Terms of Access](https://image-net.org/download.php). The dataset is restricted to non-commercial research and educational purposes only. Users must obtain access via the official website and agree to the Terms of Access. +* **LiTS (Liver Tumor):** [CC-BY-NC-SA-4.0 License](https://creativecommons.org/licenses/by-nc-sa/4.0/) +* **MIMIC-III (AKI):** Subject to the [PhysioNet Credentialed Health Data License 1.5.0](https://physionet.org/content/mimiciii/view-license/1.4/). Due to license restrictions, we do not distribute the data. Users must obtain access via PhysioNet and agree to the data use agreement. +* **ECG:** Derived from the UCR Time Series Classification Archive. Free for research and educational use. ([UCR Archive](https://www.cs.ucr.edu/~eamonn/time_series_data_2018/)) +* **Wine Quality:** [CC BY 4.0 License](https://creativecommons.org/licenses/by/4.0/) diff --git a/setup.py b/setup.py index 45ddeb1..1446cf7 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ setup( name='pnpxai-experiments', version='0.1.0', - author='Seongun Kim', + author='Seongun Kim, Geonhyeong Kim, Enver Menadjiev, Chanwoo Lee', author_email='seongun@kaist.ac.kr', description='Experiments using the PnPXAI library.', long_description=long_description,
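
For reference, below is a minimal, illustrative sketch (not part of any patch above) of how the wine-quality helpers exported by this series could be used together once `data/wine_quality` has been prepared. It simply mirrors the loading calls made in `run_all_experiments`; the printed shapes are expectations for the two-class setup, and paths follow the renamed `data/wine_quality` layout introduced above.

```python
# Illustrative sketch only -- not part of the patch series. It assumes the
# data/wine_quality directory has been prepared (see download.sh / preprocess.py)
# and mirrors the loading calls made in run_all_experiments above.
from pathlib import Path

import torch

from experiments.utils import (
    set_seed,
    get_winequality_dataset,
    get_winequality_model,
)

set_seed(42)
data_dir = Path("data/wine_quality")

# Arrays, encoder metadata, and the raw CSV, as returned by get_winequality_dataset.
X_train, X_test, y_train, y_test, feature_metadata, raw_data = (
    get_winequality_dataset(str(data_dir))
)

# "xgb" models come back already wrapped in TorchModelForXGBoost, so both
# model types expose a PyTorch-style forward pass for the explainers.
xgb_model = get_winequality_model("xgb", data_dir / "xgb_model.json")
resnet_model = get_winequality_model(
    "tab_resnet",
    data_dir / "resnet_model.pth",
    input_dim=X_train.shape[1],
    output_dim=2,
    num_blocks=1,
)

inputs = torch.from_numpy(X_test[:4]).float()
print(xgb_model(inputs).shape)     # expected: torch.Size([4, 2]) class probabilities
print(resnet_model(inputs).shape)  # expected: torch.Size([4, 2]) logits
```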