From c2702e23adcc0f81d802111f394d75622e69385a Mon Sep 17 00:00:00 2001 From: Daniel Miller Date: Tue, 11 Oct 2022 16:16:45 +1000 Subject: [PATCH 01/29] Trying something meaningful --- recognition/Miller/helloworld.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 recognition/Miller/helloworld.py diff --git a/recognition/Miller/helloworld.py b/recognition/Miller/helloworld.py new file mode 100644 index 0000000000..e69de29bb2 From 15ca543843842bbc17bfc37557903e4a1a70b011 Mon Sep 17 00:00:00 2001 From: daniel Date: Tue, 11 Oct 2022 16:28:39 +1000 Subject: [PATCH 02/29] Created required files --- recognition/Miller/README.MD | 0 recognition/Miller/dataset.py | 0 recognition/Miller/modules.py | 0 recognition/Miller/predict.py | 0 recognition/Miller/train.py | 0 5 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 recognition/Miller/README.MD create mode 100644 recognition/Miller/dataset.py create mode 100644 recognition/Miller/modules.py create mode 100644 recognition/Miller/predict.py create mode 100644 recognition/Miller/train.py diff --git a/recognition/Miller/README.MD b/recognition/Miller/README.MD new file mode 100644 index 0000000000..e69de29bb2 diff --git a/recognition/Miller/dataset.py b/recognition/Miller/dataset.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/recognition/Miller/modules.py b/recognition/Miller/modules.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/recognition/Miller/predict.py b/recognition/Miller/predict.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/recognition/Miller/train.py b/recognition/Miller/train.py new file mode 100644 index 0000000000..e69de29bb2 From 032ac04b23c9ee22a6234f6f2940a01a8703abf3 Mon Sep 17 00:00:00 2001 From: daniel Date: Fri, 14 Oct 2022 14:03:53 +1000 Subject: [PATCH 03/29] Provided brief description for each file of what is to be included --- recognition/Miller/README.MD | 6 ++++++ recognition/Miller/dataset.py | 5 +++++ recognition/Miller/modules.py | 6 ++++++ recognition/Miller/predict.py | 11 +++++++++++ recognition/Miller/train.py | 10 ++++++++++ 5 files changed, 38 insertions(+) diff --git a/recognition/Miller/README.MD b/recognition/Miller/README.MD index e69de29bb2..30c30388f5 100644 --- a/recognition/Miller/README.MD +++ b/recognition/Miller/README.MD @@ -0,0 +1,6 @@ +“README.MD” to sufficiently document your project (see Section 6). + + + +Create a generative model of one of the OASIS brain, ADNI brain or the OAI AKOA knee data set (see +Appendix for links) using a VQVAE [11] or VQVAE2 [12] that has a “reasonably clear image” and a Structured Similarity (SSIM) of over 0.6. [Hard Difficulty] diff --git a/recognition/Miller/dataset.py b/recognition/Miller/dataset.py index e69de29bb2..072ae372f3 100644 --- a/recognition/Miller/dataset.py +++ b/recognition/Miller/dataset.py @@ -0,0 +1,5 @@ +""" +dataset.py" containing the data loader for loading and preprocessing your data +""" + +# Load Data and Process it \ No newline at end of file diff --git a/recognition/Miller/modules.py b/recognition/Miller/modules.py index e69de29bb2..40d96b4b77 100644 --- a/recognition/Miller/modules.py +++ b/recognition/Miller/modules.py @@ -0,0 +1,6 @@ +""" +“modules.py" containing the source code of the components of your model. 
Each component must be +implementated as a class or a function +""" + +# Structure of VQ-VAR Archecture \ No newline at end of file diff --git a/recognition/Miller/predict.py b/recognition/Miller/predict.py index e69de29bb2..9060c5d4b6 100644 --- a/recognition/Miller/predict.py +++ b/recognition/Miller/predict.py @@ -0,0 +1,11 @@ +""" +“predict.py" showing example usage of your trained model. Print out any results and / or provide visualisations where applicable +""" +import tensorflow as tf +import pathlib +import numpy as np +from matplotlib import pyplot +from matplotlib import image +import glob + +# Show how well program performs \ No newline at end of file diff --git a/recognition/Miller/train.py b/recognition/Miller/train.py index e69de29bb2..a6e6c48ac7 100644 --- a/recognition/Miller/train.py +++ b/recognition/Miller/train.py @@ -0,0 +1,10 @@ +""" +“train.py" containing the source code for training, validating, testing and saving your model. The model +should be imported from “modules.py” and the data loader should be imported from “dataset.py”. Make +sure to plot the losses and metrics during training +""" + +import dataset +import modules + +# Train VAE \ No newline at end of file From 446c8baaa7a61ffd07c89e086c9fb367d60bbc48 Mon Sep 17 00:00:00 2001 From: dapmiller Date: Fri, 14 Oct 2022 16:43:26 +1000 Subject: [PATCH 04/29] Added 2 new functions into datasey.py which one downloads the oasis dataset and the other loads the training images into a numpy array --- recognition/Miller/dataset.py | 77 ++++++++++++++++++++++++++++++++++- recognition/Miller/train.py | 10 ++++- 2 files changed, 84 insertions(+), 3 deletions(-) diff --git a/recognition/Miller/dataset.py b/recognition/Miller/dataset.py index 072ae372f3..f96ef8659d 100644 --- a/recognition/Miller/dataset.py +++ b/recognition/Miller/dataset.py @@ -1,5 +1,80 @@ """ dataset.py" containing the data loader for loading and preprocessing your data """ +import tensorflow as tf +import pathlib +import glob +import numpy as np +from matplotlib import pyplot +from matplotlib import image -# Load Data and Process it \ No newline at end of file +# Load Data and Process it +# Download the Oasis Data +def download_oasis (): + + dataset_url = "https://cloudstor.aarnet.edu.au/plus/s/n5aZ4XX1WBKp6HZ/download" + data_dir = tf.keras.utils.get_file(origin=dataset_url,fname='oa-sis' ,untar=True) + data_dir = pathlib.Path(data_dir) + data_dir = data_dir + + # unzip data to current directory + #! 
unzip /root/.keras/datasets/oa-sis.tar.gz + +# Loads the training images (non segmented) in the path and store in numpy array +def load_training (path): + image_list = [] + + # Iterate through all paths and convert to 'png' + for filename in glob.glob(path + '/*.png'): + im=image.imread (filename) + image_list.append(im) + + print('train_X shape:',np.array(image_list).shape) + train_set = np.array(image_list, dtype=np.float32) + return train_set +""" +# Normalizes training images and adds 4th dimention +def process_training (data_set): + train_set = data_set + train_set = (train_set - np.mean(train_set))/ np.std(train_set) + train_set= (train_set- np.amin(train_set))/ np.amax(train_set- np.amin(train_set)) + train_set = train_set [:,:,:,np.newaxis] + + return train_set + +# loads labels images and map pixel values to class indices and convert image data type to unit8 +def load_labels (path): + image_list =[] + + for filename in glob.glob(path+'/*.png'): + im=image.imread (filename) + one_hot = np.zeros((im.shape[0], im.shape[1])) + for i, unique_value in enumerate(np.unique(im)): + one_hot[:, :][im == unique_value] = i + image_list.append(one_hot) + + print('train_y shape:',np.array(image_list).shape) + labels = np.array(image_list, dtype=np.uint8) + + pyplot.imshow(labels[2]) + pyplot.show() + + return labels + +# one hot encode label data and convert to numpy array +def process_labels(seg_data): + onehot_Y = [] + for n in range(seg_data.shape[0]): + im = seg_data[n] + n_classes = 4 + one_hot = np.zeros((im.shape[0], im.shape[1], n_classes),dtype=np.uint8) + for i, unique_value in enumerate(np.unique(im)): + one_hot[:, :, i][im == unique_value] = 1 + onehot_Y.append(one_hot) + + onehot_Y =np.array(onehot_Y) + print (onehot_Y.dtype) + #print (np.unique(onehot_validate_Y)) + print (onehot_Y.shape) + + return onehot_Y""" \ No newline at end of file diff --git a/recognition/Miller/train.py b/recognition/Miller/train.py index a6e6c48ac7..6aeb045589 100644 --- a/recognition/Miller/train.py +++ b/recognition/Miller/train.py @@ -4,7 +4,13 @@ sure to plot the losses and metrics during training """ -import dataset +import dataset as data import modules -# Train VAE \ No newline at end of file +# Train VAE +# Download Data and then unzip +#download_oasis() + +train_X = data.load_training ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_train") +pyplot.imshow(train_X[2]) +pyplot.show() \ No newline at end of file From 13144c6100173e3479054a2c1748c0a160da666a Mon Sep 17 00:00:00 2001 From: dapmiller Date: Sat, 15 Oct 2022 14:42:42 +1000 Subject: [PATCH 05/29] Add comments to function for explanation and understanding in dataset.py file for normalising and preprocessing training data --- recognition/Miller/dataset.py | 41 ++++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/recognition/Miller/dataset.py b/recognition/Miller/dataset.py index f96ef8659d..6e208e39f0 100644 --- a/recognition/Miller/dataset.py +++ b/recognition/Miller/dataset.py @@ -1,47 +1,52 @@ """ dataset.py" containing the data loader for loading and preprocessing your data """ + import tensorflow as tf -import pathlib import glob import numpy as np from matplotlib import pyplot from matplotlib import image -# Load Data and Process it -# Download the Oasis Data +# Download the Oasis Data as zip file. 
Will need to extract it manually afterwards def download_oasis (): dataset_url = "https://cloudstor.aarnet.edu.au/plus/s/n5aZ4XX1WBKp6HZ/download" - data_dir = tf.keras.utils.get_file(origin=dataset_url,fname='oa-sis' ,untar=True) - data_dir = pathlib.Path(data_dir) - data_dir = data_dir - - # unzip data to current directory - #! unzip /root/.keras/datasets/oa-sis.tar.gz + # Download file from URL Path, origin=path, fname=file name, untar=compress file + tf.keras.utils.get_file(origin=dataset_url,fname='oa-sis' ,untar=True) + -# Loads the training images (non segmented) in the path and store in numpy array +# Loads the training images (non segmented) from given path and returns an numpy array of arrays def load_training (path): image_list = [] # Iterate through all paths and convert to 'png' for filename in glob.glob(path + '/*.png'): - im=image.imread (filename) + # Read an image from the given filename into an array + im = image.imread (filename) + # Append array to list image_list.append(im) - print('train_X shape:',np.array(image_list).shape) + print('train_X shape:', np.array(image_list).shape) + + # Create array of arrays train_set = np.array(image_list, dtype=np.float32) return train_set -""" + # Normalizes training images and adds 4th dimention def process_training (data_set): - train_set = data_set - train_set = (train_set - np.mean(train_set))/ np.std(train_set) - train_set= (train_set- np.amin(train_set))/ np.amax(train_set- np.amin(train_set)) - train_set = train_set [:,:,:,np.newaxis] + + # Calculate the residuals of the data - each residual is dist from each distribution mean which is now zero + data_set = (data_set - np.mean(data_set)) / np.std(data_set) + # Rescale Data - ratio of dist of each value from min value in each dataset to range of values in each dataset -> value between (0,1) now + # Forces dataset to be same scale, and perseves shape of distribution -> "Squeezed and shifted to fit between 0 and 1" + data_set= (data_set - np.amin(data_set)) / np.amax(data_set - np.amin(data_set)) + # Add 4th dimension + data_set = data_set [:,:,:,np.newaxis] - return train_set + return data_set +""" # loads labels images and map pixel values to class indices and convert image data type to unit8 def load_labels (path): image_list =[] From 6e334b3fa66b36bbff4fdc95f6238bacb3f7fe55 Mon Sep 17 00:00:00 2001 From: dapmiller Date: Sat, 15 Oct 2022 15:21:03 +1000 Subject: [PATCH 06/29] Added functions to dataset.py to load labels and also process them with one hot encoding. Train.py now calls this for training data. 
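For readers unfamiliar with the encoding step, the per-pixel one-hot transform added here boils down to the following minimal NumPy sketch (the toy mask and variable names are illustrative only, not part of the committed code):

import numpy as np

mask = np.array([[0, 1], [2, 3]])                      # toy 2x2 segmentation mask
n_classes = 4                                          # the OASIS segmentation labels use 4 classes
one_hot = np.zeros((*mask.shape, n_classes), dtype=np.uint8)
for i, value in enumerate(np.unique(mask)):
    one_hot[..., i][mask == value] = 1                 # channel i marks pixels of class `value`
print(one_hot.shape)                                   # (2, 2, 4)

Each pixel ends up as a length-4 indicator vector, which is what process_labels produces for every label image.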
--- recognition/Miller/dataset.py | 50 +++++++++++++++++++++++++---------- 1 file changed, 36 insertions(+), 14 deletions(-) diff --git a/recognition/Miller/dataset.py b/recognition/Miller/dataset.py index 6e208e39f0..715121fe0d 100644 --- a/recognition/Miller/dataset.py +++ b/recognition/Miller/dataset.py @@ -29,15 +29,17 @@ def load_training (path): print('train_X shape:', np.array(image_list).shape) - # Create array of arrays + # Create an numpy array to hold all the array turned images train_set = np.array(image_list, dtype=np.float32) return train_set # Normalizes training images and adds 4th dimention def process_training (data_set): + """ Residual Extraction -> Useful for comparing distributions with different means but similar shapes""" # Calculate the residuals of the data - each residual is dist from each distribution mean which is now zero data_set = (data_set - np.mean(data_set)) / np.std(data_set) + """ Min-Max Rescaling -> Useful for comparign distributions with different scales or different shapes""" # Rescale Data - ratio of dist of each value from min value in each dataset to range of values in each dataset -> value between (0,1) now # Forces dataset to be same scale, and perseves shape of distribution -> "Squeezed and shifted to fit between 0 and 1" data_set= (data_set - np.amin(data_set)) / np.amax(data_set - np.amin(data_set)) @@ -46,19 +48,26 @@ def process_training (data_set): return data_set -""" -# loads labels images and map pixel values to class indices and convert image data type to unit8 +# Loads labels images from given path and map pixel values to class indices and convert image data type to unit8 def load_labels (path): image_list =[] + # Iterate through all paths and convert to 'png' for filename in glob.glob(path+'/*.png'): + # Read an image from the given filename into an array im=image.imread (filename) + # Create 'im.shape[0] x im.shape[1]' shaped array of arrays of zeros one_hot = np.zeros((im.shape[0], im.shape[1])) + # Iterate through sorted and unique arrays of given array turned image for i, unique_value in enumerate(np.unique(im)): - one_hot[:, :][im == unique_value] = i + # One hot each unique array with its numerical value of its entry in the dataset -> transform categorical into numerical dummy features + one_hot[:, :][im == unique_value] = i + # Append array to list image_list.append(one_hot) print('train_y shape:',np.array(image_list).shape) + + # Create an numpy array to hold all the array turned images labels = np.array(image_list, dtype=np.uint8) pyplot.imshow(labels[2]) @@ -66,20 +75,33 @@ def load_labels (path): return labels -# one hot encode label data and convert to numpy array +# One hot encode label data and convert to numpy array def process_labels(seg_data): onehot_Y = [] + + # Iterate through all array turned images by shapes first value for n in range(seg_data.shape[0]): - im = seg_data[n] - n_classes = 4 - one_hot = np.zeros((im.shape[0], im.shape[1], n_classes),dtype=np.uint8) - for i, unique_value in enumerate(np.unique(im)): - one_hot[:, :, i][im == unique_value] = 1 - onehot_Y.append(one_hot) + + # Get data at position in array + im = seg_data[n] + + # There are 4 classes + n_classes = 4 + + # Create 'im.shape[0] x im.shape[1] x n_classes' shaped array of arrays of arrays of zeros with type uint8 + one_hot = np.zeros((im.shape[0], im.shape[1], n_classes),dtype=np.uint8) + + # Iterate through sorted and unique arrays of given array turned image + for i, unique_value in enumerate(np.unique(im)): + # One hot each unique array 
with its numerical value of its entry in the dataset -> transform categorical into numerical dummy features + one_hot[:, :, i][im == unique_value] = 1 + # Append array to list + onehot_Y.append(one_hot) + # Create an numpy array to hold all the array turned images onehot_Y =np.array(onehot_Y) - print (onehot_Y.dtype) + #print (onehot_Y.dtype) #print (np.unique(onehot_validate_Y)) - print (onehot_Y.shape) + #print (onehot_Y.shape) - return onehot_Y""" \ No newline at end of file + return onehot_Y \ No newline at end of file From 4c758cef0a889605da73ba1c752bf6507cb7c9d1 Mon Sep 17 00:00:00 2001 From: dapmiller Date: Sat, 15 Oct 2022 15:24:27 +1000 Subject: [PATCH 07/29] Added calls to load validation and test labels in train.py from dataset.py --- recognition/Miller/train.py | 61 ++++++++++++++++++++++++++++++++++++- 1 file changed, 60 insertions(+), 1 deletion(-) diff --git a/recognition/Miller/train.py b/recognition/Miller/train.py index 6aeb045589..876122b7f6 100644 --- a/recognition/Miller/train.py +++ b/recognition/Miller/train.py @@ -6,11 +6,70 @@ import dataset as data import modules +import tensorflow as tf +import numpy as np +from matplotlib import pyplot +from matplotlib import image -# Train VAE # Download Data and then unzip #download_oasis() +""" PROCESS TRAINING DATA""" +# Load the training data from the Oasis Data set train_X = data.load_training ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_train") + +# Check Images pyplot.imshow(train_X[2]) +pyplot.show() + +# Pre process training data set +train_X = data.process_training(train_X) + +# Load the validaton data from the oasis Data set +validate_X = data.load_training ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_validate") + +# Check Images +pyplot.imshow(validate_X[2]) +pyplot.show() + +# Pre process validation data set +validate_X = data.process_training(validate_X) + + +# Load the test data from the oasis Data Set +test_X = data.load_training ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_test") + +# Check Images +pyplot.imshow(test_X[2]) +pyplot.show() + +# Pre process test data set +test_X = data.process_training(test_X) + +""" PROCESS TRAINING LABELS DATA """ +# Load the segmented training labels data from the Oasis Data set +train_Y = data.load_labels ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_seg_train") +# Pre process training labels data +train_Y = data.process_labels(train_Y) + +# Check Images +pyplot.imshow(train_Y[2,:,:,3]) +pyplot.show() + +# Load the segmented validation labels data from the Oasis Data set +validate_Y = data.load_labels("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_seg_validate") +# Pre process validation labels data +validate_Y = data.process_labels(validate_Y) + +# Check Images +pyplot.imshow(validate_Y[2,:,:,3]) +pyplot.show() + +# Load the segmented test labels data from the Oasis Data set +test_Y = data.load_labels("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_seg_test") +# Pre process test labels data +test_Y = data.process_labels(test_Y) + +# Check Images +pyplot.imshow(test_Y[2,:,:,3]) pyplot.show() \ No newline at end of file From 38430c76da7356f572a1fc1fa98b26b8489ca933 Mon Sep 17 00:00:00 2001 From: dapmiller Date: Sat, 15 Oct 2022 15:54:05 +1000 Subject: [PATCH 08/29] Added brief summary to Read.me file about model --- 
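Note on the SSIM target mentioned in the README: TensorFlow ships a structural-similarity metric, so the > 0.6 goal can be checked directly once reconstructions exist. A minimal sketch with dummy tensors (shapes and names are illustrative only):

import tensorflow as tf

# Stand-ins for a batch of originals and model reconstructions: floats in [0, 1], shape (batch, 256, 256, 1)
originals = tf.random.uniform((8, 256, 256, 1))
reconstructions = tf.random.uniform((8, 256, 256, 1))

ssim_per_image = tf.image.ssim(originals, reconstructions, max_val=1.0)
mean_ssim = tf.reduce_mean(ssim_per_image)             # the project target is a mean SSIM above 0.6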
recognition/Miller/README.MD | 9 +++++++++ recognition/Miller/modules.py | 7 ++++++- recognition/Miller/train.py | 37 +++++++++++++++++------------------ 3 files changed, 33 insertions(+), 20 deletions(-) diff --git a/recognition/Miller/README.MD b/recognition/Miller/README.MD index 30c30388f5..72ed58f2ae 100644 --- a/recognition/Miller/README.MD +++ b/recognition/Miller/README.MD @@ -4,3 +4,12 @@ Create a generative model of one of the OASIS brain, ADNI brain or the OAI AKOA knee data set (see Appendix for links) using a VQVAE [11] or VQVAE2 [12] that has a “reasonably clear image” and a Structured Similarity (SSIM) of over 0.6. [Hard Difficulty] + +Brief Description: +Vector Quantized Variational Autoencoder (VQ-VAE) +standard VAEs: + latent space is continous + sampled using gaussian distribution ------> Brush up on latent space and gaussian distribution + with gradient descent, becomes harder to learn continous distribution --------> What is gradient descent +VQ VAES: + disccrete latent space + optimised by using discrete "codbook" -> made by discreting dist between continuous embedding and encoded outputs, the ndiscrete codewords sent to decoder which trained to generate reconstructered samples. \ No newline at end of file diff --git a/recognition/Miller/modules.py b/recognition/Miller/modules.py index 40d96b4b77..d5a5692762 100644 --- a/recognition/Miller/modules.py +++ b/recognition/Miller/modules.py @@ -3,4 +3,9 @@ implementated as a class or a function """ -# Structure of VQ-VAR Archecture \ No newline at end of file +import tensorflow as tf + +"""Create Structure of VQ-VAR Model, set training paramters, train the model""" + +# Create a model instance and sets training paramters +def VQvaeModel(): diff --git a/recognition/Miller/train.py b/recognition/Miller/train.py index 876122b7f6..06c108fd2c 100644 --- a/recognition/Miller/train.py +++ b/recognition/Miller/train.py @@ -4,12 +4,10 @@ sure to plot the losses and metrics during training """ +from ast import Mod import dataset as data -import modules -import tensorflow as tf -import numpy as np +import modules as mod from matplotlib import pyplot -from matplotlib import image # Download Data and then unzip #download_oasis() @@ -18,7 +16,7 @@ # Load the training data from the Oasis Data set train_X = data.load_training ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_train") -# Check Images +# Check training image pyplot.imshow(train_X[2]) pyplot.show() @@ -28,18 +26,17 @@ # Load the validaton data from the oasis Data set validate_X = data.load_training ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_validate") -# Check Images +# Check a validation image pyplot.imshow(validate_X[2]) pyplot.show() # Pre process validation data set validate_X = data.process_training(validate_X) - # Load the test data from the oasis Data Set test_X = data.load_training ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_test") -# Check Images +# Check a test image pyplot.imshow(test_X[2]) pyplot.show() @@ -52,24 +49,26 @@ # Pre process training labels data train_Y = data.process_labels(train_Y) -# Check Images -pyplot.imshow(train_Y[2,:,:,3]) -pyplot.show() - # Load the segmented validation labels data from the Oasis Data set validate_Y = data.load_labels("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_seg_validate") # Pre process validation labels data validate_Y = 
data.process_labels(validate_Y) -# Check Images -pyplot.imshow(validate_Y[2,:,:,3]) -pyplot.show() - # Load the segmented test labels data from the Oasis Data set test_Y = data.load_labels("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_seg_test") # Pre process test labels data test_Y = data.process_labels(test_Y) - -# Check Images + +# Check a training label image +pyplot.imshow(train_Y[2,:,:,3]) +pyplot.show() +# Check a validation label images +pyplot.imshow(validate_Y[2,:,:,3]) +pyplot.show() +# Check a test label image pyplot.imshow(test_Y[2,:,:,3]) -pyplot.show() \ No newline at end of file +pyplot.show() + +""" MODEL AND TRAIN VQ-VAE """ +# Create a instance of the VQ-VAE model +model = mod.VQvaeModel() \ No newline at end of file From d4f1ec43c79ef4aa4add382dbef0150a3fabe0d6 Mon Sep 17 00:00:00 2001 From: dapmiller Date: Sat, 15 Oct 2022 16:07:52 +1000 Subject: [PATCH 09/29] Safety commit of working loading and processing of data before implementing VQVAE structure. --- recognition/Miller/modules.py | 21 ++++++++++++++++++++- recognition/Miller/train.py | 2 +- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/recognition/Miller/modules.py b/recognition/Miller/modules.py index d5a5692762..232e57a463 100644 --- a/recognition/Miller/modules.py +++ b/recognition/Miller/modules.py @@ -6,6 +6,25 @@ import tensorflow as tf """Create Structure of VQ-VAR Model, set training paramters, train the model""" +# Vector Quantizer -> layer between encoder and decoder. Takes input from encoder and flattens. Then creates codebook +#def vq_layer(): + +""" + +activations: ReLU advised as other activations are not optimal for encoder/decoder quantization architecture. +e.g. Leaky ReLU activated models are difficult to train -> cause sporadic loss spikes that model struggles to recover from +""" +# Encoder Component +#def encoder_component(latent_dimension): + + +""" + +activations: ReLU advised as other activations are not optimal for encoder/decoder quantization architecture. +e.g. Leaky ReLU activated models are difficult to train -> cause sporadic loss spikes that model struggles to recover from +""" +# Decoder Component +#def decoder_component(latent_dimension): # Create a model instance and sets training paramters -def VQvaeModel(): +#def vqvae_model(): diff --git a/recognition/Miller/train.py b/recognition/Miller/train.py index 06c108fd2c..d34342aaf9 100644 --- a/recognition/Miller/train.py +++ b/recognition/Miller/train.py @@ -71,4 +71,4 @@ """ MODEL AND TRAIN VQ-VAE """ # Create a instance of the VQ-VAE model -model = mod.VQvaeModel() \ No newline at end of file +#model = mod.vqvae_model() \ No newline at end of file From b1bb74bbd4d9645d5ede70dfa42036ef2e1ccb27 Mon Sep 17 00:00:00 2001 From: dapmiller Date: Sat, 15 Oct 2022 17:34:28 +1000 Subject: [PATCH 10/29] Created encoder, decoder and overall vq-vae functions in modules.py however, quantised layer function still needs to be implemented and is currently preventing the code to run. 
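For context, the quantisation step that is still missing is essentially a nearest-neighbour lookup against a learned codebook. A minimal NumPy sketch of that lookup (shapes and names are illustrative; the real Keras layer is added in a later commit):

import numpy as np

latent_dim, num_embeddings = 16, 64
codebook = np.random.randn(num_embeddings, latent_dim)   # stand-in for the learned embeddings
encoded = np.random.randn(4, 4, latent_dim)              # toy encoder output of shape (h, w, d)

flat = encoded.reshape(-1, latent_dim)                    # (h*w, d)
# Squared L2 distance from every encoded vector to every codebook vector
dists = ((flat[:, None, :] - codebook[None, :, :]) ** 2).sum(axis=-1)
indices = dists.argmin(axis=1)                            # index of the nearest codebook entry
quantized = codebook[indices].reshape(encoded.shape)      # snap back to (h, w, d)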
--- recognition/Miller/dataset.py | 4 +-- recognition/Miller/helloworld.py | 0 recognition/Miller/modules.py | 62 +++++++++++++++++++++++++++++--- recognition/Miller/train.py | 21 ++++++----- 4 files changed, 73 insertions(+), 14 deletions(-) delete mode 100644 recognition/Miller/helloworld.py diff --git a/recognition/Miller/dataset.py b/recognition/Miller/dataset.py index 715121fe0d..dfd0ab9859 100644 --- a/recognition/Miller/dataset.py +++ b/recognition/Miller/dataset.py @@ -70,8 +70,8 @@ def load_labels (path): # Create an numpy array to hold all the array turned images labels = np.array(image_list, dtype=np.uint8) - pyplot.imshow(labels[2]) - pyplot.show() + #pyplot.imshow(labels[2]) + #pyplot.show() return labels diff --git a/recognition/Miller/helloworld.py b/recognition/Miller/helloworld.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/recognition/Miller/modules.py b/recognition/Miller/modules.py index 232e57a463..a42ce076ab 100644 --- a/recognition/Miller/modules.py +++ b/recognition/Miller/modules.py @@ -7,7 +7,8 @@ """Create Structure of VQ-VAR Model, set training paramters, train the model""" # Vector Quantizer -> layer between encoder and decoder. Takes input from encoder and flattens. Then creates codebook -#def vq_layer(): +def vq_layer(embedding_num, latent_dimension): + pass """ @@ -15,7 +16,24 @@ e.g. Leaky ReLU activated models are difficult to train -> cause sporadic loss spikes that model struggles to recover from """ # Encoder Component -#def encoder_component(latent_dimension): +def encoder_component(latent_dimension): + + # Instantiate a lower level keras tensor to start building model of known input shape size (not including batch size) + inputs = tf.keras.Input(shape=(256,256,1)) + + #2D Convolutional Layers + # filters -> dimesion of output space + # kernal_size -> convolution window size + # activation -> activation func used + # relu -> + # strides -> spaces convolution window moves vertically and horizontally + # padding -> "same" pads with zeros to maintain output size same as input size + layer = tf.keras.layers.Conv2D(32, 3, activation="relu", strides=2, padding="same")(inputs) + layer = tf.keras.layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")(layer) + + outputs = tf.keras.layers.Conv2D(latent_dimension, 1, padding="same")(layer) + + return tf.keras.Model(inputs, outputs, name="encoder_component") """ @@ -24,7 +42,43 @@ e.g. 
Leaky ReLU activated models are difficult to train -> cause sporadic loss spikes that model struggles to recover from """ # Decoder Component -#def decoder_component(latent_dimension): +def decoder_component(latent_dimension): + + # Instantiate a lower level keras tensor to start building model of known input shape size (not including batch size) + inputs = tf.keras.Input(shape=encoder_component(latent_dimension).output.shape[1:]) + + #Transposed Convolutional Layers (deconvolution) + # filters -> dimesion of output space + # kernal_size -> convolution window size + # activation -> activation func used + # relu -> + # strides -> spaces convolution window moves vertically and horizontally + # padding -> "same" pads with zeros to maintain output size same as input size + layer = tf.keras.layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")(inputs) + layer = tf.keras.layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")(layer) + + outputs = tf.keras.layers.Conv2DTranspose(1, 3, padding="same")(layer) + + return tf.keras.Model(inputs, outputs, name="decoder_model") + # Create a model instance and sets training paramters -#def vqvae_model(): +def vqvae_model(latent_dimension, embedings_num): + + # Instantiate a lower level keras tensor to start building model of known input shape size (not including batch size) + inputs = tf.keras.Input(shape=(256, 256, 1)) + + """Build Model Levels""" + # Get encoder component layer with given latent dimension + encoder = encoder_component(latent_dimension) + encoder_component_outputs = encoder(inputs) + + # Get Quantized Layer with given number of embeddings and latent dimension + quantized_layer = vq_layer(embedings_num, latent_dimension) + quantized_latents_dimensions = quantized_layer(encoder_component_outputs) + + # Get decoder component layer with given latent dimension + decoder = decoder_component(latent_dimension) + reconstructions = decoder(quantized_latents_dimensions) + + return tf.keras.Model(inputs, reconstructions, name="vqvae_model") diff --git a/recognition/Miller/train.py b/recognition/Miller/train.py index d34342aaf9..5cba1f2c90 100644 --- a/recognition/Miller/train.py +++ b/recognition/Miller/train.py @@ -4,7 +4,6 @@ sure to plot the losses and metrics during training """ -from ast import Mod import dataset as data import modules as mod from matplotlib import pyplot @@ -17,8 +16,8 @@ train_X = data.load_training ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_train") # Check training image -pyplot.imshow(train_X[2]) -pyplot.show() +#pyplot.imshow(train_X[2]) +#pyplot.show() # Pre process training data set train_X = data.process_training(train_X) @@ -27,8 +26,8 @@ validate_X = data.load_training ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_validate") # Check a validation image -pyplot.imshow(validate_X[2]) -pyplot.show() +#pyplot.imshow(validate_X[2]) +#pyplot.show() # Pre process validation data set validate_X = data.process_training(validate_X) @@ -37,8 +36,8 @@ test_X = data.load_training ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_test") # Check a test image -pyplot.imshow(test_X[2]) -pyplot.show() +#pyplot.imshow(test_X[2]) +#pyplot.show() # Pre process test data set test_X = data.process_training(test_X) @@ -59,6 +58,7 @@ # Pre process test labels data test_Y = data.process_labels(test_Y) +""" # Check a training label image pyplot.imshow(train_Y[2,:,:,3]) 
pyplot.show() @@ -68,7 +68,12 @@ # Check a test label image pyplot.imshow(test_Y[2,:,:,3]) pyplot.show() +""" """ MODEL AND TRAIN VQ-VAE """ # Create a instance of the VQ-VAE model -#model = mod.vqvae_model() \ No newline at end of file +latent_dimensions = 16 +embeddings_number = 64 +model = mod.vqvae_model (latent_dimensions, embeddings_number) + +model.summary() \ No newline at end of file From fa73b09a9309e8cd0ee72047e62f867776505877 Mon Sep 17 00:00:00 2001 From: dapmiller Date: Tue, 18 Oct 2022 14:33:32 +1000 Subject: [PATCH 11/29] Finished building model in modules.py. It compiles and prints out summary in train.py --- recognition/Miller/README.MD | 8 +++++++- recognition/Miller/dataset.py | 1 + 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/recognition/Miller/README.MD b/recognition/Miller/README.MD index 72ed58f2ae..a9ec1410ed 100644 --- a/recognition/Miller/README.MD +++ b/recognition/Miller/README.MD @@ -12,4 +12,10 @@ standard VAEs: with gradient descent, becomes harder to learn continous distribution --------> What is gradient descent VQ VAES: disccrete latent space - optimised by using discrete "codbook" -> made by discreting dist between continuous embedding and encoded outputs, the ndiscrete codewords sent to decoder which trained to generate reconstructered samples. \ No newline at end of file + optimised by using discrete "codbook" -> made by discreting dist between continuous embedding and encoded outputs, the ndiscrete codewords sent to decoder which trained to generate reconstructered samples. + + Generative model based on VAE. + Aims to make latent space discrete using VQ techniques + + cons: loses the "easy latent sampling" propery of VAES. 2 stage training required to learn fitting categorical prior + + training objective not correspond to bound on log-likelihood amnymore \ No newline at end of file diff --git a/recognition/Miller/dataset.py b/recognition/Miller/dataset.py index dfd0ab9859..9cabd7ca16 100644 --- a/recognition/Miller/dataset.py +++ b/recognition/Miller/dataset.py @@ -12,6 +12,7 @@ def download_oasis (): dataset_url = "https://cloudstor.aarnet.edu.au/plus/s/n5aZ4XX1WBKp6HZ/download" + # Download file from URL Path, origin=path, fname=file name, untar=compress file tf.keras.utils.get_file(origin=dataset_url,fname='oa-sis' ,untar=True) From b2c7b4974268c2f2c58f615f344cc9633695425a Mon Sep 17 00:00:00 2001 From: dapmiller Date: Tue, 18 Oct 2022 14:38:11 +1000 Subject: [PATCH 12/29] Finished building model in modules.py and it successfully compiles in train.py --- recognition/Miller/modules.py | 188 ++++++++++++++++++++++++++++------ recognition/Miller/train.py | 43 +++++++- 2 files changed, 200 insertions(+), 31 deletions(-) diff --git a/recognition/Miller/modules.py b/recognition/Miller/modules.py index a42ce076ab..3092e4add9 100644 --- a/recognition/Miller/modules.py +++ b/recognition/Miller/modules.py @@ -2,13 +2,104 @@ “modules.py" containing the source code of the components of your model. Each component must be implementated as a class or a function """ - import tensorflow as tf +from tensorflow.python.keras.engine import input_spec + +"""CREATE STRUCTURE OF VQ-VAR MODEL""" + +class vq_layer(tf.keras.layers.Layer): + def __init__(self, embedding_num, latent_dimension, beta, **kwargs): + super().__init__(**kwargs) + self.embedding_num = embedding_num + self.latent_dimension = latent_dimension + self.beta = beta + + # Initialize the embeddings which we will quantize. 
+ initial = tf.random_uniform_initializer() + self.embeddings = tf.Variable(initial_value=initial((self.latent_dimension, self.embedding_num), dtype="float32"),trainable=True) + + def call(self, x): + # Calculate the input shape + input = tf.shape(x) + print(input) + print("ahhhh") + + # Flatten the inputs to keep the embedding dimension intact. + flatten = tf.reshape(x, [-1, self.latent_dimension]) + + # Get code indices + # Calculate L2-normalized distance between the inputs and the embeddings. + similarity = tf.matmul(flatten, self.embeddings) + distances = (tf.reduce_sum(flatten ** 2, axis=1, keepdims=True) + tf.reduce_sum(self.embeddings ** 2, axis=0) - 2 * similarity) + + # Derive the indices for minimum distances. + encoded_indices = tf.argmin(distances, axis=1) + + # Turn the indices into a one hot encoded vectors + encodings = tf.one_hot(encoded_indices, self.embedding_num) + quantized = tf.matmul(encodings, self.embeddings, transpose_b=True) + + # Reshape the quantized values back to its original input shape + quantized = tf.reshape(quantized, input) + """ + # Calculate vector quantization loss and add that to the layer + commitment_loss = tf.reduce_mean((tf.stop_gradient(quantized) - x) ** 2) + codebook_loss = tf.reduce_mean((quantized - tf.stop_gradient(x)) ** 2) + + #self.add_loss(self.beta * commitment_loss + codebook_loss) + """ + # Straight-through estimator. + quantized = x + tf.stop_gradient(quantized - x) + + return quantized + + +""" +def initialise_embeddings(embedding_num, latent_dimension): + initial = tf.random_uniform_initializer() + return tf.Variable(initial_value=initial(shape=(embedding_num, latent_dimension), dtype="float32"), trainable=True, name="embeddings") -"""Create Structure of VQ-VAR Model, set training paramters, train the model""" # Vector Quantizer -> layer between encoder and decoder. Takes input from encoder and flattens. Then creates codebook -def vq_layer(embedding_num, latent_dimension): - pass +def vq_layer(embedding_num, latent_dimension, beta, x): + + # Initialize the embeddings which will be quantized. + embeddings = initialise_embeddings(embedding_num, latent_dimension) + + # Calculate the input shape + input = tf.keras.layers.shape(x) + print(input) + print("ahhhh") + + # Flatten the inputs to keep the embedding dimension intact. + flatten = tf.reshape(x, [-1, latent_dimension]) + + # Get code indices + # Calculate L2-normalized distance between the inputs and the embeddings. + similarity = tf.matmul(flatten, embeddings) + distances = (tf.reduce_sum(flatten ** 2, axis=1, keepdims=True) + tf.reduce_sum(embeddings ** 2, axis=0) - 2 * similarity) + + # Derive the indices for minimum distances. + encoded_indices = tf.argmin(distances, axis=1) + + # Turn the indices into a one hot encoded vectors + encodings = tf.one_hot(encoded_indices, embedding_num) + quantized = tf.matmul(encodings, embeddings, transpose_b=True) + + # Reshape the quantized values back to its original input shape + quantized = tf.reshape(quantized, input) + + # Calculate vector quantization loss and add that to the layer + commitment_loss = tf.reduce_mean((tf.stop_gradient(quantized) - x) ** 2) + codebook_loss = tf.reduce_mean((quantized - tf.stop_gradient(x)) ** 2) + + #self.add_loss(self.beta * commitment_loss + codebook_loss) + + # Straight-through estimator. + quantized = x + tf.stop_gradient(quantized - x) + + return quantized +""" + """ @@ -16,11 +107,13 @@ def vq_layer(embedding_num, latent_dimension): e.g. 
Leaky ReLU activated models are difficult to train -> cause sporadic loss spikes that model struggles to recover from """ # Encoder Component -def encoder_component(latent_dimension): - - # Instantiate a lower level keras tensor to start building model of known input shape size (not including batch size) - inputs = tf.keras.Input(shape=(256,256,1)) +def encoder_component(image_size, latent_dimension): + encoder = tf.keras.models.Sequential(name = "encoder") + # Instantiate a lower level keras tensor to start building model of known input shape size (not including batch size) + """inputs = tf.keras.layers.Input(shape=(image_size, image_size, 1 ), batch_size=1) + print(inputs.shape)""" + #2D Convolutional Layers # filters -> dimesion of output space # kernal_size -> convolution window size @@ -28,12 +121,13 @@ def encoder_component(latent_dimension): # relu -> # strides -> spaces convolution window moves vertically and horizontally # padding -> "same" pads with zeros to maintain output size same as input size - layer = tf.keras.layers.Conv2D(32, 3, activation="relu", strides=2, padding="same")(inputs) - layer = tf.keras.layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")(layer) - - outputs = tf.keras.layers.Conv2D(latent_dimension, 1, padding="same")(layer) + encoder.add(tf.keras.layers.Conv2D(32, 3, activation="relu", strides=2, padding="same", input_shape=(image_size,image_size,1))) + encoder.add(tf.keras.layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")) + + encoder.add(tf.keras.layers.Conv2D(latent_dimension, 1, padding="same")) - return tf.keras.Model(inputs, outputs, name="encoder_component") + #return tf.keras.Model(inputs, outputs, name="encoder_component") + return encoder """ @@ -42,11 +136,12 @@ def encoder_component(latent_dimension): e.g. 
Leaky ReLU activated models are difficult to train -> cause sporadic loss spikes that model struggles to recover from """ # Decoder Component -def decoder_component(latent_dimension): +def decoder_component(image_size): # Instantiate a lower level keras tensor to start building model of known input shape size (not including batch size) - inputs = tf.keras.Input(shape=encoder_component(latent_dimension).output.shape[1:]) - + """inputs = tf.keras.Input((encoder_shape), batch_size=1) + print(inputs.shape)""" + decoder = tf.keras.models.Sequential(name="decoder") #Transposed Convolutional Layers (deconvolution) # filters -> dimesion of output space # kernal_size -> convolution window size @@ -54,31 +149,66 @@ def decoder_component(latent_dimension): # relu -> # strides -> spaces convolution window moves vertically and horizontally # padding -> "same" pads with zeros to maintain output size same as input size - layer = tf.keras.layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")(inputs) - layer = tf.keras.layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")(layer) + decoder.add(tf.keras.layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")) + decoder.add(tf.keras.layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")) - outputs = tf.keras.layers.Conv2DTranspose(1, 3, padding="same")(layer) + decoder.add(tf.keras.layers.Conv2DTranspose(1, 3, padding="same")) - return tf.keras.Model(inputs, outputs, name="decoder_model") + #return tf.keras.Model(inputs, outputs, name="decoder_model") + return decoder # Create a model instance and sets training paramters -def vqvae_model(latent_dimension, embedings_num): +class vqvae_model(tf.keras.models.Sequential): + def __init__(self, image_size, latent_dimension, embeddings_num, beta, **kwargs): + + super(vqvae_model, self).__init__(**kwargs) + self.image_size = image_size + self.latent_dimension = latent_dimension + self.embeddings_num = embeddings_num + self.beta = beta + + # Create the model sequentially + vector_quantiser_layer = vq_layer(embeddings_num, latent_dimension, beta) + encoder = encoder_component(image_size, latent_dimension) + decoder = decoder_component(image_size) + + # Add components of model + self.add(encoder) + self.add(vector_quantiser_layer) + self.add(decoder) + + """print("START") # Instantiate a lower level keras tensor to start building model of known input shape size (not including batch size) - inputs = tf.keras.Input(shape=(256, 256, 1)) + inputs = tf.keras.layers.Input((image_size, image_size, 1 ), batch_size=1) + - """Build Model Levels""" + #encoder_shape = inputs.shape[1:] + print("The original inputs: ", inputs.shape) + + """ + #Build Model Levels + """ # Get encoder component layer with given latent dimension - encoder = encoder_component(latent_dimension) - encoder_component_outputs = encoder(inputs) + encoder = encoder_component(image_size, latent_dimension) + #print("The encoder shape is: ", encoder.shape) + #exit() + #encoder_component_outputs = encoder(inputs) + #print(encoder_component_outputs.shape) + # Get Quantized Layer with given number of embeddings and latent dimension - quantized_layer = vq_layer(embedings_num, latent_dimension) - quantized_latents_dimensions = quantized_layer(encoder_component_outputs) + quantized_layer = vq_layer(embedings_num, latent_dimension, beta) + quantized_latents_dimensions = quantized_layer(encoder) # Get decoder component layer with given latent dimension - decoder = 
decoder_component(latent_dimension) - reconstructions = decoder(quantized_latents_dimensions) + decoder = decoder_component(image_size, latent_dimension, inputs.shape[1:]) + print(decoder.shape) + reconstructions = decoder*(quantized_latents_dimensions) return tf.keras.Model(inputs, reconstructions, name="vqvae_model") + """ + + + diff --git a/recognition/Miller/train.py b/recognition/Miller/train.py index 5cba1f2c90..42ad71a7ab 100644 --- a/recognition/Miller/train.py +++ b/recognition/Miller/train.py @@ -7,6 +7,8 @@ import dataset as data import modules as mod from matplotlib import pyplot +import tensorflow as tf +import numpy as np # Download Data and then unzip #download_oasis() @@ -74,6 +76,43 @@ # Create a instance of the VQ-VAE model latent_dimensions = 16 embeddings_number = 64 -model = mod.vqvae_model (latent_dimensions, embeddings_number) +image_size = 256 +# beta = [0.25, 2] +beta = 0.25 +model = mod.vqvae_model(image_size, latent_dimensions, embeddings_number, beta) -model.summary() \ No newline at end of file +model.summary() + +""" +model.compile (optimizer='Adam', loss= 'CategoricalCrossentropy') + +# record history of training to display loss over ephocs +history = model.fit(train_X, train_Y, validation_data= (validate_X, validate_Y) ,batch_size=32,shuffle='True',epochs=5) + +# evaluate against testing data +model.evaluate(test_X,test_Y) + +# validate output +out = model.predict(test_X) +out_r = np.round(out) +out_argmax = np.argmax (out,-1) +gt_test_Y = np.argmax(test_Y,-1) + +im = 5 + +for i in range (4): + print("prediction") + pyplot.imshow(out_r[im,:,:,i]) + pyplot.show() + print("ground truth") + pyplot.imshow(test_Y[im,:,:,i]) + pyplot.show() + +print ("prediction") +pyplot.imshow(out_argmax[im,:,:]) +pyplot.show() + +print ("ground truth") +pyplot.imshow(gt_test_Y [im,:,:]) +pyplot.show() +""" \ No newline at end of file From df70801b3a4bee1574345957a67dc237789c5619 Mon Sep 17 00:00:00 2001 From: dapmiller Date: Tue, 18 Oct 2022 14:52:33 +1000 Subject: [PATCH 13/29] Testing commit --- recognition/Miller/modules.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/recognition/Miller/modules.py b/recognition/Miller/modules.py index 3092e4add9..669d9ca02d 100644 --- a/recognition/Miller/modules.py +++ b/recognition/Miller/modules.py @@ -22,7 +22,8 @@ def call(self, x): # Calculate the input shape input = tf.shape(x) print(input) - print("ahhhh") + print("ahhh") + # Flatten the inputs to keep the embedding dimension intact. 
flatten = tf.reshape(x, [-1, self.latent_dimension]) From dee848c806afea4f28eae1d8a86a6987dd250f27 Mon Sep 17 00:00:00 2001 From: dapmiller Date: Tue, 18 Oct 2022 16:52:07 +1000 Subject: [PATCH 14/29] Commited the module.py file and removed unnecessary code --- recognition/Miller/dataset.py | 1 - recognition/Miller/modules.py | 154 ++++++++++------------------------ 2 files changed, 45 insertions(+), 110 deletions(-) diff --git a/recognition/Miller/dataset.py b/recognition/Miller/dataset.py index 9cabd7ca16..89f55fbaa5 100644 --- a/recognition/Miller/dataset.py +++ b/recognition/Miller/dataset.py @@ -16,7 +16,6 @@ def download_oasis (): # Download file from URL Path, origin=path, fname=file name, untar=compress file tf.keras.utils.get_file(origin=dataset_url,fname='oa-sis' ,untar=True) - # Loads the training images (non segmented) from given path and returns an numpy array of arrays def load_training (path): image_list = [] diff --git a/recognition/Miller/modules.py b/recognition/Miller/modules.py index 669d9ca02d..5c698c6fdf 100644 --- a/recognition/Miller/modules.py +++ b/recognition/Miller/modules.py @@ -3,10 +3,24 @@ implementated as a class or a function """ import tensorflow as tf -from tensorflow.python.keras.engine import input_spec + +import tensorflow as tf +tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) """CREATE STRUCTURE OF VQ-VAR MODEL""" + +""" +Class Representation of the Vector Quantization laye + +Structure is: + 1. Reshape into (n,h,w,d) + 2. Calculate L2-normalized distance between the inputs and the embeddings. -> (n*h*w, d) + 3. Argmin -> find minimum distance between indices for each n*w*h vector + 4. Index from dictionary: index the closest vector from the dictionary for each of n*h*w vectors + 5. Reshape into original shape (n, h, w, d) + 6. Copy gradients from q -> x +""" class vq_layer(tf.keras.layers.Layer): def __init__(self, embedding_num, latent_dimension, beta, **kwargs): super().__init__(**kwargs) @@ -19,90 +33,43 @@ def __init__(self, embedding_num, latent_dimension, beta, **kwargs): self.embeddings = tf.Variable(initial_value=initial((self.latent_dimension, self.embedding_num), dtype="float32"),trainable=True) def call(self, x): - # Calculate the input shape + # Calculate the input shape and store for later -> Shape of (n,h,w,d) input = tf.shape(x) - print(input) - print("ahhh") - - # Flatten the inputs to keep the embedding dimension intact. + # Flatten the inputs to keep the embedding dimension intact. + # Combine all dimensions into last one 'd' -> (n*h*w, d) flatten = tf.reshape(x, [-1, self.latent_dimension]) # Get code indices # Calculate L2-normalized distance between the inputs and the embeddings. + # For each n*h*w vectors, we calculate the distance from each of k vectors of embedding dictionaty to obtain matrix of shape (n*h*w, k) similarity = tf.matmul(flatten, self.embeddings) distances = (tf.reduce_sum(flatten ** 2, axis=1, keepdims=True) + tf.reduce_sum(self.embeddings ** 2, axis=0) - 2 * similarity) - # Derive the indices for minimum distances. + # For each n*h*w vectors, find the indices of closest k vector from dictionary; find minimum distance. 
encoded_indices = tf.argmin(distances, axis=1) - # Turn the indices into a one hot encoded vectors + # Turn the indices into a one hot encoded vectors; index the closest vector from the dictionary for each n*h*w vector encodings = tf.one_hot(encoded_indices, self.embedding_num) quantized = tf.matmul(encodings, self.embeddings, transpose_b=True) - # Reshape the quantized values back to its original input shape + # Reshape the quantized values back to its original input shape -> (n,h,w,d) quantized = tf.reshape(quantized, input) """ # Calculate vector quantization loss and add that to the layer - commitment_loss = tf.reduce_mean((tf.stop_gradient(quantized) - x) ** 2) - codebook_loss = tf.reduce_mean((quantized - tf.stop_gradient(x)) ** 2) - + commitment_loss = tf.reduan((quantized - tf.stop_gradient(x)) ** 2) + codebook_loss = tf.reduce_mean((tf.stop_gradient(quantized) - x) ** 2) + #self.add_loss(self.beta * commitment_loss + codebook_loss) """ # Straight-through estimator. + # Unable to back propragate as gradient wont flow through argmin. Hence copy gradient from qunatised to x quantized = x + tf.stop_gradient(quantized - x) - + return quantized - -""" -def initialise_embeddings(embedding_num, latent_dimension): - initial = tf.random_uniform_initializer() - return tf.Variable(initial_value=initial(shape=(embedding_num, latent_dimension), dtype="float32"), trainable=True, name="embeddings") - -# Vector Quantizer -> layer between encoder and decoder. Takes input from encoder and flattens. Then creates codebook -def vq_layer(embedding_num, latent_dimension, beta, x): - - # Initialize the embeddings which will be quantized. - embeddings = initialise_embeddings(embedding_num, latent_dimension) - - # Calculate the input shape - input = tf.keras.layers.shape(x) - print(input) - print("ahhhh") - - # Flatten the inputs to keep the embedding dimension intact. - flatten = tf.reshape(x, [-1, latent_dimension]) - - # Get code indices - # Calculate L2-normalized distance between the inputs and the embeddings. - similarity = tf.matmul(flatten, embeddings) - distances = (tf.reduce_sum(flatten ** 2, axis=1, keepdims=True) + tf.reduce_sum(embeddings ** 2, axis=0) - 2 * similarity) - - # Derive the indices for minimum distances. - encoded_indices = tf.argmin(distances, axis=1) - - # Turn the indices into a one hot encoded vectors - encodings = tf.one_hot(encoded_indices, embedding_num) - quantized = tf.matmul(encodings, embeddings, transpose_b=True) - - # Reshape the quantized values back to its original input shape - quantized = tf.reshape(quantized, input) - - # Calculate vector quantization loss and add that to the layer - commitment_loss = tf.reduce_mean((tf.stop_gradient(quantized) - x) ** 2) - codebook_loss = tf.reduce_mean((quantized - tf.stop_gradient(x)) ** 2) - - #self.add_loss(self.beta * commitment_loss + codebook_loss) - - # Straight-through estimator. - quantized = x + tf.stop_gradient(quantized - x) - - return quantized -""" - - """ +Returns layered model for encoder architecture built from convolutional layers. activations: ReLU advised as other activations are not optimal for encoder/decoder quantization architecture. e.g. 
Leaky ReLU activated models are difficult to train -> cause sporadic loss spikes that model struggles to recover from @@ -110,10 +77,8 @@ def vq_layer(embedding_num, latent_dimension, beta, x): # Encoder Component def encoder_component(image_size, latent_dimension): + # Create model for layers encoder = tf.keras.models.Sequential(name = "encoder") - # Instantiate a lower level keras tensor to start building model of known input shape size (not including batch size) - """inputs = tf.keras.layers.Input(shape=(image_size, image_size, 1 ), batch_size=1) - print(inputs.shape)""" #2D Convolutional Layers # filters -> dimesion of output space @@ -122,27 +87,25 @@ def encoder_component(image_size, latent_dimension): # relu -> # strides -> spaces convolution window moves vertically and horizontally # padding -> "same" pads with zeros to maintain output size same as input size - encoder.add(tf.keras.layers.Conv2D(32, 3, activation="relu", strides=2, padding="same", input_shape=(image_size,image_size,1))) + encoder.add(tf.keras.layers.Conv2D(32, 3, activation="relu", strides=2, padding="same")) encoder.add(tf.keras.layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")) - encoder.add(tf.keras.layers.Conv2D(latent_dimension, 1, padding="same")) - #return tf.keras.Model(inputs, outputs, name="encoder_component") return encoder """ +Returns layered model for decoder architecture built from tranposed convolutional layers. activations: ReLU advised as other activations are not optimal for encoder/decoder quantization architecture. e.g. Leaky ReLU activated models are difficult to train -> cause sporadic loss spikes that model struggles to recover from """ # Decoder Component -def decoder_component(image_size): +def decoder_component(): - # Instantiate a lower level keras tensor to start building model of known input shape size (not including batch size) - """inputs = tf.keras.Input((encoder_shape), batch_size=1) - print(inputs.shape)""" + # Create model for layers decoder = tf.keras.models.Sequential(name="decoder") + #Transposed Convolutional Layers (deconvolution) # filters -> dimesion of output space # kernal_size -> convolution window size @@ -152,10 +115,8 @@ def decoder_component(image_size): # padding -> "same" pads with zeros to maintain output size same as input size decoder.add(tf.keras.layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")) decoder.add(tf.keras.layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")) - decoder.add(tf.keras.layers.Conv2DTranspose(1, 3, padding="same")) - #return tf.keras.Model(inputs, outputs, name="decoder_model") return decoder @@ -170,46 +131,21 @@ def __init__(self, image_size, latent_dimension, embeddings_num, beta, **kwargs) self.beta = beta # Create the model sequentially + input_layer = tf.keras.layers.InputLayer(input_shape=(image_size,image_size,1)) vector_quantiser_layer = vq_layer(embeddings_num, latent_dimension, beta) encoder = encoder_component(image_size, latent_dimension) - decoder = decoder_component(image_size) - + decoder = decoder_component() + # Add components of model + self.add(input_layer) self.add(encoder) self.add(vector_quantiser_layer) self.add(decoder) - - """print("START") - # Instantiate a lower level keras tensor to start building model of known input shape size (not including batch size) - inputs = tf.keras.layers.Input((image_size, image_size, 1 ), batch_size=1) - - - #encoder_shape = inputs.shape[1:] - print("The original inputs: ", inputs.shape) - - """ - #Build Model 
Levels - """ - # Get encoder component layer with given latent dimension - encoder = encoder_component(image_size, latent_dimension) - - #print("The encoder shape is: ", encoder.shape) - #exit() - #encoder_component_outputs = encoder(inputs) - #print(encoder_component_outputs.shape) - - # Get Quantized Layer with given number of embeddings and latent dimension - quantized_layer = vq_layer(embedings_num, latent_dimension, beta) - quantized_latents_dimensions = quantized_layer(encoder) - - # Get decoder component layer with given latent dimension - decoder = decoder_component(image_size, latent_dimension, inputs.shape[1:]) - print(decoder.shape) - reconstructions = decoder*(quantized_latents_dimensions) - - return tf.keras.Model(inputs, reconstructions, name="vqvae_model") - """ - - - +latent_dimensions = 16 +embeddings_number = 64 +image_size = 256 +# beta = [0.25, 2] +beta = 0.25 +model = vqvae_model(image_size, latent_dimensions, embeddings_number, beta) +model.summary() \ No newline at end of file From 9ee75d3a4021c89c85da5c48503a4db8e3e15e3c Mon Sep 17 00:00:00 2001 From: dapmiller Date: Wed, 19 Oct 2022 17:51:30 +1000 Subject: [PATCH 15/29] created a function to batch data before training in dataset.py --- recognition/Miller/dataset.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/recognition/Miller/dataset.py b/recognition/Miller/dataset.py index 89f55fbaa5..636561cc9a 100644 --- a/recognition/Miller/dataset.py +++ b/recognition/Miller/dataset.py @@ -5,8 +5,9 @@ import tensorflow as tf import glob import numpy as np -from matplotlib import pyplot from matplotlib import image +import random + # Download the Oasis Data as zip file. Will need to extract it manually afterwards def download_oasis (): @@ -76,7 +77,7 @@ def load_labels (path): return labels # One hot encode label data and convert to numpy array -def process_labels(seg_data): +def process_labels (seg_data): onehot_Y = [] # Iterate through all array turned images by shapes first value @@ -104,4 +105,15 @@ def process_labels(seg_data): #print (np.unique(onehot_validate_Y)) #print (onehot_Y.shape) - return onehot_Y \ No newline at end of file + return onehot_Y + +# returns list of batch_size number of processed images randomly selected from the pre-processed training dataset +def batched_and_processed (processed_data, batch_size): + xs = [] + + for i in range(batch_size): + # randomly pick an image from the training datset + img = random.choice(processed_data) + xs.append(img) + + return xs \ No newline at end of file From 605cb17ee5e705078fecde5087f298b4544adfa4 Mon Sep 17 00:00:00 2001 From: dapmiller Date: Fri, 21 Oct 2022 01:07:46 +1000 Subject: [PATCH 16/29] Successfully trained data. Safety Check Commit. Removed Batching function from dataset.py. Reformatted the functions in modules.py -> encoder and decoder are now in terms of modules instead of sequenitial and the vq layer and overall model builder are in classes to allow other atrributes to be assessed of the model functionality. train.py has reduced in code whereas data is no longer being batched before training (now batched in model.fit). 
Read.me file has notes being written --- recognition/Miller/train.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/recognition/Miller/train.py b/recognition/Miller/train.py index 42ad71a7ab..7cb1b2f6ed 100644 --- a/recognition/Miller/train.py +++ b/recognition/Miller/train.py @@ -72,6 +72,16 @@ pyplot.show() """ +# %% +batch_size = 32 + +#train_X = tf.convert_to_tensor(train_X) +#validate_X = tf.convert_to_tensor(validate_X) +batched_and_processed_train = tf.convert_to_tensor(data.batched_and_processed(train_X, batch_size)) +batched_and_processed_validate = tf.convert_to_tensor(data.batched_and_processed(validate_X, batch_size)) +#print(batched_and_processed_train.shape) +#print(batched_and_processed_validate.shape) +exit() """ MODEL AND TRAIN VQ-VAE """ # Create a instance of the VQ-VAE model latent_dimensions = 16 From 1e995760d16b2ead66bf5254804cf5007d302c21 Mon Sep 17 00:00:00 2001 From: dapmiller Date: Fri, 21 Oct 2022 03:11:54 +1000 Subject: [PATCH 17/29] Created functions in train.py that compute structural similiarity. It currently does not work however. Training for data is also working, however there is a high loss. --- recognition/Miller/README.MD | 21 +++-- recognition/Miller/dataset.py | 13 +-- recognition/Miller/modules.py | 167 +++++++++++++++++++--------------- 3 files changed, 107 insertions(+), 94 deletions(-) diff --git a/recognition/Miller/README.MD b/recognition/Miller/README.MD index a9ec1410ed..f3f44efe4c 100644 --- a/recognition/Miller/README.MD +++ b/recognition/Miller/README.MD @@ -1,15 +1,16 @@ -“README.MD” to sufficiently document your project (see Section 6). +Vector Quantized Variational Auto-encoder(VQ VAE Model) +Oasis Brain Data Set +This program creates a create a generative model of the OASIS brain data set that has a “reasonably clear image” and a Structured Similarity (SSIM) of over 0.6. +Description of VQ VAE Algorithm: +A standard VAE (encoder->decoder) uses a continous latent space that was sampled using gaussain distribution; this makes it hard to leatn continuous distribution with a gradient descent. In comparison, VQ VAE (encoder-> VQ layer-> decoder) ues discrete latent space; and consists of three parts: +1. Encoder -> Convolutional network to downsample the features of an image -Create a generative model of one of the OASIS brain, ADNI brain or the OAI AKOA knee data set (see -Appendix for links) using a VQVAE [11] or VQVAE2 [12] that has a “reasonably clear image” and a Structured Similarity (SSIM) of over 0.6. [Hard Difficulty] +2. Latent space -> Discrete "codebook" that describes the latent space. + codebook consists of n latent embedding vectors of dimension D each + each code represents the distance between each embedding and encoded output+ -Brief Description: -Vector Quantized Variational Autoencoder (VQ-VAE) -standard VAEs: - latent space is continous + sampled using gaussian distribution ------> Brush up on latent space and gaussian distribution - with gradient descent, becomes harder to learn continous distribution --------> What is gradient descent VQ VAES: disccrete latent space optimised by using discrete "codbook" -> made by discreting dist between continuous embedding and encoded outputs, the ndiscrete codewords sent to decoder which trained to generate reconstructered samples. @@ -18,4 +19,6 @@ VQ VAES: Aims to make latent space discrete using VQ techniques cons: loses the "easy latent sampling" propery of VAES. 
2 stage training required to learn fitting categorical prior - + training objective not correspond to bound on log-likelihood amnymore \ No newline at end of file + + training objective not correspond to bound on log-likelihood amnymore + + diff --git a/recognition/Miller/dataset.py b/recognition/Miller/dataset.py index 636561cc9a..6b6f42dbe1 100644 --- a/recognition/Miller/dataset.py +++ b/recognition/Miller/dataset.py @@ -105,15 +105,4 @@ def process_labels (seg_data): #print (np.unique(onehot_validate_Y)) #print (onehot_Y.shape) - return onehot_Y - -# returns list of batch_size number of processed images randomly selected from the pre-processed training dataset -def batched_and_processed (processed_data, batch_size): - xs = [] - - for i in range(batch_size): - # randomly pick an image from the training datset - img = random.choice(processed_data) - xs.append(img) - - return xs \ No newline at end of file + return onehot_Y \ No newline at end of file diff --git a/recognition/Miller/modules.py b/recognition/Miller/modules.py index 5c698c6fdf..56d0a4d822 100644 --- a/recognition/Miller/modules.py +++ b/recognition/Miller/modules.py @@ -1,15 +1,14 @@ """ “modules.py" containing the source code of the components of your model. Each component must be implementated as a class or a function -""" -import tensorflow as tf +Based on Neural Discrete Representation Learning by van der Oord et al https://arxiv.org/pdf/1711.00937.pdf +and the given example on https://keras.io/examples/generative/vq_vae/ +""" import tensorflow as tf -tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) """CREATE STRUCTURE OF VQ-VAR MODEL""" - """ Class Representation of the Vector Quantization laye @@ -21,20 +20,21 @@ 5. Reshape into original shape (n, h, w, d) 6. Copy gradients from q -> x """ -class vq_layer(tf.keras.layers.Layer): - def __init__(self, embedding_num, latent_dimension, beta, **kwargs): +class VQ_layer(tf.keras.layers.Layer): + def __init__(self, embedding_num, latent_dimension, beta=0.25, **kwargs): super().__init__(**kwargs) self.embedding_num = embedding_num self.latent_dimension = latent_dimension self.beta = beta # Initialize the embeddings which we will quantize. - initial = tf.random_uniform_initializer() - self.embeddings = tf.Variable(initial_value=initial((self.latent_dimension, self.embedding_num), dtype="float32"),trainable=True) - + w_init = tf.random_uniform_initializer() + self.embeddings = tf.Variable(initial_value=w_init(shape=(self.latent_dimension, self.embedding_num), dtype="float32"),trainable=True,name="embeddings_vqvae",) + + # Forward Pass behaviour. Takes Tensor as input def call(self, x): # Calculate the input shape and store for later -> Shape of (n,h,w,d) - input = tf.shape(x) + input_shape = tf.shape(x) # Flatten the inputs to keep the embedding dimension intact. 
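        # Each spatial position of the encoder output is treated as its own D-dimensional
        # vector here, so every position is quantised against the codebook independently.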
# Combine all dimensions into last one 'd' -> (n*h*w, d) @@ -54,16 +54,25 @@ def call(self, x): quantized = tf.matmul(encodings, self.embeddings, transpose_b=True) # Reshape the quantized values back to its original input shape -> (n,h,w,d) - quantized = tf.reshape(quantized, input) + quantized = tf.reshape(quantized, input_shape) + + """ LOSS CALCULATIONS """ """ - # Calculate vector quantization loss and add that to the layer - commitment_loss = tf.reduan((quantized - tf.stop_gradient(x)) ** 2) - codebook_loss = tf.reduce_mean((tf.stop_gradient(quantized) - x) ** 2) - - #self.add_loss(self.beta * commitment_loss + codebook_loss) + COMMITMENT LOSS + Since volume of embedding spcae is dimensionless, it may grow arbitarily if embedding ei does not + train as fast as encoder parameters. Thus add a commitment loss to make sure encoder commits to an embedding + CODE BOOK LOSS + Gradients bypass embedding, so we use a dictionary learningn algorithm which uses l2 error to + move embedding vectors ei towards encoder output + + tf.stop_gradient -> no gradient flows through """ + commitment_loss = tf.reduce_mean((tf.stop_gradient(quantized) - x) ** 2) + codebook_loss = tf.reduce_mean((quantized - tf.stop_gradient(x)) ** 2) + self.add_loss(self.beta * commitment_loss + codebook_loss) # Straight-through estimator. # Unable to back propragate as gradient wont flow through argmin. Hence copy gradient from qunatised to x + # During backpropagation, (quantized -x) wont be included in computation anf the gradient obtained will be copied for inputs quantized = x + tf.stop_gradient(quantized - x) return quantized @@ -75,24 +84,21 @@ def call(self, x): e.g. Leaky ReLU activated models are difficult to train -> cause sporadic loss spikes that model struggles to recover from """ # Encoder Component -def encoder_component(image_size, latent_dimension): - - # Create model for layers - encoder = tf.keras.models.Sequential(name = "encoder") - +def encoder_component(latent_dimension): #2D Convolutional Layers - # filters -> dimesion of output space - # kernal_size -> convolution window size - # activation -> activation func used - # relu -> - # strides -> spaces convolution window moves vertically and horizontally - # padding -> "same" pads with zeros to maintain output size same as input size - encoder.add(tf.keras.layers.Conv2D(32, 3, activation="relu", strides=2, padding="same")) - encoder.add(tf.keras.layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")) - encoder.add(tf.keras.layers.Conv2D(latent_dimension, 1, padding="same")) - - return encoder + # filters -> dimesion of output space + # kernal_size -> convolution window size + # activation -> activation func used + # relu -> + # strides -> spaces convolution window moves vertically and horizontally + # padding -> "same" pads with zeros to maintain output size same as input size + inputs = tf.keras.Input(shape=(256, 256, 1)) + + layer = tf.keras.layers.Conv2D(32, 3, activation="relu", strides=2, padding="same")(inputs) + layer = tf.keras.layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")(layer) + outputs = tf.keras.layers.Conv2D(latent_dimension, 1, padding="same")(layer) + return tf.keras.Model(inputs, outputs, name="encoder") """ Returns layered model for decoder architecture built from tranposed convolutional layers. @@ -101,51 +107,66 @@ def encoder_component(image_size, latent_dimension): e.g. 
Leaky ReLU activated models are difficult to train -> cause sporadic loss spikes that model struggles to recover from """ # Decoder Component -def decoder_component(): - - # Create model for layers - decoder = tf.keras.models.Sequential(name="decoder") - - #Transposed Convolutional Layers (deconvolution) - # filters -> dimesion of output space - # kernal_size -> convolution window size - # activation -> activation func used - # relu -> - # strides -> spaces convolution window moves vertically and horizontally - # padding -> "same" pads with zeros to maintain output size same as input size - decoder.add(tf.keras.layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")) - decoder.add(tf.keras.layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")) - decoder.add(tf.keras.layers.Conv2DTranspose(1, 3, padding="same")) - - return decoder - +def decoder_component(latent_dimension): + inputs = tf.keras.Input(shape=encoder_component(latent_dimension).output.shape[1:]) + layer = tf.keras.layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")(inputs) + layer = tf.keras.layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")(layer) + outputs = tf.keras.layers.Conv2DTranspose(1, 3, padding="same")(layer) + return tf.keras.Model(inputs, outputs, name="decoder") + +def build_model(embeddings_num=64, latent_dimension=16): + vq_layer = VQ_layer(embeddings_num, latent_dimension, name="vector_quantizer") + encoder = encoder_component(latent_dimension) + decoder = decoder_component(latent_dimension) + inputs = tf.keras.Input(shape=(256, 256, 1)) + encoder_outputs = encoder(inputs) + quantized_latents = vq_layer(encoder_outputs) + reconstructions = decoder(quantized_latents) + return tf.keras.Model(inputs, reconstructions, name="vq_vae") + +build_model().summary() # Create a model instance and sets training paramters -class vqvae_model(tf.keras.models.Sequential): - def __init__(self, image_size, latent_dimension, embeddings_num, beta, **kwargs): +class vqvae_model(tf.keras.models.Model): + def __init__(self, variance, latent_dimension=32, embeddings_num=128, **kwargs): super(vqvae_model, self).__init__(**kwargs) - self.image_size = image_size self.latent_dimension = latent_dimension self.embeddings_num = embeddings_num - self.beta = beta - - # Create the model sequentially - input_layer = tf.keras.layers.InputLayer(input_shape=(image_size,image_size,1)) - vector_quantiser_layer = vq_layer(embeddings_num, latent_dimension, beta) - encoder = encoder_component(image_size, latent_dimension) - decoder = decoder_component() + self.variance = variance - # Add components of model - self.add(input_layer) - self.add(encoder) - self.add(vector_quantiser_layer) - self.add(decoder) - -latent_dimensions = 16 -embeddings_number = 64 -image_size = 256 -# beta = [0.25, 2] -beta = 0.25 -model = vqvae_model(image_size, latent_dimensions, embeddings_number, beta) -model.summary() \ No newline at end of file + self.model = build_model(embeddings_num, latent_dimension) + + self.total_loss = tf.keras.metrics.Mean(name="total_loss") + self.reconstruction_loss = tf.keras.metrics.Mean(name="reconstruction_loss") + self.vq_loss = tf.keras.metrics.Mean(name="vq_loss") + + @property + def metrics(self): + # Model metrics -> returns losses (total loss, reconstruction loss and the vq_loss) + return [self.total_loss, self.reconstruction_loss, self.vq_loss] + + def train_step(self, x): + with tf.GradientTape() as tape: + # Outputs from the VQ-VAE. 
+ reconstructions = self.model(x) + + # Calculate the losses. + reconstruction_loss = (tf.reduce_mean((x - reconstructions) ** 2) / self.variance) + total_loss = reconstruction_loss + sum(self.model.losses) + + # Backpropagation. + grads = tape.gradient(total_loss, self.model.trainable_variables) + self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables)) + + # Loss tracking. + self.total_loss.update_state(total_loss) + self.reconstruction_loss.update_state(reconstruction_loss) + self.vq_loss.update_state(sum(self.model.losses)) + + # Log results. + return { + "loss": self.total_loss.result(), + "reconstruction_loss": self.reconstruction_loss.result(), + "vqvae_loss": self.vq_loss.result(), + } \ No newline at end of file From d3617772d66ff7089b30442f0a4d4ebf1a7658d8 Mon Sep 17 00:00:00 2001 From: dapmiller Date: Fri, 21 Oct 2022 11:42:19 +1000 Subject: [PATCH 18/29] Edited the ssim in train.py. Also added references to dataset.py anhd modules.py --- recognition/Miller/dataset.py | 4 +- recognition/Miller/modules.py | 9 +- recognition/Miller/predict.py | 50 ++++++++- recognition/Miller/train.py | 187 ++++++++++++++++++++-------------- 4 files changed, 167 insertions(+), 83 deletions(-) diff --git a/recognition/Miller/dataset.py b/recognition/Miller/dataset.py index 6b6f42dbe1..4dd63193c4 100644 --- a/recognition/Miller/dataset.py +++ b/recognition/Miller/dataset.py @@ -1,13 +1,13 @@ """ dataset.py" containing the data loader for loading and preprocessing your data + +This was file utilises and modifies the fucntions found in https://github.com/shakes76/PatternFlow/tree/master/recognition/MySolution """ import tensorflow as tf import glob import numpy as np from matplotlib import image -import random - # Download the Oasis Data as zip file. Will need to extract it manually afterwards def download_oasis (): diff --git a/recognition/Miller/modules.py b/recognition/Miller/modules.py index 56d0a4d822..556fcedcdf 100644 --- a/recognition/Miller/modules.py +++ b/recognition/Miller/modules.py @@ -101,7 +101,7 @@ def encoder_component(latent_dimension): return tf.keras.Model(inputs, outputs, name="encoder") """ -Returns layered model for decoder architecture built from tranposed convolutional layers. +Returns the model for decoder architecture built from tranposed convolutional layers. activations: ReLU advised as other activations are not optimal for encoder/decoder quantization architecture. e.g. 
Leaky ReLU activated models are difficult to train -> cause sporadic loss spikes that model struggles to recover from @@ -114,7 +114,8 @@ def decoder_component(latent_dimension): outputs = tf.keras.layers.Conv2DTranspose(1, 3, padding="same")(layer) return tf.keras.Model(inputs, outputs, name="decoder") -def build_model(embeddings_num=64, latent_dimension=16): +# Build Model +def build_model(embeddings_num, latent_dimension): vq_layer = VQ_layer(embeddings_num, latent_dimension, name="vector_quantizer") encoder = encoder_component(latent_dimension) decoder = decoder_component(latent_dimension) @@ -124,11 +125,9 @@ def build_model(embeddings_num=64, latent_dimension=16): reconstructions = decoder(quantized_latents) return tf.keras.Model(inputs, reconstructions, name="vq_vae") -build_model().summary() - # Create a model instance and sets training paramters class vqvae_model(tf.keras.models.Model): - def __init__(self, variance, latent_dimension=32, embeddings_num=128, **kwargs): + def __init__(self, variance, latent_dimension, embeddings_num, **kwargs): super(vqvae_model, self).__init__(**kwargs) self.latent_dimension = latent_dimension diff --git a/recognition/Miller/predict.py b/recognition/Miller/predict.py index 9060c5d4b6..1585fea693 100644 --- a/recognition/Miller/predict.py +++ b/recognition/Miller/predict.py @@ -4,8 +4,54 @@ import tensorflow as tf import pathlib import numpy as np -from matplotlib import pyplot +import matplotlib.pyplot as plt from matplotlib import image import glob +import modules as mod +import train as t +import warnings +warnings.filterwarnings("ignore", category=DeprecationWarning) +# Show how well program performs -# Show how well program performs \ No newline at end of file + +""" MODEL AND TRAIN VQ-VAE """ +# Create a instance of the VQ-VAE model +latent_dimensions = 16 +embeddings_number = 64 +image_size = 256 +# beta = [0.25, 2] +beta = 0.25 +model = mod.vqvae_model(image_size, latent_dimensions, embeddings_number, beta) + +model.summary() + + +model.compile (optimizer="Adam", loss= tf.keras.losses.CategoricalCrossentropy()) + +# record history of training to display loss over ephocs +history = model.fit(t.train_X, t.train_Y, validation_data= (t.validate_X, t.validate_Y) ,batch_size=32,shuffle=True,epochs=5) + +# evaluate against testing data +model.evaluate(t.test_X,t.test_Y) + +def show_subplot(original, reconstructed): + plt.subplot(1, 2, 1) + plt.imshow(original.squeeze() + 0.5) + plt.title("Original") + plt.axis("off") + + plt.subplot(1, 2, 2) + plt.imshow(reconstructed.squeeze() + 0.5) + plt.title("Reconstructed") + plt.axis("off") + + plt.show() + + + +idx = np.random.choice(len(t.test_X), 10) +test_images = t.test_X[idx] +reconstructions_test = model.predict(test_images) + +for test_image, reconstructed_image in zip(test_images, reconstructions_test): + show_subplot(test_image, reconstructed_image) \ No newline at end of file diff --git a/recognition/Miller/train.py b/recognition/Miller/train.py index 7cb1b2f6ed..3b4a210305 100644 --- a/recognition/Miller/train.py +++ b/recognition/Miller/train.py @@ -3,11 +3,9 @@ should be imported from “modules.py” and the data loader should be imported from “dataset.py”. 
Make sure to plot the losses and metrics during training """ - import dataset as data import modules as mod -from matplotlib import pyplot -import tensorflow as tf +import matplotlib.pyplot as plt import numpy as np # Download Data and then unzip @@ -27,20 +25,12 @@ # Load the validaton data from the oasis Data set validate_X = data.load_training ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_validate") -# Check a validation image -#pyplot.imshow(validate_X[2]) -#pyplot.show() - # Pre process validation data set validate_X = data.process_training(validate_X) # Load the test data from the oasis Data Set test_X = data.load_training ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_test") -# Check a test image -#pyplot.imshow(test_X[2]) -#pyplot.show() - # Pre process test data set test_X = data.process_training(test_X) @@ -60,69 +50,118 @@ # Pre process test labels data test_Y = data.process_labels(test_Y) -""" -# Check a training label image -pyplot.imshow(train_Y[2,:,:,3]) -pyplot.show() -# Check a validation label images -pyplot.imshow(validate_Y[2,:,:,3]) -pyplot.show() -# Check a test label image -pyplot.imshow(test_Y[2,:,:,3]) -pyplot.show() -""" - -# %% -batch_size = 32 - -#train_X = tf.convert_to_tensor(train_X) -#validate_X = tf.convert_to_tensor(validate_X) -batched_and_processed_train = tf.convert_to_tensor(data.batched_and_processed(train_X, batch_size)) -batched_and_processed_validate = tf.convert_to_tensor(data.batched_and_processed(validate_X, batch_size)) -#print(batched_and_processed_train.shape) -#print(batched_and_processed_validate.shape) -exit() """ MODEL AND TRAIN VQ-VAE """ # Create a instance of the VQ-VAE model -latent_dimensions = 16 -embeddings_number = 64 -image_size = 256 -# beta = [0.25, 2] -beta = 0.25 -model = mod.vqvae_model(image_size, latent_dimensions, embeddings_number, beta) - -model.summary() - -""" -model.compile (optimizer='Adam', loss= 'CategoricalCrossentropy') - -# record history of training to display loss over ephocs -history = model.fit(train_X, train_Y, validation_data= (validate_X, validate_Y) ,batch_size=32,shuffle='True',epochs=5) - +latent_dimensions = 16 #dimensionality if each latent embedding vector +embeddings_number = 128 #number of embeddings in the codebook +variance = np.var(train_X / 255.0) +model = mod.vqvae_model(variance, latent_dimensions, embeddings_number) +model.model.summary() + +model.compile (optimizer="Adam") +history = model.fit(train_X, epochs=5, batch_size=128) +print("disaster!!!!") # evaluate against testing data -model.evaluate(test_X,test_Y) - -# validate output -out = model.predict(test_X) -out_r = np.round(out) -out_argmax = np.argmax (out,-1) -gt_test_Y = np.argmax(test_Y,-1) - -im = 5 - -for i in range (4): - print("prediction") - pyplot.imshow(out_r[im,:,:,i]) - pyplot.show() - print("ground truth") - pyplot.imshow(test_Y[im,:,:,i]) - pyplot.show() - -print ("prediction") -pyplot.imshow(out_argmax[im,:,:]) -pyplot.show() - -print ("ground truth") -pyplot.imshow(gt_test_Y [im,:,:]) -pyplot.show() -""" \ No newline at end of file +#model.evaluate(test_X,test_Y) + + +# Plots the original image against the reconstructed one +def plot_comparision_original_to_reconstructed(original, reconstructed): + plt.figure(figsize = (10,12)) + plt.subplot(1, 2, 1) + plt.imshow(original.squeeze() + 0.5, cmap = 'gray') + plt.title("Original") + plt.axis("off") + + plt.subplot(1, 2, 2) + plt.imshow(reconstructed.squeeze() + 0.5, cmap = 'gray') + 
plt.title("Reconstructed") + plt.axis("off") + + plt.show() + +trained_model = model.model +idx = np.random.choice(len(test_X), 10) +test_images = test_X[idx] +reconstructions_test = trained_model.predict(test_images) + +for test_image, reconstructed_image in zip(test_images, reconstructions_test): + plot_comparision_original_to_reconstructed(test_image, reconstructed_image) + + + +# Return the average pixel value for the image and the reconstruction +def calculate_mean(image, reconstructed_image): + image_pixel = 0 + reconstructed_pixel = 0 + + for row in range(256): + for col in range(256): + image_pixel += image[row][col] + reconstructed_pixel += reconstructed_image[row][col] + + image_pixel = image_pixel / (256**2) + reconstructed_pixel = reconstructed_pixel / (256**2) + + return image_pixel, reconstructed_image + +# Returns std dev for the pixel value of each image +def calculate_stddev(image, reconstructed_image, image_mean, reconstructed_image_mean): + + image_variance = 0 + reconstructed_image_variance = 0 + + for row in range(256): + for col in range(256): + image_variance += np.square(image[row][col] - image_mean) + reconstructed_image_variance += np.square(reconstructed_image[row][col] - reconstructed_image_mean) + + image_variance = np.sqrt(image_variance/256**2 - 1) + reconstructed_image_variance = np.sqrt(reconstructed_image_variance/256**2 - 1) + return image_variance, reconstructed_image_variance + +# Returns the covariance for both images +def calculate_covariance(image, reconstructed_image, image_mean, predicted_mean): + covariance_value = 0 + + for row in range(256): + for col in range(256): + covariance_value += (image[row][col] - image_mean)*(reconstructed_image[row][col] - predicted_mean) + + return covariance_value/256**256-1 + + +# Return the structural similarity between two images; measures the window x and y of common size. +# https://en.wikipedia.org/wiki/Structural_similarity +def structural_similarity(mean_X, predicted_mean, stddev_X, predicted_stddev, covariance): + K1 = 0.01 # default value + K2 = 0.03 # default value + L = 255 # dynamic range of pixel value (2^bits per pixel -1) + C1 = (K1 * L)**2 + C2 = (K2 * L)**2 + C3 = C2 / 2 + + luminance_x_y = (2*mean_X*predicted_mean + C1)/(mean_X**2+predicted_mean**2+C1) + contrast_x_y = (2*stddev_X*predicted_stddev + C2)/(stddev_X**2+np. 
predicted_stddev**2+C2) + structure_x_y = (covariance+C3)/(stddev_X*predicted_stddev+C3) + return luminance_x_y * contrast_x_y * structure_x_y + +# Returns the structured similarity for the entire data set +def structural_similarity_mean(test_X, model): + structured_similarity_coef = 0 + + for i, data in enumerate(test_X): + # get reconstructed image + image_reconstruction = model.predict(data) + data = data[0,:,:,0] + image_reconstruction = image_reconstruction[0,:,:,0] + + # Calculate structured similarity and add to total + mean_X, predicted_mean = calculate_mean(data, image_reconstruction) + stddev_X, predicted_stddev = calculate_stddev(data, image_reconstruction, mean_X, predicted_mean) + covariance = calculate_covariance(data, image_reconstruction, mean_X, predicted_mean) + structured_similarity_coef += structural_similarity(mean_X, predicted_mean, stddev_X, predicted_stddev, covariance) + + return structured_similarity_coef / len(test_X) + +print(structural_similarity_mean(test_X, trained_model)) \ No newline at end of file From 235585b4cdc31becc700d6ee795df44191122fa6 Mon Sep 17 00:00:00 2001 From: dapmiller Date: Fri, 21 Oct 2022 17:09:48 +1000 Subject: [PATCH 19/29] Fixed bug that was stopping loss reduce in model.fit. Currently implementing results for post model training --- recognition/Miller/modules.py | 156 ++++++++++++++++++++-------------- recognition/Miller/train.py | 58 ++++++++++--- 2 files changed, 137 insertions(+), 77 deletions(-) diff --git a/recognition/Miller/modules.py b/recognition/Miller/modules.py index 556fcedcdf..d90b6f087e 100644 --- a/recognition/Miller/modules.py +++ b/recognition/Miller/modules.py @@ -20,7 +20,7 @@ 5. Reshape into original shape (n, h, w, d) 6. Copy gradients from q -> x """ -class VQ_layer(tf.keras.layers.Layer): +class VectorQ_layer(tf.keras.layers.Layer): def __init__(self, embedding_num, latent_dimension, beta=0.25, **kwargs): super().__init__(**kwargs) self.embedding_num = embedding_num @@ -77,95 +77,119 @@ def call(self, x): return quantized -""" -Returns layered model for encoder architecture built from convolutional layers. - -activations: ReLU advised as other activations are not optimal for encoder/decoder quantization architecture. -e.g. Leaky ReLU activated models are difficult to train -> cause sporadic loss spikes that model struggles to recover from -""" -# Encoder Component -def encoder_component(latent_dimension): - #2D Convolutional Layers - # filters -> dimesion of output space - # kernal_size -> convolution window size - # activation -> activation func used - # relu -> - # strides -> spaces convolution window moves vertically and horizontally - # padding -> "same" pads with zeros to maintain output size same as input size - inputs = tf.keras.Input(shape=(256, 256, 1)) - - layer = tf.keras.layers.Conv2D(32, 3, activation="relu", strides=2, padding="same")(inputs) - layer = tf.keras.layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")(layer) - - outputs = tf.keras.layers.Conv2D(latent_dimension, 1, padding="same")(layer) - return tf.keras.Model(inputs, outputs, name="encoder") - -""" -Returns the model for decoder architecture built from tranposed convolutional layers. - -activations: ReLU advised as other activations are not optimal for encoder/decoder quantization architecture. -e.g. 
Leaky ReLU activated models are difficult to train -> cause sporadic loss spikes that model struggles to recover from -""" -# Decoder Component -def decoder_component(latent_dimension): - inputs = tf.keras.Input(shape=encoder_component(latent_dimension).output.shape[1:]) - layer = tf.keras.layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")(inputs) - layer = tf.keras.layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")(layer) - outputs = tf.keras.layers.Conv2DTranspose(1, 3, padding="same")(layer) - return tf.keras.Model(inputs, outputs, name="decoder") - -# Build Model -def build_model(embeddings_num, latent_dimension): - vq_layer = VQ_layer(embeddings_num, latent_dimension, name="vector_quantizer") - encoder = encoder_component(latent_dimension) - decoder = decoder_component(latent_dimension) - inputs = tf.keras.Input(shape=(256, 256, 1)) - encoder_outputs = encoder(inputs) - quantized_latents = vq_layer(encoder_outputs) - reconstructions = decoder(quantized_latents) - return tf.keras.Model(inputs, reconstructions, name="vq_vae") +# Represents the VAE Structure +class VAE: + def __init__(self, embedding_num, latent_dimension, beta=0.25): + self.embedding_num = embedding_num + self.latent_dimension = latent_dimension + self.beta=beta + """ + Returns layered model for encoder architecture built from convolutional layers. + + activations: ReLU advised as other activations are not optimal for encoder/decoder quantization architecture. + e.g. Leaky ReLU activated models are difficult to train -> cause sporadic loss spikes that model struggles to recover from + """ + # Encoder Component + def encoder_component(self): + #2D Convolutional Layers + # filters -> dimesion of output space + # kernal_size -> convolution window size + # activation -> activation func used + # relu -> + # strides -> spaces convolution window moves vertically and horizontally + # padding -> "same" pads with zeros to maintain output size same as input size + inputs = tf.keras.Input(shape=(256, 256, 1)) + + layer = tf.keras.layers.Conv2D(32, 3, activation="relu", strides=2, padding="same")(inputs) + layer = tf.keras.layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")(layer) + + outputs = tf.keras.layers.Conv2D(self.latent_dimension, 1, padding="same")(layer) + return tf.keras.Model(inputs, outputs, name="encoder") + + # Returns the vq Layer + def vq_layer(self): + return VectorQ_layer(self.embedding_num, self.latent_dimension, self.beta, name="vector_quantizer") + + """ + Returns the model for decoder architecture built from tranposed convolutional layers. + + activations: ReLU advised as other activations are not optimal for encoder/decoder quantization architecture. + e.g. 
Leaky ReLU activated models are difficult to train -> cause sporadic loss spikes that model struggles to recover from + """ + # Decoder Component + def decoder_component(self): + inputs = tf.keras.Input(shape=self.encoder_component().output.shape[1:]) + #2D Convolutional Transpose Layers + # filters -> dimesion of output space + # kernal_size -> convolution window size + # activation -> activation func used + # relu -> + # strides -> spaces convolution window moves vertically and horizontally + # padding -> "same" pads with zeros to maintain output size same as input size + layer = tf.keras.layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")(inputs) + layer = tf.keras.layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")(layer) + outputs = tf.keras.layers.Conv2DTranspose(1, 3, padding="same")(layer) + return tf.keras.Model(inputs, outputs, name="decoder") + + # Build Model + def build_model(self): + vq_layer = self.vq_layer() + encoder = self.encoder_component() + decoder = self.decoder_component() + + inputs = tf.keras.Input(shape=(256, 256, 1)) + encoder_outputs = encoder(inputs) + quantized_latents = vq_layer(encoder_outputs) + reconstructions = decoder(quantized_latents) + model = tf.keras.Model(inputs, reconstructions, name="vq_vae") + model.summary() + return model # Create a model instance and sets training paramters -class vqvae_model(tf.keras.models.Model): - def __init__(self, variance, latent_dimension, embeddings_num, **kwargs): +class VQVAETRAINER(tf.keras.models.Model): + def __init__(self, variance, latent_dimension=32, embeddings_num=128, **kwargs): - super(vqvae_model, self).__init__(**kwargs) + super(VQVAETRAINER, self).__init__(**kwargs) self.latent_dimension = latent_dimension self.embeddings_num = embeddings_num self.variance = variance - self.model = build_model(embeddings_num, latent_dimension) + VAE_model = VAE(self.embeddings_num, self.latent_dimension) + self.vqvae_model = VAE_model.build_model() + - self.total_loss = tf.keras.metrics.Mean(name="total_loss") - self.reconstruction_loss = tf.keras.metrics.Mean(name="reconstruction_loss") - self.vq_loss = tf.keras.metrics.Mean(name="vq_loss") + self.total_loss_tracker = tf.keras.metrics.Mean(name="total_loss") + self.reconstruction_loss_tracker = tf.keras.metrics.Mean(name="reconstruction_loss") + self.vq_loss_tracker = tf.keras.metrics.Mean(name="vq_loss") @property def metrics(self): # Model metrics -> returns losses (total loss, reconstruction loss and the vq_loss) - return [self.total_loss, self.reconstruction_loss, self.vq_loss] + return [self.total_loss_tracker, self.reconstruction_loss_tracker, self.vq_loss_tracker] def train_step(self, x): with tf.GradientTape() as tape: # Outputs from the VQ-VAE. - reconstructions = self.model(x) + reconstructions = self.vqvae_model(x) # Calculate the losses. reconstruction_loss = (tf.reduce_mean((x - reconstructions) ** 2) / self.variance) - total_loss = reconstruction_loss + sum(self.model.losses) + total_loss = reconstruction_loss + sum(self.vqvae_model.losses) # Backpropagation. - grads = tape.gradient(total_loss, self.model.trainable_variables) - self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables)) + grads = tape.gradient(total_loss, self.vqvae_model.trainable_variables) + self.optimizer.apply_gradients(zip(grads, self.vqvae_model.trainable_variables)) # Loss tracking. 
- self.total_loss.update_state(total_loss) - self.reconstruction_loss.update_state(reconstruction_loss) - self.vq_loss.update_state(sum(self.model.losses)) + """CODEBOOK LOSS + COMMITMENT LOSS -> euclidean loss + encoder loss""" + self.total_loss_tracker.update_state(total_loss) + """RECONSTRUCTION ERROR (MSE) -> between input and reconstruction""" + self.reconstruction_loss_tracker.update_state(reconstruction_loss) + self.vq_loss_tracker.update_state(sum(self.vqvae_model.losses)) # Log results. return { - "loss": self.total_loss.result(), - "reconstruction_loss": self.reconstruction_loss.result(), - "vqvae_loss": self.vq_loss.result(), + "loss": self.total_loss_tracker.result(), + "reconstruction_loss": self.reconstruction_loss_tracker.result(), + "vqvae_loss": self.vq_loss_tracker.result(), } \ No newline at end of file diff --git a/recognition/Miller/train.py b/recognition/Miller/train.py index 3b4a210305..78fd0240e3 100644 --- a/recognition/Miller/train.py +++ b/recognition/Miller/train.py @@ -3,13 +3,16 @@ should be imported from “modules.py” and the data loader should be imported from “dataset.py”. Make sure to plot the losses and metrics during training """ +# %% import dataset as data import modules as mod import matplotlib.pyplot as plt import numpy as np +import tensorflow as tf # Download Data and then unzip #download_oasis() +# %% """ PROCESS TRAINING DATA""" # Load the training data from the Oasis Data set @@ -52,19 +55,53 @@ """ MODEL AND TRAIN VQ-VAE """ # Create a instance of the VQ-VAE model -latent_dimensions = 16 #dimensionality if each latent embedding vector +latent_dimensions = 32 #dimensionality if each latent embedding vector embeddings_number = 128 #number of embeddings in the codebook -variance = np.var(train_X / 255.0) -model = mod.vqvae_model(variance, latent_dimensions, embeddings_number) -model.model.summary() +#variance = np.var(train_X / 255.0) +model = mod.VQVAETRAINER(1, latent_dimensions, embeddings_number) -model.compile (optimizer="Adam") -history = model.fit(train_X, epochs=5, batch_size=128) +""" +Optimiser -> learning rate +'adam' adjusts learning rate whilst training; learning rate deterines how fast optimal weights are calculated. 
Smaller +Learning rate = more wights but takes longer to compute +""" +# Create Model +model.compile (optimizer='adam') + +# Train model +history = model.fit(train_X, epochs=2, validation_data=(test_X), batch_size=128) print("disaster!!!!") + + +# Plot Accuracy +plt.plot(history.history['accuracy'], label='accuracy') +plt.plot(history.history['val_accuracy'], label='val_accuracy') +plt.title('Model Accuracy') +plt.xlabel('Epoch') +plt.ylabel('Accuracy') +plt.ylim([0.5,1]) +plt.legend(['Train', 'Validation'], loc='upper left') +plt.show() + +# Plot Loss +plt.plot(history.history['loss'], label='loss') +plt.plot(history.history['val_loss'], label='val_loss') +plt.title('Model Loss') +plt.xlabel('Epoch') +plt.ylabel('Loss') +plt.legend(['Train', 'Validation'], loc='upper left') +plt.show() + # evaluate against testing data -#model.evaluate(test_X,test_Y) +test_loss, test_acc = model.evaluate(test_X, test_Y, verbose=2) +print("Accuracy test is: ", test_acc) + + +""" MODEL AND TRAIN VQ-VAE """ + +""" RECONSTRUCTION RESULTS""" # Plots the original image against the reconstructed one def plot_comparision_original_to_reconstructed(original, reconstructed): plt.figure(figsize = (10,12)) @@ -80,7 +117,7 @@ def plot_comparision_original_to_reconstructed(original, reconstructed): plt.show() -trained_model = model.model +trained_model = mod.model.model idx = np.random.choice(len(test_X), 10) test_images = test_X[idx] reconstructions_test = trained_model.predict(test_images) @@ -88,8 +125,6 @@ def plot_comparision_original_to_reconstructed(original, reconstructed): for test_image, reconstructed_image in zip(test_images, reconstructions_test): plot_comparision_original_to_reconstructed(test_image, reconstructed_image) - - # Return the average pixel value for the image and the reconstruction def calculate_mean(image, reconstructed_image): image_pixel = 0 @@ -164,4 +199,5 @@ def structural_similarity_mean(test_X, model): return structured_similarity_coef / len(test_X) -print(structural_similarity_mean(test_X, trained_model)) \ No newline at end of file +print(structural_similarity_mean(test_X, trained_model)) +# %% From b0d498b394b952ce6a2bb3c32c40f86d7b897870 Mon Sep 17 00:00:00 2001 From: dapmiller <96099274+dapmiller@users.noreply.github.com> Date: Fri, 21 Oct 2022 19:56:21 +1000 Subject: [PATCH 20/29] Create Images --- recognition/Miller/Images | 1 + 1 file changed, 1 insertion(+) create mode 100644 recognition/Miller/Images diff --git a/recognition/Miller/Images b/recognition/Miller/Images new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/recognition/Miller/Images @@ -0,0 +1 @@ + From 9b90b9f793b08f065584e8af772ab916e82e7e43 Mon Sep 17 00:00:00 2001 From: dapmiller <96099274+dapmiller@users.noreply.github.com> Date: Fri, 21 Oct 2022 19:56:39 +1000 Subject: [PATCH 21/29] Delete Images --- recognition/Miller/Images | 1 - 1 file changed, 1 deletion(-) delete mode 100644 recognition/Miller/Images diff --git a/recognition/Miller/Images b/recognition/Miller/Images deleted file mode 100644 index 8b13789179..0000000000 --- a/recognition/Miller/Images +++ /dev/null @@ -1 +0,0 @@ - From f396991ef9ba666a292dc88aa8eed577f5240769 Mon Sep 17 00:00:00 2001 From: dapmiller Date: Fri, 21 Oct 2022 19:59:20 +1000 Subject: [PATCH 22/29] Edited train.py bug in training. It now runs and the losses appear to decrease realistically. The bug occurred due to the value of the variance variable being feed into model.fit. 
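A minimal sketch of why that variance value matters, assuming the train_step shown earlier (reconstruction MSE divided by self.variance); the array and the /255 mismatch below are illustrative stand-ins, not the exact values used in the repo:

```python
import numpy as np

# Stand-in for the residual-normalised training array from dataset.process_training
train_X = np.random.randn(32, 256, 256, 1).astype("float32")

correct_var = np.var(train_X)             # variance of the array actually passed to fit(), ~1.0 here
mismatched_var = np.var(train_X / 255.0)  # example of a variance computed on a rescaled copy

mse = float(np.mean((train_X - 0.9 * train_X) ** 2))  # a stand-in reconstruction error
print(mse / correct_var)     # loss on a sensible scale
print(mse / mismatched_var)  # ~255**2 times larger -> a runaway total loss during training
```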
--- recognition/Miller/train.py | 127 +++++++++++++++++++++--------------- 1 file changed, 73 insertions(+), 54 deletions(-) diff --git a/recognition/Miller/train.py b/recognition/Miller/train.py index 78fd0240e3..a3c88d2afb 100644 --- a/recognition/Miller/train.py +++ b/recognition/Miller/train.py @@ -24,12 +24,12 @@ # Pre process training data set train_X = data.process_training(train_X) - +train_x_var = np.var(train_X) # Load the validaton data from the oasis Data set -validate_X = data.load_training ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_validate") +#validate_X = data.load_training ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_validate") # Pre process validation data set -validate_X = data.process_training(validate_X) +#validate_X = data.process_training(validate_X) # Load the test data from the oasis Data Set test_X = data.load_training ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_test") @@ -44,21 +44,21 @@ train_Y = data.process_labels(train_Y) # Load the segmented validation labels data from the Oasis Data set -validate_Y = data.load_labels("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_seg_validate") +#validate_Y = data.load_labels("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_seg_validate") # Pre process validation labels data -validate_Y = data.process_labels(validate_Y) +#validate_Y = data.process_labels(validate_Y) # Load the segmented test labels data from the Oasis Data set test_Y = data.load_labels("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_seg_test") # Pre process test labels data test_Y = data.process_labels(test_Y) - +#%% """ MODEL AND TRAIN VQ-VAE """ # Create a instance of the VQ-VAE model -latent_dimensions = 32 #dimensionality if each latent embedding vector +latent_dimensions = 16 #dimensionality if each latent embedding vector embeddings_number = 128 #number of embeddings in the codebook -#variance = np.var(train_X / 255.0) -model = mod.VQVAETRAINER(1, latent_dimensions, embeddings_number) + +model = mod.VQVAETRAINER(train_x_var, latent_dimensions, embeddings_number) """ Optimiser -> learning rate @@ -69,62 +69,23 @@ model.compile (optimizer='adam') # Train model -history = model.fit(train_X, epochs=2, validation_data=(test_X), batch_size=128) +history = model.fit(train_X, epochs=15, batch_size=128) print("disaster!!!!") - -# Plot Accuracy -plt.plot(history.history['accuracy'], label='accuracy') -plt.plot(history.history['val_accuracy'], label='val_accuracy') -plt.title('Model Accuracy') -plt.xlabel('Epoch') -plt.ylabel('Accuracy') -plt.ylim([0.5,1]) -plt.legend(['Train', 'Validation'], loc='upper left') -plt.show() - +#%% # Plot Loss -plt.plot(history.history['loss'], label='loss') -plt.plot(history.history['val_loss'], label='val_loss') -plt.title('Model Loss') +plt.plot(history.history['reconstruction_loss'], label='Reconstruction Loss') +plt.title('VQVAE Loss') plt.xlabel('Epoch') plt.ylabel('Loss') plt.legend(['Train', 'Validation'], loc='upper left') plt.show() -# evaluate against testing data -test_loss, test_acc = model.evaluate(test_X, test_Y, verbose=2) - -print("Accuracy test is: ", test_acc) - - +#%% """ MODEL AND TRAIN VQ-VAE """ """ RECONSTRUCTION RESULTS""" -# Plots the original image against the reconstructed one -def plot_comparision_original_to_reconstructed(original, reconstructed): - 
plt.figure(figsize = (10,12)) - plt.subplot(1, 2, 1) - plt.imshow(original.squeeze() + 0.5, cmap = 'gray') - plt.title("Original") - plt.axis("off") - - plt.subplot(1, 2, 2) - plt.imshow(reconstructed.squeeze() + 0.5, cmap = 'gray') - plt.title("Reconstructed") - plt.axis("off") - - plt.show() - -trained_model = mod.model.model -idx = np.random.choice(len(test_X), 10) -test_images = test_X[idx] -reconstructions_test = trained_model.predict(test_images) - -for test_image, reconstructed_image in zip(test_images, reconstructions_test): - plot_comparision_original_to_reconstructed(test_image, reconstructed_image) - # Return the average pixel value for the image and the reconstruction def calculate_mean(image, reconstructed_image): image_pixel = 0 @@ -181,6 +142,42 @@ def structural_similarity(mean_X, predicted_mean, stddev_X, predicted_stddev, co structure_x_y = (covariance+C3)/(stddev_X*predicted_stddev+C3) return luminance_x_y * contrast_x_y * structure_x_y +# Plots the original image against the reconstructed one with their Structured similarity rating +def plot_comparision_original_to_reconstructed(original, reconstructed, ssim): + plt.suptitle("Structured Similiarity Rating: %.2f" %ssim) + + plt.figure(figsize = (10,12)) + plt.subplot(1, 2, 1) + plt.imshow(original.squeeze() + 0.5, cmap = 'gray') + plt.title("Original") + plt.axis("off") + + plt.subplot(1, 2, 2) + plt.imshow(reconstructed.squeeze() + 0.5, cmap = 'gray') + plt.title("Reconstructed") + plt.axis("off") + + plt.show() + +trained_model = model.vqvae_model + +# Select 5 random Test images +idx = np.random.choice(len(test_X), 5) +test_images = test_X[idx] +reconstructions_test = trained_model.predict(test_images) + +# Perform Predictions on the test images +for test_image, reconstructed_image in zip(test_images, reconstructions_test): + mean, mean_r = calculate_mean(test_image, reconstructed_image) + stddev, stddev_r = calculate_stddev(test_image, mean, reconstructed_image, mean_r) + cov = calculate_covariance(test_image, reconstructed_image, mean, mean_r) + structured_similiarity_rating = structural_similarity(mean, mean_r, stddev, stddev_r, cov) + plot_comparision_original_to_reconstructed(test_image, reconstructed_image, structured_similiarity_rating) + + + +#%% + # Returns the structured similarity for the entire data set def structural_similarity_mean(test_X, model): structured_similarity_coef = 0 @@ -199,5 +196,27 @@ def structural_similarity_mean(test_X, model): return structured_similarity_coef / len(test_X) -print(structural_similarity_mean(test_X, trained_model)) +# Calculate the mean structural Similarity for the reconstructed images +mean_structured_similiarity = structural_similarity_mean(test_X, trained_model) +print(mean_structured_similiarity) + # %% +encoder = model.vqvae_model.get_layer("encoder") +quantizer = model.vqvae_model.get_layer("vector_quantizer") + +encoded_outputs = encoder.predict(test_images) +flat_enc_outputs = encoded_outputs.reshape(-1, encoded_outputs.shape[-1]) +codebook_indices = quantizer.get_code_indices(flat_enc_outputs) +codebook_indices = codebook_indices.numpy().reshape(encoded_outputs.shape[:-1]) + +for i in range(len(test_images)): + plt.subplot(1, 2, 1) + plt.imshow(test_images[i].squeeze() + 0.5) + plt.title("Original") + plt.axis("off") + + plt.subplot(1, 2, 2) + plt.imshow(codebook_indices[i]) + plt.title("Code") + plt.axis("off") + plt.show() From 3aeadda522ad329a6dcb35e2d065bf12cff35fb7 Mon Sep 17 00:00:00 2001 From: dapmiller Date: Fri, 21 Oct 2022 21:19:44 +1000 Subject: 
[PATCH 23/29] Fixed Bug in model.fit that caused loss. It was due to variance being calculated incorrectly. Wrote draft read.me file --- recognition/Miller/train.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/recognition/Miller/train.py b/recognition/Miller/train.py index a3c88d2afb..de4984a1e3 100644 --- a/recognition/Miller/train.py +++ b/recognition/Miller/train.py @@ -112,8 +112,8 @@ def calculate_stddev(image, reconstructed_image, image_mean, reconstructed_image image_variance += np.square(image[row][col] - image_mean) reconstructed_image_variance += np.square(reconstructed_image[row][col] - reconstructed_image_mean) - image_variance = np.sqrt(image_variance/256**2 - 1) - reconstructed_image_variance = np.sqrt(reconstructed_image_variance/256**2 - 1) + image_variance = np.sqrt(image_variance/(256**2 - 1)) + reconstructed_image_variance = np.sqrt(reconstructed_image_variance/(256**2 - 1)) return image_variance, reconstructed_image_variance # Returns the covariance for both images @@ -124,7 +124,7 @@ def calculate_covariance(image, reconstructed_image, image_mean, predicted_mean) for col in range(256): covariance_value += (image[row][col] - image_mean)*(reconstructed_image[row][col] - predicted_mean) - return covariance_value/256**256-1 + return covariance_value/(256**256-1) # Return the structural similarity between two images; measures the window x and y of common size. @@ -146,7 +146,7 @@ def structural_similarity(mean_X, predicted_mean, stddev_X, predicted_stddev, co def plot_comparision_original_to_reconstructed(original, reconstructed, ssim): plt.suptitle("Structured Similiarity Rating: %.2f" %ssim) - plt.figure(figsize = (10,12)) + #plt.figure(figsize = (10,12)) plt.subplot(1, 2, 1) plt.imshow(original.squeeze() + 0.5, cmap = 'gray') plt.title("Original") @@ -168,10 +168,12 @@ def plot_comparision_original_to_reconstructed(original, reconstructed, ssim): # Perform Predictions on the test images for test_image, reconstructed_image in zip(test_images, reconstructions_test): - mean, mean_r = calculate_mean(test_image, reconstructed_image) - stddev, stddev_r = calculate_stddev(test_image, mean, reconstructed_image, mean_r) + """mean, mean_r = calculate_mean(test_image, reconstructed_image) + stddev, stddev_r = calculate_stddev(test_image,reconstructed_image, mean, mean_r) cov = calculate_covariance(test_image, reconstructed_image, mean, mean_r) structured_similiarity_rating = structural_similarity(mean, mean_r, stddev, stddev_r, cov) + """ + structured_similiarity_rating = tf.image.ssim(test_image, reconstructed_image, max_val=1.0) plot_comparision_original_to_reconstructed(test_image, reconstructed_image, structured_similiarity_rating) From a8f82d076f0b434d54a87b287e505df08fe4626a Mon Sep 17 00:00:00 2001 From: dapmiller Date: Fri, 21 Oct 2022 21:21:22 +1000 Subject: [PATCH 24/29] Fixed Bug in model.fit that caused loss. It was due to variance being calculated incorrectly. 
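Since the project targets a mean SSIM above 0.6, a short sketch of how the per-image tf.image.ssim call adopted above could be averaged over the whole test set; the model handle and max_val are assumptions and should match the repo's actual pre-processing:

```python
import numpy as np
import tensorflow as tf

def mean_ssim(vqvae, test_X, max_val=1.0, batch_size=32):
    """Average SSIM between test images and their reconstructions (illustrative sketch).

    Assumes test_X has shape (N, 256, 256, 1) with pixel values spanning roughly
    [0, max_val]; pick max_val to match the actual pre-processing.
    """
    scores = []
    for start in range(0, len(test_X), batch_size):
        batch = test_X[start:start + batch_size]
        recon = vqvae.predict(batch, verbose=0)
        # tf.image.ssim accepts batches and returns one score per image
        scores.append(tf.image.ssim(batch, recon, max_val=max_val).numpy())
    return float(np.mean(np.concatenate(scores)))

# Hypothetical usage once the VQ-VAE trainer has been fit:
# print(mean_ssim(model.vqvae_model, test_X))
```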
--- recognition/Miller/README.MD | 61 +++++++++++++----- recognition/Miller/dataset.py | 9 ++- recognition/Miller/modules.py | 4 +- recognition/Miller/predict.py | 118 +++++++++++++++++++++++++--------- 4 files changed, 142 insertions(+), 50 deletions(-) diff --git a/recognition/Miller/README.MD b/recognition/Miller/README.MD index f3f44efe4c..5b59bad248 100644 --- a/recognition/Miller/README.MD +++ b/recognition/Miller/README.MD @@ -1,24 +1,53 @@ -Vector Quantized Variational Auto-encoder(VQ VAE Model) -Oasis Brain Data Set -This program creates a create a generative model of the OASIS brain data set that has a “reasonably clear image” and a Structured Similarity (SSIM) of over 0.6. +# Vector Quantized Variational Auto-encoder(VQ VAE Model) -Description of VQ VAE Algorithm: -A standard VAE (encoder->decoder) uses a continous latent space that was sampled using gaussain distribution; this makes it hard to leatn continuous distribution with a gradient descent. In comparison, VQ VAE (encoder-> VQ layer-> decoder) ues discrete latent space; and consists of three parts: +In this report, a generative model of the Vector Quantized Variational AutoEncoder (VQ VAE) was used to generate reconstructed images of the OASIS brain data set that are "reasonably clear" and have a Structured Similarity (SSIM) of over 0.6. The VQ VAE was adapted using tensorflow keras. -1. Encoder -> Convolutional network to downsample the features of an image +#### Description of VQ VAE Algorithm +![](https://miro.medium.com/max/1400/1*yRdNe3xi4f3KV6ULW7yArA.png) +>Figure 1: Graphical representation of a VQ-VAE network. -2. Latent space -> Discrete "codebook" that describes the latent space. - codebook consists of n latent embedding vectors of dimension D each - each code represents the distance between each embedding and encoded output+ +A standard VAE (encoder->decoder) uses a continous latent space that is sampled using gaussain distribution; this makes it hard to learn a continuous distribution with a gradient descent. In comparison, VQ VAE uses a discrete latent space; and consists of three parts as seen above: -VQ VAES: - disccrete latent space - optimised by using discrete "codbook" -> made by discreting dist between continuous embedding and encoded outputs, the ndiscrete codewords sent to decoder which trained to generate reconstructered samples. +1. Encoder: + * Convolutional network to downsample the features of an image +2. Latent Space: + * Codebook consists of n latent embedding vectors of dimension D each + * Each code represents the distance between each embedding and encoded output (euclidean distance) ->outputs embeded vector + * feed closest encoder output to codebook as input to decoder +3. Decoder: + * Convolutional network to upsample and gnerate reconstructed samples. - Generative model based on VAE. - Aims to make latent space discrete using VQ techniques +#### ==============Oasis Brain Data Set============== +![](https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRl7czOsj3uzWRQ6NT2ofed7QBsKiqrUq6Bsw&usqp=CAU) +>Figure 2: Comparison of an image stored in the train vs test data sets - cons: loses the "easy latent sampling" propery of VAES. 2 stage training required to learn fitting categorical prior - + training objective not correspond to bound on log-likelihood amnymore +The Oasis MRI Dataset cobtains 9664 training images, 544 test images and 1120 validation images. An example of train and test data is shown above. 
The images are preloaded into a file location and are extracted from there for processing.
+
+##### Data Pre-Processing
+
+Before the data was used, it was normalised through residual extraction and rescaling. This makes it easier to compare distributions with different means and scales while maintaining the shape of the distribution.
+
+## ==============Training==============
+
+The three data groups - train, test and validate - are split 0.85/0.1/0.05. The training set contains the most images so the model has enough information to learn from to produce accurate reconstructions later. The test set is used to validate these reconstructions. The validation set is not required, as the model is judged by the quality of the reconstructions on the test set. The model is trained with ... epochs on a batch size of 128.
+*insert image
+
+## ==============Results==============
+
+The reconstructed images achieved a mean Structured Similarity of ...
+*insert image
+## Dependencies
+* Python 3.7
+* TensorFlow 2.6.0
+* Numpy 1.19.5
+* matplotlib 3.2.2
+* Pillow 7.1.2
+* os
+* Pre-processed OASIS MRI dataset (accessible at https://cloudstor.aarnet.edu.au/plus/s/n5aZ4XX1WBKp6HZ/download).
+
+## References
+[1] A. v. d. Oord, O. Vinyals, and K. Kavukcuoglu, 2018. Neural Discrete Representation Learning. [Online]. Available at: https://arxiv.org/pdf/1711.00937.pdf.
+
+[2] Paul, S., 2021. Keras documentation: Vector-Quantized Variational Autoencoders. [Online]. Available at: https://keras.io/examples/generative/vq_vae/.
+
+[3] https://github.com/shakes76/PatternFlow/tree/master/recognition/MySolution

diff --git a/recognition/Miller/dataset.py b/recognition/Miller/dataset.py
index 4dd63193c4..ecb9d05c38 100644
--- a/recognition/Miller/dataset.py
+++ b/recognition/Miller/dataset.py
@@ -8,6 +8,9 @@
 import glob
 import numpy as np
 from matplotlib import image
+import os
+from PIL import Image
+
 # Download the Oasis Data as zip file.
Will need to extract it manually afterwards def download_oasis (): @@ -19,8 +22,8 @@ def download_oasis (): # Loads the training images (non segmented) from given path and returns an numpy array of arrays def load_training (path): + image_list = [] - # Iterate through all paths and convert to 'png' for filename in glob.glob(path + '/*.png'): # Read an image from the given filename into an array @@ -32,11 +35,13 @@ def load_training (path): # Create an numpy array to hold all the array turned images train_set = np.array(image_list, dtype=np.float32) + + return train_set # Normalizes training images and adds 4th dimention def process_training (data_set): - + """ Residual Extraction -> Useful for comparing distributions with different means but similar shapes""" # Calculate the residuals of the data - each residual is dist from each distribution mean which is now zero data_set = (data_set - np.mean(data_set)) / np.std(data_set) diff --git a/recognition/Miller/modules.py b/recognition/Miller/modules.py index d90b6f087e..0ece4cbbfc 100644 --- a/recognition/Miller/modules.py +++ b/recognition/Miller/modules.py @@ -192,4 +192,6 @@ def train_step(self, x): "loss": self.total_loss_tracker.result(), "reconstruction_loss": self.reconstruction_loss_tracker.result(), "vqvae_loss": self.vq_loss_tracker.result(), - } \ No newline at end of file + } + + diff --git a/recognition/Miller/predict.py b/recognition/Miller/predict.py index 1585fea693..43d540db6e 100644 --- a/recognition/Miller/predict.py +++ b/recognition/Miller/predict.py @@ -1,57 +1,113 @@ """ “predict.py" showing example usage of your trained model. Print out any results and / or provide visualisations where applicable """ -import tensorflow as tf -import pathlib import numpy as np import matplotlib.pyplot as plt -from matplotlib import image -import glob import modules as mod -import train as t + import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) # Show how well program performs """ MODEL AND TRAIN VQ-VAE """ -# Create a instance of the VQ-VAE model -latent_dimensions = 16 -embeddings_number = 64 -image_size = 256 -# beta = [0.25, 2] -beta = 0.25 -model = mod.vqvae_model(image_size, latent_dimensions, embeddings_number, beta) - -model.summary() - - -model.compile (optimizer="Adam", loss= tf.keras.losses.CategoricalCrossentropy()) - -# record history of training to display loss over ephocs -history = model.fit(t.train_X, t.train_Y, validation_data= (t.validate_X, t.validate_Y) ,batch_size=32,shuffle=True,epochs=5) - -# evaluate against testing data -model.evaluate(t.test_X,t.test_Y) -def show_subplot(original, reconstructed): +""" RECONSTRUCTION RESULTS""" +# Plots the original image against the reconstructed one +def plot_comparision_original_to_reconstructed(original, reconstructed): + plt.figure(figsize = (10,12)) plt.subplot(1, 2, 1) - plt.imshow(original.squeeze() + 0.5) + plt.imshow(original.squeeze() + 0.5, cmap = 'gray') plt.title("Original") plt.axis("off") plt.subplot(1, 2, 2) - plt.imshow(reconstructed.squeeze() + 0.5) + plt.imshow(reconstructed.squeeze() + 0.5, cmap = 'gray') plt.title("Reconstructed") plt.axis("off") plt.show() +trained_model = mod.model.model +idx = np.random.choice(len(test_X), 10) +test_images = test_X[idx] +reconstructions_test = trained_model.predict(test_images) +for test_image, reconstructed_image in zip(test_images, reconstructions_test): + plot_comparision_original_to_reconstructed(test_image, reconstructed_image) -idx = np.random.choice(len(t.test_X), 10) -test_images = 
t.test_X[idx] -reconstructions_test = model.predict(test_images) +# Return the average pixel value for the image and the reconstruction +def calculate_mean(image, reconstructed_image): + image_pixel = 0 + reconstructed_pixel = 0 -for test_image, reconstructed_image in zip(test_images, reconstructions_test): - show_subplot(test_image, reconstructed_image) \ No newline at end of file + for row in range(256): + for col in range(256): + image_pixel += image[row][col] + reconstructed_pixel += reconstructed_image[row][col] + + image_pixel = image_pixel / (256**2) + reconstructed_pixel = reconstructed_pixel / (256**2) + + return image_pixel, reconstructed_image + +# Returns std dev for the pixel value of each image +def calculate_stddev(image, reconstructed_image, image_mean, reconstructed_image_mean): + + image_variance = 0 + reconstructed_image_variance = 0 + + for row in range(256): + for col in range(256): + image_variance += np.square(image[row][col] - image_mean) + reconstructed_image_variance += np.square(reconstructed_image[row][col] - reconstructed_image_mean) + + image_variance = np.sqrt(image_variance/256**2 - 1) + reconstructed_image_variance = np.sqrt(reconstructed_image_variance/256**2 - 1) + return image_variance, reconstructed_image_variance + +# Returns the covariance for both images +def calculate_covariance(image, reconstructed_image, image_mean, predicted_mean): + covariance_value = 0 + + for row in range(256): + for col in range(256): + covariance_value += (image[row][col] - image_mean)*(reconstructed_image[row][col] - predicted_mean) + + return covariance_value/256**256-1 + + +# Return the structural similarity between two images; measures the window x and y of common size. +# https://en.wikipedia.org/wiki/Structural_similarity +def structural_similarity(mean_X, predicted_mean, stddev_X, predicted_stddev, covariance): + K1 = 0.01 # default value + K2 = 0.03 # default value + L = 255 # dynamic range of pixel value (2^bits per pixel -1) + C1 = (K1 * L)**2 + C2 = (K2 * L)**2 + C3 = C2 / 2 + + luminance_x_y = (2*mean_X*predicted_mean + C1)/(mean_X**2+predicted_mean**2+C1) + contrast_x_y = (2*stddev_X*predicted_stddev + C2)/(stddev_X**2+np. predicted_stddev**2+C2) + structure_x_y = (covariance+C3)/(stddev_X*predicted_stddev+C3) + return luminance_x_y * contrast_x_y * structure_x_y + +# Returns the structured similarity for the entire data set +def structural_similarity_mean(test_X, model): + structured_similarity_coef = 0 + + for i, data in enumerate(test_X): + # get reconstructed image + image_reconstruction = model.predict(data) + data = data[0,:,:,0] + image_reconstruction = image_reconstruction[0,:,:,0] + + # Calculate structured similarity and add to total + mean_X, predicted_mean = calculate_mean(data, image_reconstruction) + stddev_X, predicted_stddev = calculate_stddev(data, image_reconstruction, mean_X, predicted_mean) + covariance = calculate_covariance(data, image_reconstruction, mean_X, predicted_mean) + structured_similarity_coef += structural_similarity(mean_X, predicted_mean, stddev_X, predicted_stddev, covariance) + + return structured_similarity_coef / len(test_X) + +print(structural_similarity_mean(test_X, trained_model)) \ No newline at end of file From ab702fe00162131a5b648a3dddc7534549fdcaa8 Mon Sep 17 00:00:00 2001 From: dapmiller Date: Fri, 21 Oct 2022 21:29:55 +1000 Subject: [PATCH 25/29] Fixed Bug in model.fit that caused loss. It was due to variance being calculated incorrectly. 
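For context, a minimal, self-contained sketch of the loss scaling this fix relies on (random arrays stand in for a real batch and its reconstruction; the names mirror train_x_var in train.py and the reconstruction loss in modules.py):

    import numpy as np
    import tensorflow as tf

    # Stand-in data: a "batch" of images and their reconstructions
    train_X = np.random.rand(16, 256, 256, 1).astype("float32")
    reconstructions = np.random.rand(16, 256, 256, 1).astype("float32")

    # Variance of the training data, computed once and passed to the trainer
    train_x_var = np.var(train_X)

    # Reconstruction MSE scaled by that variance, as in the custom train_step
    reconstruction_loss = tf.reduce_mean((train_X - reconstructions) ** 2) / train_x_var
    print(float(reconstruction_loss))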
---
 recognition/s4581053 VQVAE OASIS/README.MD  |  53 +++++
 recognition/s4581053 VQVAE OASIS/dataset.py | 113 ++++++++++
 recognition/s4581053 VQVAE OASIS/modules.py | 197 +++++++++++++++++
 recognition/s4581053 VQVAE OASIS/predict.py | 113 ++++++++++
 recognition/s4581053 VQVAE OASIS/train.py   | 224 ++++++++++++++++++++
 5 files changed, 700 insertions(+)
 create mode 100644 recognition/s4581053 VQVAE OASIS/README.MD
 create mode 100644 recognition/s4581053 VQVAE OASIS/dataset.py
 create mode 100644 recognition/s4581053 VQVAE OASIS/modules.py
 create mode 100644 recognition/s4581053 VQVAE OASIS/predict.py
 create mode 100644 recognition/s4581053 VQVAE OASIS/train.py

diff --git a/recognition/s4581053 VQVAE OASIS/README.MD b/recognition/s4581053 VQVAE OASIS/README.MD
new file mode 100644
index 0000000000..5b59bad248
--- /dev/null
+++ b/recognition/s4581053 VQVAE OASIS/README.MD
@@ -0,0 +1,53 @@
+# Vector Quantized Variational Auto-encoder (VQ-VAE Model)
+
+In this report, a generative model, the Vector Quantized Variational AutoEncoder (VQ-VAE), was used to generate reconstructed images of the OASIS brain data set that are "reasonably clear" and have a Structured Similarity (SSIM) of over 0.6. The VQ-VAE was implemented with TensorFlow Keras.
+
+#### Description of VQ VAE Algorithm
+![](https://miro.medium.com/max/1400/1*yRdNe3xi4f3KV6ULW7yArA.png)
+>Figure 1: Graphical representation of a VQ-VAE network.
+
+A standard VAE (encoder -> decoder) uses a continuous latent space that is sampled from a Gaussian distribution, which makes the distribution hard to learn with gradient descent. In comparison, a VQ-VAE uses a discrete latent space and consists of three parts, as seen above:
+
+1. Encoder:
+ * Convolutional network that downsamples the features of an image
+2. Latent Space:
+ * The codebook consists of n latent embedding vectors, each of dimension D
+ * Each encoder output is matched to its closest embedding by Euclidean distance, which yields the quantised (embedded) vector
+ * The closest codebook vector is then fed to the decoder as its input
+3. Decoder:
+ * Convolutional network that upsamples and generates the reconstructed samples.
+
+#### ==============Oasis Brain Data Set==============
+![](https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRl7czOsj3uzWRQ6NT2ofed7QBsKiqrUq6Bsw&usqp=CAU)
+>Figure 2: Comparison of an image stored in the train vs test data sets
+
+The OASIS MRI dataset contains 9664 training images, 544 test images and 1120 validation images. An example of the train and test data is shown above. The images are preloaded into a local directory and read from there for preprocessing.
+
+##### Data Pre-Processing
+
+Before the data was used, it was normalised through residual extraction (zero mean, unit variance) and min-max rescaling. This makes distributions with different means and scales directly comparable while preserving their shape.
+
+## ==============Training==============
+
+The three data groups - train, test and validate - are split 0.85/0.1/0.05. The training set contains the most images so that the model has enough information to learn from to produce accurate reconstructions later. The test set is used to validate these reconstructions. The validation set is not required, as the model is judged by the quality of the reconstructions on the test set. The model is trained with ... epochs on a batch size of 128.
+*insert image
+
+## ==============Results==============
+
+The reconstructed images achieved a mean Structured Similarity of ...
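+
+As a rough guide, the mean SSIM over the test set can be computed with TensorFlow's built-in SSIM. This is a hedged sketch, assuming `test_X` is the preprocessed test array and `trained_model` is the fitted VQ-VAE, as they are named in train.py:
+
+```python
+import tensorflow as tf
+
+def mean_ssim(model, images):
+    """Mean SSIM between a batch of images and their VQ-VAE reconstructions."""
+    reconstructions = model.predict(images)
+    scores = tf.image.ssim(tf.convert_to_tensor(images),
+                           tf.convert_to_tensor(reconstructions),
+                           max_val=1.0)  # images are rescaled to [0, 1] in preprocessing
+    return float(tf.reduce_mean(scores))
+
+# Example usage: mean_ssim(trained_model, test_X)
+```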
+*Inerset image +## Dependencies +* Python 3.7 +* TensorFlow 2.6.0 +* Numpy 1.19.5 +* matplotlib 3.2.2 +* Pillow 7.1.2 +* os +* Pre-processed OASIS MRI dataset (accessible at https://cloudstor.aarnet.edu.au/plus/s/n5aZ4XX1WBKp6HZ/download). + +## References +[1] A. v. d. Oord, O. Vinyals, and K. Kavukcuoglu, 2018. Neural Discrete Representation Learning. [Online]. Available at: https://arxiv.org/pdf/1711.00937.pdf. + +[2] Paul, S., 2021. Keras documentation: Vector-Quantized Variational Autoencoders. [online] Keras.io. Available at: https://keras.io/examples/generative/vq_vae/. + +[3] https://github.com/shakes76/PatternFlow/tree/master/recognition/MySolution diff --git a/recognition/s4581053 VQVAE OASIS/dataset.py b/recognition/s4581053 VQVAE OASIS/dataset.py new file mode 100644 index 0000000000..ecb9d05c38 --- /dev/null +++ b/recognition/s4581053 VQVAE OASIS/dataset.py @@ -0,0 +1,113 @@ +""" +dataset.py" containing the data loader for loading and preprocessing your data + +This was file utilises and modifies the fucntions found in https://github.com/shakes76/PatternFlow/tree/master/recognition/MySolution +""" + +import tensorflow as tf +import glob +import numpy as np +from matplotlib import image +import os +from PIL import Image + + +# Download the Oasis Data as zip file. Will need to extract it manually afterwards +def download_oasis (): + + dataset_url = "https://cloudstor.aarnet.edu.au/plus/s/n5aZ4XX1WBKp6HZ/download" + + # Download file from URL Path, origin=path, fname=file name, untar=compress file + tf.keras.utils.get_file(origin=dataset_url,fname='oa-sis' ,untar=True) + +# Loads the training images (non segmented) from given path and returns an numpy array of arrays +def load_training (path): + + image_list = [] + # Iterate through all paths and convert to 'png' + for filename in glob.glob(path + '/*.png'): + # Read an image from the given filename into an array + im = image.imread (filename) + # Append array to list + image_list.append(im) + + print('train_X shape:', np.array(image_list).shape) + + # Create an numpy array to hold all the array turned images + train_set = np.array(image_list, dtype=np.float32) + + + return train_set + +# Normalizes training images and adds 4th dimention +def process_training (data_set): + + """ Residual Extraction -> Useful for comparing distributions with different means but similar shapes""" + # Calculate the residuals of the data - each residual is dist from each distribution mean which is now zero + data_set = (data_set - np.mean(data_set)) / np.std(data_set) + """ Min-Max Rescaling -> Useful for comparign distributions with different scales or different shapes""" + # Rescale Data - ratio of dist of each value from min value in each dataset to range of values in each dataset -> value between (0,1) now + # Forces dataset to be same scale, and perseves shape of distribution -> "Squeezed and shifted to fit between 0 and 1" + data_set= (data_set - np.amin(data_set)) / np.amax(data_set - np.amin(data_set)) + # Add 4th dimension + data_set = data_set [:,:,:,np.newaxis] + + return data_set + +# Loads labels images from given path and map pixel values to class indices and convert image data type to unit8 +def load_labels (path): + image_list =[] + + # Iterate through all paths and convert to 'png' + for filename in glob.glob(path+'/*.png'): + # Read an image from the given filename into an array + im=image.imread (filename) + # Create 'im.shape[0] x im.shape[1]' shaped array of arrays of zeros + one_hot = np.zeros((im.shape[0], im.shape[1])) + # 
Iterate through sorted and unique arrays of given array turned image + for i, unique_value in enumerate(np.unique(im)): + # One hot each unique array with its numerical value of its entry in the dataset -> transform categorical into numerical dummy features + one_hot[:, :][im == unique_value] = i + # Append array to list + image_list.append(one_hot) + + print('train_y shape:',np.array(image_list).shape) + + # Create an numpy array to hold all the array turned images + labels = np.array(image_list, dtype=np.uint8) + + #pyplot.imshow(labels[2]) + #pyplot.show() + + return labels + +# One hot encode label data and convert to numpy array +def process_labels (seg_data): + onehot_Y = [] + + # Iterate through all array turned images by shapes first value + for n in range(seg_data.shape[0]): + + # Get data at position in array + im = seg_data[n] + + # There are 4 classes + n_classes = 4 + + # Create 'im.shape[0] x im.shape[1] x n_classes' shaped array of arrays of arrays of zeros with type uint8 + one_hot = np.zeros((im.shape[0], im.shape[1], n_classes),dtype=np.uint8) + + # Iterate through sorted and unique arrays of given array turned image + for i, unique_value in enumerate(np.unique(im)): + # One hot each unique array with its numerical value of its entry in the dataset -> transform categorical into numerical dummy features + one_hot[:, :, i][im == unique_value] = 1 + # Append array to list + onehot_Y.append(one_hot) + + # Create an numpy array to hold all the array turned images + onehot_Y =np.array(onehot_Y) + #print (onehot_Y.dtype) + #print (np.unique(onehot_validate_Y)) + #print (onehot_Y.shape) + + return onehot_Y \ No newline at end of file diff --git a/recognition/s4581053 VQVAE OASIS/modules.py b/recognition/s4581053 VQVAE OASIS/modules.py new file mode 100644 index 0000000000..0ece4cbbfc --- /dev/null +++ b/recognition/s4581053 VQVAE OASIS/modules.py @@ -0,0 +1,197 @@ +""" +“modules.py" containing the source code of the components of your model. Each component must be +implementated as a class or a function + +Based on Neural Discrete Representation Learning by van der Oord et al https://arxiv.org/pdf/1711.00937.pdf +and the given example on https://keras.io/examples/generative/vq_vae/ +""" +import tensorflow as tf + +"""CREATE STRUCTURE OF VQ-VAR MODEL""" + +""" +Class Representation of the Vector Quantization laye + +Structure is: + 1. Reshape into (n,h,w,d) + 2. Calculate L2-normalized distance between the inputs and the embeddings. -> (n*h*w, d) + 3. Argmin -> find minimum distance between indices for each n*w*h vector + 4. Index from dictionary: index the closest vector from the dictionary for each of n*h*w vectors + 5. Reshape into original shape (n, h, w, d) + 6. Copy gradients from q -> x +""" +class VectorQ_layer(tf.keras.layers.Layer): + def __init__(self, embedding_num, latent_dimension, beta=0.25, **kwargs): + super().__init__(**kwargs) + self.embedding_num = embedding_num + self.latent_dimension = latent_dimension + self.beta = beta + + # Initialize the embeddings which we will quantize. + w_init = tf.random_uniform_initializer() + self.embeddings = tf.Variable(initial_value=w_init(shape=(self.latent_dimension, self.embedding_num), dtype="float32"),trainable=True,name="embeddings_vqvae",) + + # Forward Pass behaviour. Takes Tensor as input + def call(self, x): + # Calculate the input shape and store for later -> Shape of (n,h,w,d) + input_shape = tf.shape(x) + + # Flatten the inputs to keep the embedding dimension intact. 
+ # Combine all dimensions into last one 'd' -> (n*h*w, d) + flatten = tf.reshape(x, [-1, self.latent_dimension]) + + # Get code indices + # Calculate L2-normalized distance between the inputs and the embeddings. + # For each n*h*w vectors, we calculate the distance from each of k vectors of embedding dictionaty to obtain matrix of shape (n*h*w, k) + similarity = tf.matmul(flatten, self.embeddings) + distances = (tf.reduce_sum(flatten ** 2, axis=1, keepdims=True) + tf.reduce_sum(self.embeddings ** 2, axis=0) - 2 * similarity) + + # For each n*h*w vectors, find the indices of closest k vector from dictionary; find minimum distance. + encoded_indices = tf.argmin(distances, axis=1) + + # Turn the indices into a one hot encoded vectors; index the closest vector from the dictionary for each n*h*w vector + encodings = tf.one_hot(encoded_indices, self.embedding_num) + quantized = tf.matmul(encodings, self.embeddings, transpose_b=True) + + # Reshape the quantized values back to its original input shape -> (n,h,w,d) + quantized = tf.reshape(quantized, input_shape) + + """ LOSS CALCULATIONS """ + """ + COMMITMENT LOSS + Since volume of embedding spcae is dimensionless, it may grow arbitarily if embedding ei does not + train as fast as encoder parameters. Thus add a commitment loss to make sure encoder commits to an embedding + CODE BOOK LOSS + Gradients bypass embedding, so we use a dictionary learningn algorithm which uses l2 error to + move embedding vectors ei towards encoder output + + tf.stop_gradient -> no gradient flows through + """ + commitment_loss = tf.reduce_mean((tf.stop_gradient(quantized) - x) ** 2) + codebook_loss = tf.reduce_mean((quantized - tf.stop_gradient(x)) ** 2) + self.add_loss(self.beta * commitment_loss + codebook_loss) + # Straight-through estimator. + # Unable to back propragate as gradient wont flow through argmin. Hence copy gradient from qunatised to x + # During backpropagation, (quantized -x) wont be included in computation anf the gradient obtained will be copied for inputs + quantized = x + tf.stop_gradient(quantized - x) + + return quantized + +# Represents the VAE Structure +class VAE: + def __init__(self, embedding_num, latent_dimension, beta=0.25): + self.embedding_num = embedding_num + self.latent_dimension = latent_dimension + self.beta=beta + """ + Returns layered model for encoder architecture built from convolutional layers. + + activations: ReLU advised as other activations are not optimal for encoder/decoder quantization architecture. + e.g. 
Leaky ReLU activated models are difficult to train -> cause sporadic loss spikes that model struggles to recover from + """ + # Encoder Component + def encoder_component(self): + #2D Convolutional Layers + # filters -> dimesion of output space + # kernal_size -> convolution window size + # activation -> activation func used + # relu -> + # strides -> spaces convolution window moves vertically and horizontally + # padding -> "same" pads with zeros to maintain output size same as input size + inputs = tf.keras.Input(shape=(256, 256, 1)) + + layer = tf.keras.layers.Conv2D(32, 3, activation="relu", strides=2, padding="same")(inputs) + layer = tf.keras.layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")(layer) + + outputs = tf.keras.layers.Conv2D(self.latent_dimension, 1, padding="same")(layer) + return tf.keras.Model(inputs, outputs, name="encoder") + + # Returns the vq Layer + def vq_layer(self): + return VectorQ_layer(self.embedding_num, self.latent_dimension, self.beta, name="vector_quantizer") + + """ + Returns the model for decoder architecture built from tranposed convolutional layers. + + activations: ReLU advised as other activations are not optimal for encoder/decoder quantization architecture. + e.g. Leaky ReLU activated models are difficult to train -> cause sporadic loss spikes that model struggles to recover from + """ + # Decoder Component + def decoder_component(self): + inputs = tf.keras.Input(shape=self.encoder_component().output.shape[1:]) + #2D Convolutional Transpose Layers + # filters -> dimesion of output space + # kernal_size -> convolution window size + # activation -> activation func used + # relu -> + # strides -> spaces convolution window moves vertically and horizontally + # padding -> "same" pads with zeros to maintain output size same as input size + layer = tf.keras.layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")(inputs) + layer = tf.keras.layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")(layer) + outputs = tf.keras.layers.Conv2DTranspose(1, 3, padding="same")(layer) + return tf.keras.Model(inputs, outputs, name="decoder") + + # Build Model + def build_model(self): + vq_layer = self.vq_layer() + encoder = self.encoder_component() + decoder = self.decoder_component() + + inputs = tf.keras.Input(shape=(256, 256, 1)) + encoder_outputs = encoder(inputs) + quantized_latents = vq_layer(encoder_outputs) + reconstructions = decoder(quantized_latents) + model = tf.keras.Model(inputs, reconstructions, name="vq_vae") + model.summary() + return model + +# Create a model instance and sets training paramters +class VQVAETRAINER(tf.keras.models.Model): + def __init__(self, variance, latent_dimension=32, embeddings_num=128, **kwargs): + + super(VQVAETRAINER, self).__init__(**kwargs) + self.latent_dimension = latent_dimension + self.embeddings_num = embeddings_num + self.variance = variance + + VAE_model = VAE(self.embeddings_num, self.latent_dimension) + self.vqvae_model = VAE_model.build_model() + + + self.total_loss_tracker = tf.keras.metrics.Mean(name="total_loss") + self.reconstruction_loss_tracker = tf.keras.metrics.Mean(name="reconstruction_loss") + self.vq_loss_tracker = tf.keras.metrics.Mean(name="vq_loss") + + @property + def metrics(self): + # Model metrics -> returns losses (total loss, reconstruction loss and the vq_loss) + return [self.total_loss_tracker, self.reconstruction_loss_tracker, self.vq_loss_tracker] + + def train_step(self, x): + with tf.GradientTape() as tape: + # Outputs from the 
VQ-VAE. + reconstructions = self.vqvae_model(x) + + # Calculate the losses. + reconstruction_loss = (tf.reduce_mean((x - reconstructions) ** 2) / self.variance) + total_loss = reconstruction_loss + sum(self.vqvae_model.losses) + + # Backpropagation. + grads = tape.gradient(total_loss, self.vqvae_model.trainable_variables) + self.optimizer.apply_gradients(zip(grads, self.vqvae_model.trainable_variables)) + + # Loss tracking. + """CODEBOOK LOSS + COMMITMENT LOSS -> euclidean loss + encoder loss""" + self.total_loss_tracker.update_state(total_loss) + """RECONSTRUCTION ERROR (MSE) -> between input and reconstruction""" + self.reconstruction_loss_tracker.update_state(reconstruction_loss) + self.vq_loss_tracker.update_state(sum(self.vqvae_model.losses)) + + # Log results. + return { + "loss": self.total_loss_tracker.result(), + "reconstruction_loss": self.reconstruction_loss_tracker.result(), + "vqvae_loss": self.vq_loss_tracker.result(), + } + + diff --git a/recognition/s4581053 VQVAE OASIS/predict.py b/recognition/s4581053 VQVAE OASIS/predict.py new file mode 100644 index 0000000000..43d540db6e --- /dev/null +++ b/recognition/s4581053 VQVAE OASIS/predict.py @@ -0,0 +1,113 @@ +""" +“predict.py" showing example usage of your trained model. Print out any results and / or provide visualisations where applicable +""" +import numpy as np +import matplotlib.pyplot as plt +import modules as mod + +import warnings +warnings.filterwarnings("ignore", category=DeprecationWarning) +# Show how well program performs + + +""" MODEL AND TRAIN VQ-VAE """ + +""" RECONSTRUCTION RESULTS""" +# Plots the original image against the reconstructed one +def plot_comparision_original_to_reconstructed(original, reconstructed): + plt.figure(figsize = (10,12)) + plt.subplot(1, 2, 1) + plt.imshow(original.squeeze() + 0.5, cmap = 'gray') + plt.title("Original") + plt.axis("off") + + plt.subplot(1, 2, 2) + plt.imshow(reconstructed.squeeze() + 0.5, cmap = 'gray') + plt.title("Reconstructed") + plt.axis("off") + + plt.show() + +trained_model = mod.model.model +idx = np.random.choice(len(test_X), 10) +test_images = test_X[idx] +reconstructions_test = trained_model.predict(test_images) + +for test_image, reconstructed_image in zip(test_images, reconstructions_test): + plot_comparision_original_to_reconstructed(test_image, reconstructed_image) + +# Return the average pixel value for the image and the reconstruction +def calculate_mean(image, reconstructed_image): + image_pixel = 0 + reconstructed_pixel = 0 + + for row in range(256): + for col in range(256): + image_pixel += image[row][col] + reconstructed_pixel += reconstructed_image[row][col] + + image_pixel = image_pixel / (256**2) + reconstructed_pixel = reconstructed_pixel / (256**2) + + return image_pixel, reconstructed_image + +# Returns std dev for the pixel value of each image +def calculate_stddev(image, reconstructed_image, image_mean, reconstructed_image_mean): + + image_variance = 0 + reconstructed_image_variance = 0 + + for row in range(256): + for col in range(256): + image_variance += np.square(image[row][col] - image_mean) + reconstructed_image_variance += np.square(reconstructed_image[row][col] - reconstructed_image_mean) + + image_variance = np.sqrt(image_variance/256**2 - 1) + reconstructed_image_variance = np.sqrt(reconstructed_image_variance/256**2 - 1) + return image_variance, reconstructed_image_variance + +# Returns the covariance for both images +def calculate_covariance(image, reconstructed_image, image_mean, predicted_mean): + covariance_value = 0 + + for 
row in range(256): + for col in range(256): + covariance_value += (image[row][col] - image_mean)*(reconstructed_image[row][col] - predicted_mean) + + return covariance_value/256**256-1 + + +# Return the structural similarity between two images; measures the window x and y of common size. +# https://en.wikipedia.org/wiki/Structural_similarity +def structural_similarity(mean_X, predicted_mean, stddev_X, predicted_stddev, covariance): + K1 = 0.01 # default value + K2 = 0.03 # default value + L = 255 # dynamic range of pixel value (2^bits per pixel -1) + C1 = (K1 * L)**2 + C2 = (K2 * L)**2 + C3 = C2 / 2 + + luminance_x_y = (2*mean_X*predicted_mean + C1)/(mean_X**2+predicted_mean**2+C1) + contrast_x_y = (2*stddev_X*predicted_stddev + C2)/(stddev_X**2+np. predicted_stddev**2+C2) + structure_x_y = (covariance+C3)/(stddev_X*predicted_stddev+C3) + return luminance_x_y * contrast_x_y * structure_x_y + +# Returns the structured similarity for the entire data set +def structural_similarity_mean(test_X, model): + structured_similarity_coef = 0 + + for i, data in enumerate(test_X): + # get reconstructed image + image_reconstruction = model.predict(data) + data = data[0,:,:,0] + image_reconstruction = image_reconstruction[0,:,:,0] + + # Calculate structured similarity and add to total + mean_X, predicted_mean = calculate_mean(data, image_reconstruction) + stddev_X, predicted_stddev = calculate_stddev(data, image_reconstruction, mean_X, predicted_mean) + covariance = calculate_covariance(data, image_reconstruction, mean_X, predicted_mean) + structured_similarity_coef += structural_similarity(mean_X, predicted_mean, stddev_X, predicted_stddev, covariance) + + return structured_similarity_coef / len(test_X) + +print(structural_similarity_mean(test_X, trained_model)) \ No newline at end of file diff --git a/recognition/s4581053 VQVAE OASIS/train.py b/recognition/s4581053 VQVAE OASIS/train.py new file mode 100644 index 0000000000..de4984a1e3 --- /dev/null +++ b/recognition/s4581053 VQVAE OASIS/train.py @@ -0,0 +1,224 @@ +""" +“train.py" containing the source code for training, validating, testing and saving your model. The model +should be imported from “modules.py” and the data loader should be imported from “dataset.py”. 
Make +sure to plot the losses and metrics during training +""" +# %% +import dataset as data +import modules as mod +import matplotlib.pyplot as plt +import numpy as np +import tensorflow as tf + +# Download Data and then unzip +#download_oasis() +# %% + +""" PROCESS TRAINING DATA""" +# Load the training data from the Oasis Data set +train_X = data.load_training ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_train") + +# Check training image +#pyplot.imshow(train_X[2]) +#pyplot.show() + +# Pre process training data set +train_X = data.process_training(train_X) +train_x_var = np.var(train_X) +# Load the validaton data from the oasis Data set +#validate_X = data.load_training ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_validate") + +# Pre process validation data set +#validate_X = data.process_training(validate_X) + +# Load the test data from the oasis Data Set +test_X = data.load_training ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_test") + +# Pre process test data set +test_X = data.process_training(test_X) + +""" PROCESS TRAINING LABELS DATA """ +# Load the segmented training labels data from the Oasis Data set +train_Y = data.load_labels ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_seg_train") +# Pre process training labels data +train_Y = data.process_labels(train_Y) + +# Load the segmented validation labels data from the Oasis Data set +#validate_Y = data.load_labels("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_seg_validate") +# Pre process validation labels data +#validate_Y = data.process_labels(validate_Y) + +# Load the segmented test labels data from the Oasis Data set +test_Y = data.load_labels("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_seg_test") +# Pre process test labels data +test_Y = data.process_labels(test_Y) +#%% +""" MODEL AND TRAIN VQ-VAE """ +# Create a instance of the VQ-VAE model +latent_dimensions = 16 #dimensionality if each latent embedding vector +embeddings_number = 128 #number of embeddings in the codebook + +model = mod.VQVAETRAINER(train_x_var, latent_dimensions, embeddings_number) + +""" +Optimiser -> learning rate +'adam' adjusts learning rate whilst training; learning rate deterines how fast optimal weights are calculated. 
Smaller +Learning rate = more wights but takes longer to compute +""" +# Create Model +model.compile (optimizer='adam') + +# Train model +history = model.fit(train_X, epochs=15, batch_size=128) +print("disaster!!!!") + +#%% +# Plot Loss +plt.plot(history.history['reconstruction_loss'], label='Reconstruction Loss') +plt.title('VQVAE Loss') +plt.xlabel('Epoch') +plt.ylabel('Loss') +plt.legend(['Train', 'Validation'], loc='upper left') +plt.show() + +#%% + +""" MODEL AND TRAIN VQ-VAE """ + +""" RECONSTRUCTION RESULTS""" +# Return the average pixel value for the image and the reconstruction +def calculate_mean(image, reconstructed_image): + image_pixel = 0 + reconstructed_pixel = 0 + + for row in range(256): + for col in range(256): + image_pixel += image[row][col] + reconstructed_pixel += reconstructed_image[row][col] + + image_pixel = image_pixel / (256**2) + reconstructed_pixel = reconstructed_pixel / (256**2) + + return image_pixel, reconstructed_image + +# Returns std dev for the pixel value of each image +def calculate_stddev(image, reconstructed_image, image_mean, reconstructed_image_mean): + + image_variance = 0 + reconstructed_image_variance = 0 + + for row in range(256): + for col in range(256): + image_variance += np.square(image[row][col] - image_mean) + reconstructed_image_variance += np.square(reconstructed_image[row][col] - reconstructed_image_mean) + + image_variance = np.sqrt(image_variance/(256**2 - 1)) + reconstructed_image_variance = np.sqrt(reconstructed_image_variance/(256**2 - 1)) + return image_variance, reconstructed_image_variance + +# Returns the covariance for both images +def calculate_covariance(image, reconstructed_image, image_mean, predicted_mean): + covariance_value = 0 + + for row in range(256): + for col in range(256): + covariance_value += (image[row][col] - image_mean)*(reconstructed_image[row][col] - predicted_mean) + + return covariance_value/(256**256-1) + + +# Return the structural similarity between two images; measures the window x and y of common size. +# https://en.wikipedia.org/wiki/Structural_similarity +def structural_similarity(mean_X, predicted_mean, stddev_X, predicted_stddev, covariance): + K1 = 0.01 # default value + K2 = 0.03 # default value + L = 255 # dynamic range of pixel value (2^bits per pixel -1) + C1 = (K1 * L)**2 + C2 = (K2 * L)**2 + C3 = C2 / 2 + + luminance_x_y = (2*mean_X*predicted_mean + C1)/(mean_X**2+predicted_mean**2+C1) + contrast_x_y = (2*stddev_X*predicted_stddev + C2)/(stddev_X**2+np. 
predicted_stddev**2+C2) + structure_x_y = (covariance+C3)/(stddev_X*predicted_stddev+C3) + return luminance_x_y * contrast_x_y * structure_x_y + +# Plots the original image against the reconstructed one with their Structured similarity rating +def plot_comparision_original_to_reconstructed(original, reconstructed, ssim): + plt.suptitle("Structured Similiarity Rating: %.2f" %ssim) + + #plt.figure(figsize = (10,12)) + plt.subplot(1, 2, 1) + plt.imshow(original.squeeze() + 0.5, cmap = 'gray') + plt.title("Original") + plt.axis("off") + + plt.subplot(1, 2, 2) + plt.imshow(reconstructed.squeeze() + 0.5, cmap = 'gray') + plt.title("Reconstructed") + plt.axis("off") + + plt.show() + +trained_model = model.vqvae_model + +# Select 5 random Test images +idx = np.random.choice(len(test_X), 5) +test_images = test_X[idx] +reconstructions_test = trained_model.predict(test_images) + +# Perform Predictions on the test images +for test_image, reconstructed_image in zip(test_images, reconstructions_test): + """mean, mean_r = calculate_mean(test_image, reconstructed_image) + stddev, stddev_r = calculate_stddev(test_image,reconstructed_image, mean, mean_r) + cov = calculate_covariance(test_image, reconstructed_image, mean, mean_r) + structured_similiarity_rating = structural_similarity(mean, mean_r, stddev, stddev_r, cov) + """ + structured_similiarity_rating = tf.image.ssim(test_image, reconstructed_image, max_val=1.0) + plot_comparision_original_to_reconstructed(test_image, reconstructed_image, structured_similiarity_rating) + + + +#%% + +# Returns the structured similarity for the entire data set +def structural_similarity_mean(test_X, model): + structured_similarity_coef = 0 + + for i, data in enumerate(test_X): + # get reconstructed image + image_reconstruction = model.predict(data) + data = data[0,:,:,0] + image_reconstruction = image_reconstruction[0,:,:,0] + + # Calculate structured similarity and add to total + mean_X, predicted_mean = calculate_mean(data, image_reconstruction) + stddev_X, predicted_stddev = calculate_stddev(data, image_reconstruction, mean_X, predicted_mean) + covariance = calculate_covariance(data, image_reconstruction, mean_X, predicted_mean) + structured_similarity_coef += structural_similarity(mean_X, predicted_mean, stddev_X, predicted_stddev, covariance) + + return structured_similarity_coef / len(test_X) + +# Calculate the mean structural Similarity for the reconstructed images +mean_structured_similiarity = structural_similarity_mean(test_X, trained_model) +print(mean_structured_similiarity) + +# %% +encoder = model.vqvae_model.get_layer("encoder") +quantizer = model.vqvae_model.get_layer("vector_quantizer") + +encoded_outputs = encoder.predict(test_images) +flat_enc_outputs = encoded_outputs.reshape(-1, encoded_outputs.shape[-1]) +codebook_indices = quantizer.get_code_indices(flat_enc_outputs) +codebook_indices = codebook_indices.numpy().reshape(encoded_outputs.shape[:-1]) + +for i in range(len(test_images)): + plt.subplot(1, 2, 1) + plt.imshow(test_images[i].squeeze() + 0.5) + plt.title("Original") + plt.axis("off") + + plt.subplot(1, 2, 2) + plt.imshow(codebook_indices[i]) + plt.title("Code") + plt.axis("off") + plt.show() From 6899684ab9806e84b9c4109e35d7fb43f657f971 Mon Sep 17 00:00:00 2001 From: dapmiller Date: Fri, 21 Oct 2022 21:38:40 +1000 Subject: [PATCH 26/29] Trying to add images --- .../Images/Reconstructed Image 1.png | Bin 0 -> 37545 bytes .../Images/Reconstructed Image 2.png | Bin 0 -> 34045 bytes .../Images/Reconstructed Image 3.png | Bin 0 -> 35634 bytes 
.../Images/Reconstructed Image 4.png | Bin 0 -> 32611 bytes .../Images/Reconstructed Image 5.png | Bin 0 -> 34694 bytes 5 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 recognition/s4581053 VQVAE OASIS/Images/Reconstructed Image 1.png create mode 100644 recognition/s4581053 VQVAE OASIS/Images/Reconstructed Image 2.png create mode 100644 recognition/s4581053 VQVAE OASIS/Images/Reconstructed Image 3.png create mode 100644 recognition/s4581053 VQVAE OASIS/Images/Reconstructed Image 4.png create mode 100644 recognition/s4581053 VQVAE OASIS/Images/Reconstructed Image 5.png diff --git a/recognition/s4581053 VQVAE OASIS/Images/Reconstructed Image 1.png b/recognition/s4581053 VQVAE OASIS/Images/Reconstructed Image 1.png new file mode 100644 index 0000000000000000000000000000000000000000..2e7d10022f66d118ab3ea27df62a9922e93d89fb GIT binary patch literal 37545 zcmdqHby$?&7d`q8UD7ci4Fb|b2-2NW!iSP{Xrz%&5d|4aBt(z~5fDMTK^Q<9q?JyQ zmacpF-rsZoxqsb%ug^HnGtRto-hK92d#$y1w63=59RfN62!igYt0_H(AecFLy@H1W zo+TwtTEIUKy_Jo<_1)~eeXTuhAuVff_h)Y2&m5mJ``CJVIl8$D^NaEe@iIGjd%Jr{ z2ne|RKOf+C^RyS>e!!#*E`sl_X5s}wgcP@bVNiMo9D?3bt1HPH_+@NgIli-*e9jmZ z!ug8+>0nfk|0kA`FgoZ(lDrz0nzUZ7k|}wk<7k=vlbXNl#evnocUE&|T}${*zS&Oq z>DOM__?|fT(Ya_;swcAEd!P=(f+bMi|0+lN-zSCt4F4N^Peyx~Ks&b0L^eOjKtq<8 z_+PUA_;2UcAN_TlJJKg}UdLz0wXc!9eJ|MJ#FwJjUbB74+X=gU-IK>2H~PGjOqKqj zlE=F*|9gw&O`bP{2Y8QIkG^0l3cTkTwZvtvO7GE`s7R^0DYNtx<=+lJT;JVq#76c{ z2VU;nMW-=2TwQ%br&+pONl!|YUoZ|`WFXegn}bdSmF8>N;tRz!COOb)_OXa^II0Xhv_^d_;JuaCeurP^I!y@q$I z($+-4`!uNH`P{km#}nriTj?)5i*bmx*E_3b=(I63yrdeB(scct>EyaF=w|*d@;baM zlwtRRxf6cKi9-0sB9xK+_9(;%|Mo}ylOCo%_+huf2e9gS$JmH`#99_TT3QMP?)4rz zjWlc?wpR`*!5(3rUO+y_9&v#R4kQXF#7EqqAE7X2Xn{3MFf0plKZIE>hxfvM$=-Wv zVhFWyLYFA8B9aXWl1C^+D(rF>3L$6{4W|&Y5*1`$w z5tIgVf7aF>P0?4Gku^aQc7oC4{m_T3?LnACI;POr`Q<$3sTkA(W#cWphMJ&-p_1Ke zk+9UCH>o4^o%f-;&^2)w6Sgi)*Cq3&+Hh793Q-F_v4>qcZS9UhywD936a!g23%*{5 z%rFyh_LQNbpC_U4whvGxbS)ZI{-ZW5nY>c)t`2ni8hQZ*jFOM zzRD1if#`$ciLP}=evDwCYoq(g(7!C`^d1!Y#s!7Y$#EPB3Mr3+BR|6Sux3WCcQM3CtcV5zuc{}i zZ{jEBZ}OjSGwpiLWI=mOFeNC6dLS}k<6z8z-%d|telgT>O9u@fwM<_26+ymINQw5Z`Nu{N1FW^@T9YHP4Hx#_Pg1#cgu*ajcUG+uv8v8Cecw-AWmMSB`Po3X>{I7lAIN`zn1*Pg+xhsN0ZEWM}t9 zXc{!)ciCeRwjwl%ATMdhTSF%UUQ0T#P8OjMiy^xtT7(;fW%mDgpt>m;%r?x0&~LmY zP9_tfDipRc#wk=4yd|@&OmY;#&b_(#L;=0sF!qtEFfL&zguLzM6^HJrRM@;LA5q+2 zefk3Dq?F2Y$G%FtVI@(pbww7HRq>krxzU15`5Rkc(XBPQx;R<{-?O9F3ca87Rr3DZ zXtXu41$Q-AS$YCRK{c#99ZLkrsI{uQzL1 z4Y(9oJU?{fCX#F}5S=%D(i+DRyy$W?;DS2yNA~j}9}#Q)3ERG)YTihOh^p&0??XIg zzDmi_+FD~q|1>N3ffS1*3)>g;h|=9$weO;r)- z{@nUcmzCSYz8;|`OHt&io7@PrnIQTS9O*|}Hzp#9!(ygd51{^k)`sS=Jr5*m2<}C! zrAo8v3q7JkUZ*4MhT#+<=q8=a8*jq!O|Pqb1lk$pFOVMeMBuCzy7uV{yn}o+1wZVa zs2IYmdxkpmBhPey3hn)MA2>z`s#{OKNcTql^b)c2Dr1cF)S)vMsNr+EY(U~fA?Hc2HcK6SrCC(oEG1K|K zGegU3g)MqtSSeKZ=6BcogHTxA&xT|BW3R*%+8YlMn!iNpgRS%71eVXb#$+Z}FWjy@2YZ-($Rtd)D_W5kGg&H)n6c{(a}C$6%7=5ZI18M|3+)V>tc95kfLEw0g3X|7 zv#jDR`EYVG6^&T?09}{anzZ`;4E9)=>ymGXlk$9u+RuqTzmCt_(G)QQ(#W z#nI=At4#vlZpB%3&m4vRv z!^WU%Ksf>ZY_&z5{Ri;1O{Z4o;=kt~!D8#3QPV=`%SVJ2m%3YNRsWToW+BkLbjY`W zfU0M%1A;mki=ajJrz6%ZeIH-ONnJiQnUDw*$Sn@CM)`v>tl|8u$qsb}s}V&jfnRVy znTVqEV-Z<;-PbP=YlR37@SCLwI}L_!Aj_xWhlX2CH?l~SBZH(p>g+RuW0tNc%PIK! 
z>DE?xHR)L>_%saJUx`=?6-1X}o_3`ioxW}1j7^9|d`G8+PNa*0x~+*$vxcNX3!h)_ zI#=I#q0^kpE-rErYwsj>N$w)&o@K6EVhWNhP|?4U^3}4Ij26Y??g0+#< z%ACW)<}pZNd(I7Ih&4b?p*>F6qul6}qLaFvZxAEy9t(8y4YBrg>rf)>hEa~b?F2A9 zv5QtdRqX~i(hds3YSTjYi-Zkfp5j8e&^1ojjSY&{Mf8RWl7UiTDA*}Ynb#$ASm;ZR zs5C5hKn1JJ|8zr_WYDfd^+n>u`i&)YBNj#m2}9RZka5aU_{4q@?=9IsDxSEZC~L{Y z*rU0q>k1`Ukaies=L&1Z&W44p$5##Bfa(A)!~ikj3KA?hL)VI1jLQy#^Q|#!P4(Cd zSHWgC#yxqTyqBT3DYoMh^iP}_aKp-cuq1_=>-<3p>?^ zTEORMOp$wDR=d^UHo-xOkGR`NQ}R+c{aIU`R8U4OJSkz`m?`v6CpLD%$QH-J|Fvib)s zcJvRD^9<1KL|c zVi8U`vG=1uL~+BWz-2BQa#6ml@tOoHrS`MiA^zT%UV=^NBx#_mrXYC9$3ltmJYV0*aeG%=9WuJIw9 zynQn}yVPW=V;xQ7_80olwbAq4V#E2u{o^zHoRc7F!vikv-{qK7@?+1)9Q{$U7z;dQ z@c}H8q9OwVA--_Azl;}$ZV*r#O|~X1B3hp*zUw?ZSjdkwypc|~I|V6P!ON2ya+zX# z6|`mi(>t!lL|NY&2B8okBHe2mOp}&?ObQsMh+;1Yz)mBjS*ibewma*5LaIZv z(AtGK$#7G5t4-sLHDJ8*Ue^Pkx8PSd9;vF@o2Dtr%43PgXm~Bc?$c^h zdIhiZownWFVoFt0WLnckD6|KFXIBJGlve$pqw@yz37+fFPGZ8tPjGU;*!rs&5^SX)m-MqL1-4KQeVEPe;Wl~(< zg)DIQ_(V2e09`SjyME0QmilBp6kS=WQyX{FO}%|AEyAZ~oRSPk3M3oP@!XTKtuJ_P zIWfj@lqK$0eATCDqIAmkZML1bTOR_onyA!pE5c=6QTx^6-$LK45rnKe1g_o|bYG8m zJI#IF!``N)xsG#}`KxJnZXFKBrD4MVQ{IXHUsi|4&<{B@{B@xFiX0%ZZ0cD17gq}H zPf{Wc2WWnm*9K2)!DY8j%(`=~)c-hhK8y=48+ZCNx#eDO_G#RSL2@o>>^X2vaQRdV zykEb5%?FutRl^a~2pF*topl>0|{+$s6I98C%)2yX$jt4doXup+8Rw6WzsbU7Yy*Ix~Lbl<@}-s}8mw`2S@c!^YQ~VSRDtD%*X& ziobhw*}S6HT#H*ae9};BftSAI4hY|9W$LDb?1VE0_|0ufiNDVCI+iE1*Vgr7-HB^a zZ00uQ2?&jHb#LA6i}!8a1?A0g()}kB+h$iUQ4jyU_+JzTa@)&mD)lWyZm7Ga~WJs9R6k)o#d)@5Nxq?A8#I%|(*8TOiAN$jrQ(8T-8i^!cTnOo}V-C3;-4^KB|1|nSXTtSH5QVpT8_cEfelo#Liap zuMbyK9y?nhvb)};OcLK0Z`WHe*118YVRLgKUtP;odo$;j_EY}&NPd3SV|S~$YWKCQ z&#mLfRd1y9y!Drj*vqQA2e<{9xC_0y~+8r+snK>yji`#16F1pQ3-IF!(Q4pFK zch=2b?{Yhb*0@}772)vRqN|9%TOn}_-nSboKYw>GDiXE7c7XtaT# zPx5D`;TY>?PScOrRk+0{^n7zm{?}|o0Levjt#+$quVS9JG$RRJEY6=J2^7M;@i!D^ zT80B1(Bh)zvOac;;~ybZ8MPnw%n)9<%x_Cx)wHHM2TC!X^=}>3RBx})f1cce(uc|( z7yt6QT?l*g>yXtP-3G>ZT>ezTqW}i*Y*9{wh8vXp2ZhaUzXFfGRfU4bWCfpR>5#hR zldJR>dQE}vDX#u~h?q(Ip6Hs?6&EDIu=Vdoud23oYx3XP`Xwzr{a-)6tN-D`S-|~F zi|+Ga>MN|9P_KMa z(xQ<`U@MQ~BD4P_;7ee=PtlP0gD~@dGnz2>pn5ZYIC9}A03j|OrOWkGe{(coC9P(R zER!<-BU4jIwQ2Bdvv*_dh2G~YM#-pS66t2SNCG(=a$Kxrj`*!z$U)!dnXHHiR(y9v zP7VtZ5s{UNlok$2Bws7duA*dS!;TVFzBLOz9jWKWZsq|b3AaaZQejuQj&?f_KwSvds(X)PtAwK0+K1rC`cMmCO|wYqhkKRUWlwIo zcsV^6YE@&~i+ugv`jNIaD|HMDT`VhkwAHg`M2qh55eLfmk8mc-Emc_)+t6s7(M^%j z&8pHSAI?l$lU(}n-}0*bX>kv@gC5hNqL`n9Ge$;)Q_!k3=34vY9WUfstvasa18$c0k61Ic^9;DHy}T$=YkN;RSrJR1)406%OSh+!b@Rvb9yF_Y#?n%AdAx(I4=4*64`q^q2L%X5g5S<}TPe zJezT?%)<9IegmH>AmQyJmyMX*QU)hpicyu9S`oxWoSyoNkDnjB>nWL-+gxYtT`{Fz zm|Tu7sj^AQxDD!03#s&ce8LkBZelD9kn{=>4y5|L)QM{BAXoHwsX@sB&t19sSuc>4 zBMw&f_UpqmomVJWaFZE-mRW(OK$A+XfygDQ$HUc?SXfy27UH^>Z-X&+`0-?6qU_1( zUwhe@rxjcR;vlqOPu$(V@-2mt#9>CNC6tI6Iis)0wE(if$Gn~P#4`yL6%}tWl9ACh z4+}u#Os=3`>*YrPUqkDP;AX16HaEw1bt&@-2(Tsf;+c+!gsapCsib#Td36N9HYx3V zbUicPxt8~i_pxvLl+GUq(2u=c_B#n^hBGmBZ)|)zWd~biXJ4b!TFhk#;lV)<{9UQ6>6y^NKREU>FSVHCIC?&UDGl$IwJ{b z_D+}M*~p_I$n{rUczMWCNnPN{7v>?2f)_SI_XYHLdL9>JrS?TeN8{5bD2-lzh{NM{ ze2nq_k;*3wNGM_24dAwe$Qyu)9!)S(oBkGK!=%Q;faXfU&Zh`a!=hhhA84_Ys}+p_ zG#0A&mbzD7R*ZQyw;h!#bzsdkmsdy=kTc+Vz&v{>EliX^hKJ$;;Z93W@7NqGLP$#T zWB?tZTx`&>JzHDR(h_IDeUDJEJBy+IG1;F4RZ~O2osu=MS3ONS7@1xsU|7RBB4dK8 zO}qB?zGj~R=mxBTIayEom^<5Z&m0`SMEzkjDZbQ2%9_OzraSL{J>HM$Mna; zVk`T8?0>>IQozpgQhz@?T;KmUrjuvJzuZM**~xUh$ds$g)wb%%0w-e48UnlB^JZwu zjN9m-`j(T>(gKP=W7{{(lP-OWD-XVE(y}M1d(@H1Lv4U4fIqst)CdU)f^2mGL8lj7 z$(P*U`|CD91uHEpYllbl_Nq6XH1Br(`bD3TntFS$jlR18rZqCkRqA~1eZJukm0u~z zbS5dGf4+@D87sP|X{1RVGvR`K;Kvkcz(ohcd&e2Wh$ z6eB{xOiGiH3b1jf|JqbJHC$RVLdy)l1DIhpyE1cN5psGR7S%Jie#nxnkpnUiiNuUn 
z!blk>isz+{IbeKF)EF{Ie?ju6`RGD>+vTt4vt<5EyYuk3Y?&WFqQQbMLuA9&-pGiG zf`sC~MP;KeX6-!XO)sjeMFHZoFvjOulp_I%)teE6?ZU*wtZX1AB2o+U11JBwZ`%h6 zP&8al&H)GK6=oxsvOZJL)L9TCAUjX#?tpT?epo&J5+u-pERykkReRT~r05(QdL{4? z-<@B#Gz^@f!7SPMvon9%3{VTe1Ed7bXBDht`5ftSiLD*$JNLF~j}^*$$BKi?cc)~F zD(}MKq*!+eB419VgOkwNiKU{d3OLy<{TdhmDU0vl4L}O47VNIe$Iw@Ch_2KhcfwF7 zx?JoU@0TMoc)Dwb)+w%i0|TGXH(e3%@eYhQt$ykjfoU~cr230Ke28B=8w)rMxD?o7 z7C@3f09HL|K?PXj81gTF&Uzl~cEz~G(fxSy@uE{pqJy`O5B1^UAvl@u!bPQ}W5MkJ zS{t;fY1tvxvVR9k*!v0sDvy__rbJJKdPom)4phe&Hk%BVVWk%r4K zfqy|VfcU<2wzW)i2b3IaL4g2>5`U`ZCCf!(Gxd&29?QF)!9ik;0&+mLJ1<^+w+7hK zKQNGESYiz_)pvKU>-=1za=Mh4tBSw+TUpZI6QB-*Whv1W`VA<|pO_yDulxInI5;^G z!om@pAN?@E$|xu(0I#a^TqTgXm>ya>-kychH&U&Ijsnb^njumm9A$ic`L~jv4NIn` zr{(L-QlfCccDjrIdL`GlTFJ#(XgP}CMfBj+3y_~|qE)vh9wwU@nL(`g9HDVtG2clu(4al4Z;hEvYxgV`6f7N^8f{bQkW4q-cYinzQva<2T#rOA9L4o+y z97t2^HV+P_W$<{GE9 zoB^pP6;O=RJ>?vl`zfsLP|3eOfNQK#l zA_`v*8%aJaZLgXAohVF=C>ZkFJOscH4J#IQt-BMCe}8YqC;Sgc;cb7vysc|3y(r4Y4_5^FNO}6p*A*Hw<-VreS4CQ%>A{Ag99Pg-wly&A%`CT zD^5x6p1~`1Y5S?B!Vc^J`Gl1gIElK!qmTXThtW@MymzL&>AsiDb8RrHmxU?^xJMu}K+Sp1-avBu=TOqsn0EON=3wo@kYQ%f!Cv_m@ zSx=I0wzbj@I&83IK6Rzkve29T zMGBCTPb;EK%U&t5!pbLkt`kXu7BFvd=40@&3E&-zGu6%@Uk>JuF8;2FZ~xQY&U#LV znHl2ymhHcWV}&IMV4cVDUlXM~J*ndj-LLR;&_H%|KFUqAX4NK55&c}B0Ygg`g#1bF6_}SD`gIbi7t7~Rb%et zPL^(=06;JZ=1{S*>O!wn{w5_Qffk3$emyG8iV_Xj8`yxzkJ(8t&Ad30fCt&JvcmpA zOl<$4Pk=3JXc@rz_t&zQ){k^_@(fEB0WK(rPHp0_K0`9bMZF49L^m+hhE8m!eX+h9 z({=08fc!?q#oam_pak^Vkl$7TAc_Q}^mrAE)XMcd$hRG_P09@KWoGOOl1JmkDn|m1 ziS3Pwk6->f{q3>nv!Eb)fbT-+s<)%cXO6cYTXxEQBxC8U$nv?u%somT~XQ$HL$cpA>zJfzz4jrER0Z&c% z;|Gb*YvjdtLbRpj<==NtVv43#J=JI3#d)Rf;u4KTzkc;bj=Qr|cK)B-k@R z!})#?O!5lvh~x zgBk7r{)qrFFflRlwWfxnV%dPJ%0P`}bHB6V(Rwm}L%t^B538tBHYyF4UO?{%8yFmX z8Wd#4`UGSExH(8J*V9iR^8vQiDO;J;gn71N?1(%lYA!!9Hl+u7MqdIdUimd4&o1gN z)xDB5gmBX2Q(hVQj@7RVcffxZ6U5Nl!-mswwBCyJh) zz7uGBaRsgS_ga=WICMEy zj2#E=V7V%qeDL+*M1a;O?UQMj7aj`wn}GEK)@L0MK$ob7HT{e_T1ml)eWUBn)bj@j z?yyfa2HdbP%Av{e+E3oJ;n^1ow!Ozn6{Ytoo0?FPzB_8?l28S~L$QG5>y=zN{#5O+ z!0yaZhN=p@Sz6fKCJE zEdC_ZrKY<29<7`RNxeAm!>xjY8GtIhEe#;P*=CO^n{alM$cRLCVNqV#4#Xnb{eDvX z#UpYQ48oVO5g~SVI6%Rbx14|GeIWZ!QI>so6W|BHLxwF;((R8n-7^5UL&O_g2WjFw z`YLU#=|Kto^(*7$!^%1_vQKe3xXdY(`UAa*PTlaYAG*4po+`zkGj4Bg^e=eUo4tLZ zS&|?suO?fDi9c15nKVI=(k>}dfyF-&mGm1(h%& z=5F(I5Fn)CUbV6vA*$NA|moI0x zQ_TEdVT1x22v6Cluh;Mq+flL5A_+Z`_G*`ZU4FgUuV9lTzZC3B8{HVh|E7QGC|c$yuods+5(=pzW3aehh!6- zMCg)JHFrP9DP>Piz}fH+seSlvkFa~%b?72QUBwF%0j3L(s7AnCBadsaszKfY)JlF5q{(2O6+JRPDl z{+^iu+B!0dou6#dVpYCng@x@VRog!oE!RHzv>W~dm4)~S3dOA-4{Qe);jV3s;t$^w z2YF0`i8$HJRr}tyMLMKshp}YkQ+A`#`8{`rTzory|0q_QDzhdg_=lVw*k(uv8nz9A z9iJ&3aow$-tq6&mUDbW(%DK~D$qOvg;<%%2x6r$jU(CPv^xz$wNdW@3SKEJPCw}KF z4@7vHJCo<0a8FNfipIVBj$%?$D8Lj- zf{STl6UvcFQb|%(93Oi!-%C;lJEFc|#lcXFog1Pph-TF9y|@Yi1TA_AhuPXqs`iVH z1YlCP0bHUce!UOWT?9bQj+4!&o9ELW@_z9AN%pyz-DL$OVqc(8foTBR)*U6~EpJv= zZK1~Tu74kGh(g&FYz*1N0`lKxCxF@A36uE0Znc45^|@Cv@5a7*=JZ~>$YU9O2b@5B z7EI=ALhj&K-#z9wNQGThrhy#&v*`)T!C3#6{nqYZKRDh91cJrO9f$*&ES+OROlA!; zBUxk=ZF?Iqx`6wWkEhB?_UO^01)#;4fQkeikD~{30kS(7vA;DK{sLmI4|VoI`-`eO6<0OQ2P9>r&&MJvBr<aE*@3d>)0VM_u-jKR ztn(2ZF8OVdl_B4VYN~c&e`>4f@#eG?qbnpI#pwJ%_aNyVA4F@M%32hPeF40rmuO zl|(1<U^G`!-`U zh&)wIAxnNTNLR$oTOOG{?M<*Jys&-0OeD4LjgtVMk$$e$K~24x_JcO|AwoIU`*1}~ zTb-mt4iZJ%v@iXC5jzxJr&a@r9$iE0hcAvFj+_ytg>3pX-X1S?IAC2I*m@KYVjE0S z0I%w6RnWj35;8rMG^l!!1vshuGxu{6|l6OnIa|x@At556YWH9ToD~=H3{sRq3-r z=Va1-p^B&&?C`Kc)d2i;E;iDQ-QgV9j9DzE^YvX8G@0XR8)p(n8E40oFM8qy~7Ku1)hn^ z7zY?tvyQZnNs$mHxw*P(aL_7FJA5YlO`g0L2+Vmu+k<9*sTupA zr2mDt(Ufsw4as)`6Mh`s_h()1FqXyR+qZ9ZUh8pE^CY3C-~Hw<=5F)8_v0wVDLeP? 
zt5>Map}*T_AM9k=h^p#KyGcVG^!%k5@fmF3NpTlDot$H=OTimU|NaWhdM_<(|EM=C z!GCOPN86`8vq>Dg7i~d%fUXL(s-=Dzyek^G0;94iTJPIn(y|wndb?1h%T=|l=|Iwo z#k_ez#8S`&x*6`9qxU@MW|l`U7%-3P-B;9%eA{+UmVpI%>y%EmUO(5P?Cb06X!e}c zABcX7o2M|5L2L=9;?cWbk7UK*W8>j!}yVt4kDVeo;qBtcOaqRD8tRuue^kyZYM|&^GcMBJ*uv5yC__;Z}0@R zH$ZQo1ri4A5Wv6&>KJ%I&2<4s+@MdN_MFq?{0{9{sIQaGsxa`GBxT{qm)xwP`N{mq z#^Q{+x>2{bNDH1^?RO9rZZ}pkFV%yGTDb`ZuGinsi{&2q;=Rd;7>OItB)+0w9cmUxpA8Vs$Jv4S$=r zdUmS#nlz&RO!1|>7wWwn(e=7!{)w-*HzhzyS52x34$NVC123=J=u?PjTBv&&{-wxd z6Tz&<+lW?dN%dAQHXtv8CrV{u>nnQIXRs>(A7-)N;RRKUGB_awnQdxB$H3fF6TEI~ z3(My#FE2-e#`djKeb=oi9I)^bqw*!(kOka5;7%=rE=G^tw*#}|4~-#aHsvzD3aP(Y z;SPU1g}Z>=O~}^Ge;;~GSmpic@}D+t0MXX9Qy1`k6V%whwzTj!esq^|bjA-GexCo= z0e`w2G;_FrXxp3kc)s&=o$|K{?2=Pa+Tdc7@OnNY5KhcSqt>|Z|L0RK*v|3s|3HSR zYP|hpY{6=+HM@G~zc_{&4tdsRP@cXAVhtHD{pwbRZZBP-Vzj&Cv76<(d_by#_Uyv! z{B@1gvjF0Hjm=o*YE`51mow}+?nit;7Tm1@umf749%O-GV#z?Zqk#_%%mqQv9P^Wk z+u!%NFP=w5Bs}VOC)UQ>U%CaFBgAZue)+ur9+kkD?DKH%gFx5XyC5lbJwZzsup%N> zR^2`)JA{WbjYONsP0y|{do2Dvpvl3Z@B>D)prmBXlUmny!11iD7uB3$@S|7kPBm!-t@PfXNd<&~$tM zo))AtYqEy5i;LEA-d)h3#|dxRcSJ9ul#le4rydNDt@~-6pFWtDacHFeq!8{rZEE1tNZ(rVTbHt) zk$_-Spab6__8w|S^qF@2o1V&yM4=XPi{$ULPY(|#48Lpe!TiprzMA0t0IgPqzb-7# z(CWs7dF=6&$w)QMyO$>x5C~x2f&LQYLr5lkm53ef&(`n)ZluGlIpFW8AJs(Vh=<8zk@DmgsfQ_G?1}w-f_Y?pn~MxRp5dm}I@gx7GmWt#@j%FQT~WmW)dNGb#2b z?yF?KlfEC*@Ri_ebA*aZ1?|D9r2wjO_b zl=HGbR25%eAig=hrNbz5P5{dszIj;a~)r2$9uyZ@BD1H@? z$V?lGH3>q|JxwzKT?*aqvI_Cnrxta1Z|!T1g11C)Bx2v+PQ`}vm|IAJuBVTWp^wNN zER4s3AFxfu1=CbYz72v3O+>Jr%pR~HH%4}+)Dj3=e%OsSmR`H35q_?rMND8$!I-1o z7gYXmN$K&Yw?$(GtVi>_LSHiJkFUM}e@2?m(5pV_Z}P zpk*K|QC&|Df>l(89l=x(ySAsJqxzpSPkCg$M&pvi{n2u^vbzmA-|T?d3UHm<(Sr!1 zarvD#uXH;t0X)9DV8^4j#XX!kSyV*iC1`p=Z9jgz`FYGdQ?2jtXd`EdB^}xHFtg`4 zaMgI^3AQ6v&%2l+=T1gj=a3I;G)s>U<5(9x(t|AbfA3YFcZvY z76eRxba(eXz0bzHPoF-;7#kZiHZ~^BBFGA?%?04qZIL11uBI`$?W1WvbK5dH7&d8_ zp^oX@0b!Eso&+qz@2+TVzVqT}DX>k1_IergrW3sy15fx34GnEx8yrZ*swi3~j07y91|8=IRe3%#ctE4@4En{hki{mOgV zFtjk4ZeRmQny8cx-f~^8m!Oo0#tOejg<&2v9v>by#HwHdgoS_s^&J$;!lELCpkSC8 z|7$RQ(;M*3tJj;?G2aQ5=vg4;>ifo#JMN8Po^zZR_kV^8V|rL-?tV-wZjM)EQhNSg z9sshyye8OW7wq2;wN+JB?`^&NnLYcv^bh(F%)f$m56mOT2#bZ6NSWvK-{#oVuV0;O zYlXV+&dq1LBN)Z5n9cl|0&D!Bjs|StA^?+Ampz#r_r8A`SPz&=1T&>JLioU-i8TgDuX3A&|eM!R#8x@D}5AQmfO`PVQ z;bP5u#oc#8ol7(^*#;Hol2q!HF3i})i$0HYgVqBtJJzFP>nvb-xEp=OnOau6L^cMLgT~h9**YGpN8u*mX{G!~x-h1sy?x9dIA0P*$KbZ8@c&7-A zz4JzT6g_ZyiSqLDKn^++GDt>`_9jl0WU6Qe3DB9t8t;C2dx7Iq`mgZ;C!+|~v) zH_{?vIZB6z_P)(&K_ts#{9t5!hX!3N*2@A zsk9z}>vIy6+p&~wZ3!&^`ujp#?Z=^%nN-+sBJpMdCtNz*VPK*GH^}BtdZ69Opif+Z zTN|0)^_iBbPsWIw3iMRegSg0EepIVA^}>9#2d7%gDg;?G??D%lNFbbq!^#Sjg%VEq`LC-x8=`h6})M)`y`<|_msb>jy{c* z0E+12Nb$`7(5adC^?|PdUcZ0;zLxZ5rr7sp7WlB7pt}OS-KHk5ckzJyd|vW|f^Tn) ztoLgCx9qpwnJ=jV$k^<<=%!+$26Xu3`%gy?P<|Fz^w;`5zwo3dBwRue7{K+! 
zn(IaiYKqjcw#G+nXfPwum&U z4PRgU?W(v7>41LmWSZA7VeXL{aAnCV9vVujKd&$+=|;+j6+wsGUTo^=SD6R&=IyS> zUT7v=^rl}+WXm_ltX1Dz7dmTtSS>h?4Gj&;Y^kJUopD5wVbXyR$e6t0ZXRM2o(+v) z6V~JU-nU}^Dx5^=w&Bd^%;SmVMIt=Z(A&08c(va~vIp4QXW z8Os68>TP>m1*M&a8-`5heVvyc%NbiQ?0gvV4cLf!MFHXpgh1k9vWuThYj}FXKy;ph zW!5Z!kpbn$=-z5MCu9k-ack#k*#;J(pbR{aH}f^fG(+IANJO6b>y9b9Lho^!47 zInZ%EDr3r$b-`1*ZOE2Lv{R<(&@)GR%k{CZ6y3kC4}A~LgS&Ij`uDpyQ5wH*tbdn% z^!L5m85~}~UBXJ%)?Vnl``PpB`N)sh`sp1Za>f|}yf0VPzRH%RQMXGg0!e~Y=(?GM zh=-l)w*TIWAW)j=dYfZbceQD6@7=w!@K>52IP8ca3wjHUSXjs=i6m*Yqg-=cU#efx z(usc5F2}nE*LZ~zdkQ`@!Dqtq>s9QSCV=cQFTIuP;_9l~UJdns^e_nt3E^%Bbvm1a zWlE{^#P~S+*1OQhz*oAxTim)Fz-Gl5{)On{Ofms^RriuIUwV62*V9)VT&6=$pr0SH zyf8hTsz9j^0t&g`%W0V}U*f*KO`6e@C0YmFXDCjjP1+T(3(4kW`n=f-N$CRWDC~J% zfY!6u9ln7$$$3;868X(cqmEA8iD~1+6>L^Vk!jXZ1}TGR%#oY*&I!j9FXNu4{8gwB zEuN@RB3a;0FA}0FDum?U51lgo>ez%Y9bSMyh|Li;2R*6|gUE>VPaf5G2p1twjRN!; zMOM^Jb+_xw=v3E3|Lyp-KjZkBZDdSh+KCy2LD2M#R8h`(aHPE9-!{=Cef0=4odAzX zwVBSFfh1}Mm&TAs{|?lh;VgmO@CxocUe&oo-l}-PD_<_t(~}$LI+6@C8RFtitgdp& zF_QuU&gyA&d6qLdHoM>s`Xy)c5FVbKd^dn11s=XfsNZ5-OzRKr*~@>mSk z0(}XwL0_NbxvBC^z9GJ1yUe9$Ek^6Px<2$MzQBU}m^A&~#u)ywyr<_E0G+2eA@wLx z(I-B#g5=aEqDnH)6D3e?!p~kBE67zIK^u&!TxbkmsX7&4sX_Mw@2 zXD5O|DUkDxmRw(Ik0dgpOvg%)4366k zL{tg&qR0=XpTVB_BbHKGoL8s%{DVvB&&<2?mxl}6p3kXG1k2KT9CYO0(E20#d`xfT zhx@l|$Kgb`d8sujK^ZHFKDtrIx#3}EVqEeVwa+Xj6{Y~1dP4i>{qNNmR^5*;D!| z#IqJJcxA$e6p4Udp{n@XkD=~OI_YaW*A{ZS0H2Ol3tgET&-jZ`04?8SCBK`J7RB>& zq9839F8#FhoAzjm-*WI4SL?4i zcdu7E89i~!3QUsk4K?`Qe!pa=Tq<$I*Vm*Tn$(lUFT_Cta!}LFOSL6iSk{L{ zmih14M=&%cR7)^UYi#fMqERyDe5Y{HLKwhXx_0;*sYDa>7SMOH%+sFhP(kIirC5z7 zP8~X&SxV4aRlssJ_rQFcO+iamz$PT1CIbgi`>|K!6d-|951Pr6b_TUm3@8hYoo`5S z6Lgd9`QaE)b#gMcYUWv+)ISO@kUtKJIl1XEf8&$O=2IJ$1+0I?BSn%LAFwL)PQu@p z3dt4jX6eMyFpJ;V&JUQ@84_KDd>qKQI*)e%d9O-$@Y08umBno|1&y1$G8nq+ak8fE zp^`^TBulQhe*bEM^)=6Px#CBssaTrL61OF5zRe_xKge120yS*h;Mo2f;(f6$t*g%y z!|eyI5`{qf6c)Ls+7vhGhi-52`|&(a=v__-Ezy{-ouRO6a@F<->l~o7>gMKHc)}Je zC$ycQd7DL7?)AzT`9l&1^CKYY6c{Rxu|lL z8K_6{H9q&KNF4{(GPIEdkCPBr1aGX2J;UK9d^8haX4BYCH5&Nj+DVM;q?LT3X zdAP()j+AK5++?B>_77%7p0ntM^P#2U8GjdTkGm!2mHJi+yFm$mzDqCt`bJcyu_F5| z<-+NoPd+nWfxAzNU-ZUT!I$BLc}J|O4eQ-(I9)Qu!-};B(hUKYqdxa%w0&x{KeSg5 zws}b-zdX zpf==o`=)3_+{dSJ(Yc8n7as%AQva}r#$VQ1Ni%9dMFViGWeh~Id~3;avsf!Bmm`6n zn_BR#D+PvrZC05*s2lh{>aCRcw_NeRcJ(S(r0TMUh3K36-K|}>Tv=1JqSF$wvf86` z54AM8hRQMgjHP8zfardk#+fFPa$Wu^t&~w_PHXG!UF%ln7CUEOVqQpe$GBHqK8rAI z{UxOzAd!B>W7K^Tml!+}aWr_175%bGcHi z_V4v+vh33O?zi~(h@L(Di{&Wz^S)$ur@*qlf$=p*-sOAR{zKC>AEXjrSToAg;zE1@ z?DW0*cbi!0(nO~o$_z!;-A84_X-&340k0tmLewwAxS zR9COJk!+)10$W@oD#Wx~r*}rra%RQMJ2Lh@oy4(Nte^%zQU%TdC``N+(6yFxb)l%B zx3kwYcHiwd{As)VM~H*`SDHb|mEWvzxn`tUiu$uVe%gRq*2>Dt)m;VG90(2(j?$VZ zoijz=h2kXl%k?0`YJsl53Alh~fV#RJdk(X#4e{UFF~9Wwe@xN3~2$b+aEH>Dv#}MAIqbg2v@{^247~ zA3Ix%e~NNh3@jfF#AJPpGdV|hlV2_|3ZhpvcJlqpWLAe!^}4Uo#}@|=LgI<84ofMH zYU^779OrP~ThCSLqbwirZOloU&vqfnjCbie-VDJycBec!?T*GDAZIGpF6uk&PL|2# zw(3M~R-svPDYdB&$hH5ileW)LjwMm{PAT~NZN0rIwH4rp(<ozdk+Ry-D4_d{oJl8J3O*G8;*%GNj~eEg~JW_39Vi2p(`IZa0;vf`C}S-dPNw~ zAijfgG}KT~erB1#eE|mxqWT|!(sVr=BiMyT18axSCW3wgxr5Hirq?@k0bPd;WF&y{ zzPEnxVzEX*w`pW!W23>+gU^se+NQBNO`hQ~L^XcEX@ZjK!1AGD^Ez%isD<01=#bir zW{rj#AP5RB!6})Umq(k#&rOyh(67(#h^iT0g+|<7N2E%=pek&7qHjGt(OcA-Wf5-I z3)*p-uhS%W1Z5>MZ1~-4VHtkU*Rk(Ya4FYx*7PZf^G;!j%v}rJuc)E}Cg^8F8P&Er zGoCao8w9BNl63l?MAQpzGa2=|?zI7HXCh|zntnHCFiyvs-(x#^Dfq5VfKUddNq@OQ zztdr6uEyODYinzuI77N%<&~9$sGrYLH25(ee=a=?ZRw=*XP`Y19nmMT-0-sbNi6DR z5Z-!C*Xwu@I2^)S0#yzf$(k#EL1zqH>Z%_-6T{hn5* zYAxa8@`h=ul6C5vidZ9zj5z=$_=)kCD$#KmjArlN zzUEMh?@LAG!Xm%=W^Asx-iPdv!GvAlOy$t>T6uJ~IbJql>dmh{5`@F-!&bnO@-Vcb 
z-u}xWwZxBh{+&fqyZqiE)>9}W?Th2I7(O;;*yG>wD2i%yDSz%UeS$_2305AKxCXpA z=!1tL{b$%Hh~a^7h0kvV-&nuG;pEO@mg(2GB|1`MM^JTaI=J^h0N+FM$wVZ$cUtfC zcW^sFn#2Te=vffR{PgT;;vG{_o5UWEf=KSQisKl{oEtoQt*RT%sCAj_6Q#`hLSv? zp2>r-4Em>bkV^NE_ZuG!;} zFE2bB8SDNSRNqo#PazY&x3+W4>dk_o9o{-T45IFTGZ7_URl$KPMux#wj!)N9y zyXa9OsIvQ2YjSyra<&%5vF_*itU~0mpY6>!B)}D^RyuaiHgaQ~*3n6l;nLK+CWro< z<~BYD8S5K|{?+?e;{b-KE|+#J-76KTKY-h9mB0nnpRD)zf9 z-i^62QU}%ano%LUi|D|PA0i;~oWaG|3>}Ghk(-;|-eDOMt>^oPW3$`qQ1L(xQjD54 z+78Rtd!-%^0K9`U3JSn%6fr$mYjI0oNVeSFyS0_M(-LzX?ntL(&Z$uL4a`KUuz@IU z>i11j>=9O|Lo`&bJy&o3C4jz?3;yU4cBH744jN_*?yTy(_&xz*@iSbVxJ$`0J#%IJi5c_~q5;~^v zE`Ux(bdgmb{x;h;<|>3(ES@%bxvm(9_c6z5p+EJi+jso3&sI5^<9(PT5f9>2`}R+? zkb47p&70Ltr$+G?HErrgLYHgMzgDW61WpE);c4MB!%6Y+5DE5R)`Fk{jAcaAPxjDh z?B@jZo6DTl<&N<#fWEV*JO3<9t133?N6c*d zQ$yY@VL_6vL9zO)j3m&@w*jFaXvI^$dF;q{iRKUcOXd4&M$BkW9nf$5s}Gaxs>7hn zt?sJ+Qf&#CU5fi4;plFxfh~Mx?Jg^Rf!zYmope@0&p;IqgxH_Uey*?6!pGWdPL~#> zsSFw|Di8tj!-HOK1VM+JM8}cxRH4vzMR)m&4vsQgHVFBk_(CNfsS|O#S5IGG`=-Ew zUi?cCLV%Qk9NP=jckln6ZMxlF{B;qE=sX}k7{WBfk-6KSckeKzSms=}HjjkYrcs58 znzp04>9VklTwthgmhQ`>)BYNtV8`XctnM}_F~W)#4UlXGueq9`^@Q{H^Q+H!kFtR? z5T&fT^C1!7Tmt%Q0E!y-&<#D>#wjR;j2+5mSzEx=5`IX=w7D-utH!Qlp%t%?&wjng zh+@@*hwA(OLtunBFYE3WcG*e+S;*%L${pw#gQp1sNi=yb;X*qTr1Kn*R=4{+zd~zu z-in$0&~k(~_a(L9_1wFq={0^-6fWWt#@_AagRbT+cRl44scWHB30h--dZ6@dYG>MY-0TmFKM7yCMcLR$;RkzpQGnKlVxYvL;>&8>a^Rf16!6qwudqMuTGb2p z!7|Mhu@uLG!&;M%eHe4HELM_ER(Nh+?NGw%m&|t>XW@>drMO*G-l)UrGjn4)_wG6S zUZA#Ai(MLf*4}zV-1V-SV@Mplfh);3%ve7X+WCNoI z7jp}X_G|O|ByxX$E&V4h1*{305zy}G(N7~Q*%-_y&m}cc085p{KfR8%HR@V?=_HN8&2=V>Dq6mt|Os`1NQ!O7r009M7O0yN=3l|#`MDCsL~ z?%aky{L=ozuox2NR0QhCphNR&5IQ>zY=r}R1qa{ob#4>Sy2nP}I(&ha==n{dPI~E* zHs_a-k<_i^K74{iUA~)`)a-hE9u@arOB_$4hG-=7^RiCpoe7^VWr;S(9z=V0g8Do}6wwl8YMl8DbM6DS5@Qb=6;zVPck)J0kJgvL_d{hv)F?D6BL#O%&&I zX+Q!eNtak8o399~qa8}*5gue4Hh-@Y7HDc`RwLj90KOPRK7d)iF^Pr`?tonc;P%Y> z{#?v~q?Q}zKHli#Y_;ikDnw6<_rOR*pe)Nzha^2V;`N|)#LKniYqsTX|H-nSTi~KN zMI-H=ZA{IwN5v>(i&)$>{7p@W13`o5tl@^JjwZ(gJ3m$S-yzntlPiIeD#Z@3^GIC< zl0!Lq*h;>LnpK#BBLdE?$q=q!1F*4SPtuYy@}VMm+mbK9!3$}=nzQqc6kzIbib59b zwm;rV<-#4iqLotFCKA5$VE%oKf9s}IlgAHC>%fgGQ!kvoy^V1(ysLii)h)75qi@jb z>*?757+F?URuF7lN(C8+oChj3s9(9D^=$f?ZBTZgY>q}$kmuN#ONuPJ(t|e5Z&xQ=)6AOM7G#M` zh>K%EHGto&(gw;A{pXJ;9265J1pnw6V*wQoHeeu10^k6oU8>LrgvIyywQ==+u-a|L zzPg{zgyMR93<(Xq#$0qFwvmH*cS8i8s-lal`v2C-r?8a@R9O0)HHR>Zk7sPt6@5G( znfQ_8@Bw7_Z6j@jH0M&q@7CDPJ2R~l-fXPn44nu}k}?T?if>h7eGFYNXbJ9*Vuyln z38+)3LzG*Gcm--$G^bNo=M0$mGx|V@jO1rX&*gtqx%LU6E{RvC9ZE|4UX#l7pl)q; zmLwD(D^(@R&WrG{AEjaDYj^U*K<1(R!f)OO>VeQCfnXmHov_c^#_JTpvMeuOtMFU& zSCF@gC%=3>St9^_Z}^2MW=psgOY(>ep@T<*R;J91hT6sMFI}~3z+OqsKg2@9;r{=` z{vd`(9cnd*-9&3~-$T=m)DzK~$@+kQ6@=4>u?rXlAl^+)e%sm7F0Vyi*U<&2;)(PT z)-*`u_H|`8ps9p*ATLYhC6k$5vAMc>6(XB*dr?)Id(0Bjb$=B3748qPMQvP&=p=d&q1ga{qcD+O$mD zQ}93W2WPk&$VU13`GJ*6cP1FfXqBF|Thl7si=5c>g*@Xw)xplYwY{y{!A1YjHUH09 z*h*iEK`J`)8KrL^%AC{~Ap6AXFjDv#X-e$K(tZzWESM7Sl?F=fwMiJHL+!I+pKM-~ zs_I1K(5LgD@>!y<`p5~L(k}g`uXQnRnQ}FW4*bB~B@XJJUQYNtsc!MF zB2O9CH7pCb7d`hE)xQo5ERLDop^p5>LmhbP`-4Ga7}Y}5tZ}REhHdq&+iuA|vKI}1 zM=?17nfXy52Rr0H0@f4$oUOaN0Tp+*RJzbo(z{!ugi8;%)cB{;sg@>W#<%ys{-?`g zfCYqjOmT*ae2JQME%Cn!%y&yjtsv!735zvMHNq4c!rxZo%xt#frVdr2LmMy8Jq1`Y zN46cCi;Q@YC<)P`DjcLFAr3VQXXB0FM;8xt>sTb6ai8+Dz7@`2d6t!PwOYpAV4q=Y ztHS335+7~p@65K@-ixvQcvoThL3s?qFz6SX{uJ^M@$z`8!j^N*-$44R5xGgIM1aJa zdW2Gu?l*!h-22{AmyGDatE8zCj8r~cPQV2g4{S6Y3iS-6`3`~F0X1J-Y1f;-%Y~3O zsTo4)3$a&t?f?Wa267!hAqD)200;Iw{&{msX3{-Ykr=^VXGAqbA&XsXS-22F=qtJX z&y-ERo6pe&HBE6slLh^*T^^gV3>HUWe<$sd5cznc`y7RCWa$pWk70VcnE=vV%xW6w+ys=sHt>8?Awdyu=R0Cl!cwC5ynm_tH@F@tq 
[base85-encoded PNG data omitted]

literal 0
HcmV?d00001

diff --git a/recognition/s4581053 VQVAE OASIS/Images/Reconstructed Image 2.png b/recognition/s4581053 VQVAE OASIS/Images/Reconstructed Image 2.png
new file mode 100644
index 0000000000000000000000000000000000000000..758b52ca826d2e807fc4b65260ee7b33738725cc
GIT binary patch
literal 34045
[base85-encoded PNG data omitted]

literal 0
HcmV?d00001

diff --git a/recognition/s4581053 VQVAE OASIS/Images/Reconstructed Image 3.png b/recognition/s4581053 VQVAE OASIS/Images/Reconstructed Image 3.png
new file mode 100644
index 0000000000000000000000000000000000000000..6fc49f8131856c9797f65a3b631965dddb59650f
GIT binary patch
literal 35634
[base85-encoded PNG data omitted]

literal 0
HcmV?d00001

diff --git a/recognition/s4581053 VQVAE OASIS/Images/Reconstructed Image 4.png b/recognition/s4581053 VQVAE OASIS/Images/Reconstructed Image 4.png
new file mode 100644
index 0000000000000000000000000000000000000000..83a0eb86286207c01d921bad9e551a3260aae680
GIT binary patch
literal 32611
[base85-encoded PNG data omitted]
zmSr{NU688efU$R1{`LpDdd7uUmq}C?U+0(eN}(PPzHYbfF&z1aJs!EWKm$XRK7RSA za$?o7K}_oWs$l#Mjl9mCTTUS%TyOL!`0r(XAp-f~7Sa@hU0;|sZ&GBwQ<*ZQFM5wR zsSQLJ5wi>n^zEFS3iIT`P97uw(KDT6skp|}Hj!cwA`|p&!5jGf4Ys`O@ip!ZZdPCQ zwO0Ljbb=It1aDkw;G{^;+=zQ*@;3SRtHp&`pXIvdo%3~8l@ss~Zg%!z(rZ@6e*%Y{ zP0h_UKtJT3j;tPgogB^Ciq-IA16x(N{JRq zPkBz>i4Iy?^3XWMe7_nG^;S(^%P;?O@?jo;jDXeNJ=-3YCtHOt!XCVNynW>_RlqkO z=YX;}nn&D`3C# z=*P7R$#sMGwHw{Qan3+G1sHy1wX_@5G}nWCOm3>k(r}g~wnr1Cmf6$2S_x_Cr?>RC z-Ra+vq_f;ff#0Q2GmH7k^FBW}U$!&Bj!vWJkxcqsw^W3n4(=X>Gh1q!e0`fxya7-u z|MI!!qxEi7Kh<@YV~_uv?A_w={%Ma+c5eb3lcbz!*|_5HxA}A3&vJ9W#8Gi&8`lmj z-U6~~7xR^WOKxqnd3Q0Rl z3e3FjD>FKyHc<6h#H_!pt`>iLvp|;ryMU)sChM?jb#4-z{Wp;SVXj$0xzBu`7UD!E z6^H-68bH_MnqpMxw96 zc_}k!yzxRhvRoIdj=V@m0ZdBb907|>kU^QV@OIg>M=wgc4KA3w83+}U zCXgWf_t0g)>H-3#%Xq1sot-)hUc5R^GE=``w=OBRNK$o5Q2!P#pm_jt?7eS&1)2FE z17W6>XYk)Sq`Q%$D^D|H^~?I)zfZvI68vOLvjJ0iBjvF1^;#l6jFO8eHYfMYHztz> ztgqPV;!v64qL;&gvnGJ`+*>sR&8ORi3XbHvjcf}J_wJYeOdnW$x|0bn05*PE;dNa; zlAN-#d&1lSR>LPAGU;_8CkjACa(9%bET$O0zJ5FME881E+Y0LpQca{lC9h*DX;Dx% zka`d6jxD|W)EpYFM5Y?p%IA89`c9?IgdFW`11SFY~f-eFGdd-oa>|Q?q2cOIvNq`9$ z31P4N!ST7v36AI{hXLcv_4jr8pXt;pIsESafRTGVgVH z9(439e2%0A|JPMbjzjDE#UTQP4B1N07=~i_pf)&4@A}0bwI19+%n+BCpO9$O=DnbN zw7a{zjO+j=9+>n3CeZu$?|pVKq;S7<@(YlUZ9;xXtNNP^#26o4mML<}G22adOodw)5M{!l`A9#*NwS79i@X%zpkH{%6PW zmxCmm-o1NsK!Vf)8qwWb{wI^1yVH8E?-hH(BY~#oBn9~|W77b{gJw{=xkJgjK3nfW z9~d9f6l}M>k24{98f=BKfT+{cG#u#hlM`OO}=sv0+ktgKs`}o@JulK{Eml)i?{$bBA`E`q)zF;kUkI#yPM2PFgielR7$T zD^;2#k+z(7sh?p^R9O+muj14IYGjTJkMw~|cMsS9XaDdaDN(}y@q-nZ7>c}0jibHO zuAUDUZkv>ido2(1O%FzS->yyDVbC3$;0Og{Vx12Sa5t^ozy?KR&&|z&b$(}xS-sz- zBcFy_CZ!Xa0*;-(QUaK@aMS6Z=yEo%f~vlWO_8>5pidv`S%eb6+ub;NgKc-Qed} ze)z-W1Tt`&|=HrAI^!?~ctOq>L! zM6NIV@~-z$MFZ1^pWHgaa`XAiw7J(9p+cEqdP}zS75ZTW<=`gWop`TMnO$yRr2~hN z*oL|=rVsoxj-x5S=WJ6Aufzg<4c8qz0fj=xl$KuMSzoi9vcLHYw(1n?9bDZ0wBRxW;j#W-;+{oWb z)6iSM#YDBMZy}7jBvlQCOsvzD6~4{&p0Fv+k)~FCaQgM{B&1p7f|LePhMAd}DK#E) z@31EyJ~-w}y!R3HJt1g-E~vq>ggFnbEp+?z>vv5Z@Omrx2=-dbc_E@T6PbnP%>3l$g7uM-ogItlTb?h1a`OYHLYrj z5q()a`gwO{LrZR-;lP47a7c-V4(823OiO28BU_S<0F7W~v8pw&*KdJ=P~e+qhTq9C z93+Awwc5D3Ok=rhYOd!U$qZ52qEC(l1FwPp{#;WLUgp?kGaq1yW00VewYkSIUS>0{ z%>7t1!D^7IKrO{+aCn&9*wB%cj*cRNXc&L}`hppv5x@oMPU)eF6; z2@WAeKa$kgKfymvibXZC(6Sy#3KUBwHQX0iYEs{0vI5FM5q=Aql=8Ts+`0Pa&}xR0 zP%2!LR@T9&<>0Yo;T5dZ&(MPnJ1~>Jl+kr~c$l!@kx|Td*oMVAQ67qfjDz_=01um+ zUPR0T)3^8GPfamIS41|6ED?j!lfO28H{`W4$AP8b2a!bko9X-NxaS-bU`i>0Q}KPh zeL&gX(JbtTo-T_iJu?vZ!B8-m-{Q^GcWH=&P=2l|)gey_j2kPL2D>Im?0CPG-*G01+&P&zl%Z~_C7v)Rrh`yy|;IIKT5%d9{ z{MN1MfZ4ckhYPdGo|PETJ*@Ui%C@wrN&oOssA_V>d?Wv8`$r>ropQvQ@u#fnM%djx zY^3%t(1U}CS=sMKWJM&ofSG{0Q_1)>lsrQHgau`Mb1M5Ki&`|olre}UqfKxC?G*Sdr~sV?p!~i-e)kRf z6mW3B;0q4w<;WiISJVI6XU+=7_)brb09E_xrR-gihQ3;6Bg3`m|8OCrcm~Zk=36(t zmhj%xx8?OF9R;84Etv*899on+>oT0d9?WXMtc29QS<}UVeE~j;Worm0z*^vUFP8SU zof~wX?>_^xQ2&mOp1Np*LJ1}r%QU||H~uq*1u{;HOTm5}NmlNfwWHt~k;+ zfxaShqHR%tEUGcd`bVF-W&)=p%)uIrm4ZQ>f;K!WrGoOM7VX1Q!Z_c4N)ulM0xkIF z21T*ZWBYp>rjjY|Ek$n?j`5O$c!r2G%za?0D(?loXTHR*+l4v}@f5U`4iuD>5|WbP zM7B$m5lf6$@a6Sl_15e(O4%bC$@5Wc-MQsMgl5SX1B-)>Re?#L6bKu?I)E>fy!Obu z=*N)S+e}>&Sa{Ex z6LB^UP!)!tPuv3Jx`J4is(GHpb6x6pTy(Xp!5ok8LmI-rpe=YBz}QNn-ITtCQUNAj^6^->ib;br`oBcL`ZLd%jZ zWNeR9b0ll7s2ly959yhc@=w*f0W^BB^H15+r(b*0Wb~lwffOAY+Gl2?x}GtSOq!{Q}Mng6Jt_z`U<9W_3qwXZV|1wT(e@kK zb|PGV8vrJB^6x1KP%GhXFtP}Ql1yrK9LmW}FEnY5Zr~P8gjgU5IWXzWnVA6GGGJ7# zmE(9=$fLy4+%Hga1-J=mAcTAaqbpU);wL?qec8xGFVg_@0ou|SlP=n!H;4! 
z_QAj?;Kbd!bt}-5tmtmSf&O7}!$O_H2}Mq6DNVlX?BHfLINNmcjbO_R2Dk7lI zywCp~?-`RI6Z57F7ppcu zXJ_)`!<}w;LEj{F0FH>6xK8FFRqCi+_LF~+=+;qu__%rACnxJ$*??|#|NO~^VNyBB(M)WmNv`UT)Pn@D;++M%3 zEeBP0{Pph|s&wIf@#(Z4%qz>wKlZobh4`Z!+G#mZM;oC$qm`?5)!~eW0tjWuQQDNc z=f(7&8kst(he`yV_9s!-g0OF35)TPw$jjika(*<4qz&n7Q_{>+lvPCvt-r!89F1RV z8qM=bHRok{;elAi`E+$X=gl>B&f*AExxrrg)SzbdjUaop(_Ig~RbUEt}Ttt3GT zf~dO<8U4{K9v%&*GM8Ska2~(XGT^-UGD=zU(TML9d`p8mI&1-YPhLJdXfHpB{%nF$ zA$5l-N*DQ0A2U9D1-8b|=@bpkB!n9MIW{X#Nz*|NX%|anH3irZs7@f~;?(nWTpNtd zHdE&YJbiTk!eI!`4IkdeDCOn0Uq62;@`iv28T=Gah992O7ez8H;Uk8SDUcb?p!r^p zA^Z^bnyod9ov84o=R=jyY?AqNUR*mXCrlue)}-21IaO{HD6T+AY{`$sc`xE9h_kbp z=WW`h^)S7pqBv4KOlwJtMgCMp&(6Wq$;%zWgxpy}gbs-tR1_4Ju&Ch)`C=Bmxg=Dz zGy%cBTnI~gDTz+UyWBXEmR-$TWoM0B8_(uo0R}fE zPN;|6$vp9D14|-fiTZd=(G?d%3GTQ=_leMxKQPFY6Ds>$$=~1K59qg-oPY4KgHe*X zP&f+_-oueEXA+EPb7|_W&SW|M4}b#K0l=ZM(@MQ{RY9uxdz<=BPpYn7XvFz{_+V|7 zXwE`G=JIGo(6(IT6O&CLI8a6cg^UvtuCv^t)x|4v)Y*UPmM(Hn8Ktm|o%4`P8&MpE zjTY2lcwt%=WBfwpc!K9lF#+PlL$|DhF=Xh>JpWa@pHtTtSSFH3mZcrqOt@JRJi4nTw5;7`NXS-{rou@b&ctS`lnP*+*=> z&vfwJJXgMf(0VTBAkRcKFeh6l^&)wK$k<3GPmf2;EfXlrY)@8k;cq#e8?bD6Ki zzA#tG%=nW^J|xa*M8xiK?Wozdiir~AKJeoT{ZHH7n5haHcAhK5#+y6(#G~6v!#XH% z(&XtNV?e=V!gADsr;~==h1~{`l-_Xy8jiXg_O>VOy+cdurk^MMAEw*jufDwRzE52uk#efI%6rvX8F4(OZ>>wMB+9594Ob4XWI@zJjdq z(W)Yx9^N84-ds(HN+GfZt!wu0-1g}OjTi2u9VO^h97u*pBu&wsf= z-jO6)5A zYQ`hj77vUf%wrfX=T2|#-2R)d0le=_Zw%i>?2VNYt8M~E&k(*u7g(EBh2(a<46)&n z%-?UlIS-sWkl^5?cpSq6z+ME6q1n4wYN8px%sQPyj3JBXp#nYm`tsajGd&mghu%;r z=akF=yPw``alP*<<>N?;1utX_tw3!;fUP_qsZx`thV}!n&ZAL7+A8jvz-3bAqBVMo z{PSX0?u0lRCAkQxG8X6;t!)(02ue>uNeB(hx;<7=eN!devWoj0r4F$zV`Qklc%kn5 zTd^3j+`TYI%O8Gt4$3G;lGN>!s1a59CCi4$z}MBQK$s z;idj`j+5RspTNFB8k@GJ{mDuMc<0p(n~~SK|3*o3C!WbhiWF@9epP4zYBAV=5%0@Z zB+g^vaiBQQrHtk)0v}+i0(6yp%*M~gK|{pi7dR||q9$$hrBBpBRBfZqd0{}KpgpY7 zvB(a0(iU0ev==93K9i#VR@VVUZIeJ)b8r*`elS*Q*7@}-H5}^tK}Ce;5GFL2Y&D9vX0jqY zcznwf$jY_M5?tffuhJ)ko1g!hgd}kIDCOkjz~n6Jmlqy}Xfj}Mw(2CU-A0GFI-A!- z<6LfchqCe7%&;EVFu!EaNzUlB)ZYOt76Ikf)*OL-l9yz_%i$W>V^4g=|BEe>xSC%J zD&Fx@|E`<5yyddyw(dodJUiE-iawS{hioiz;}4sPlhDzaz=mKh66|QNo0N;P zxo^3z`zNn9*q59MA>qEx9mEfect4C)RWgxm=7ABDKo-clrwtl}(+|HAlsB}{{g$H_ zW`yYnJP_osun-=fzj@FgQlh?`u0y#>aBvLo^_Lb^Dpy~W)P6jOWOSQ7dfCzy`zLV< z)`}O0)v0_}GM*p=Mxq@keTA^5J1fS+g+ff7@G)RJ2F>wt`mNTwnb3hnOyEdy0L*)#mjg7=A=ya(d>ktKkSqS= zsG0X}_4+$Z;VXuZd47bes@HSvtqbyh`!hJ}sIT>>1JnuK5l}Z)Zf@EJJh<@W391=> zMRV4&EDZi(9{IuSlFmJi`|*`rCAxhA){>%Zb_J|GZeJMdSR=;zR;0jnU|{a8e9&*Q z)$$O1$wS&i4@J`tlNIp9jlIAZZ4?A!VjU{C;Ts^aqRVdMr+i>z1Z#sw`K$@I5?~Bw zMxBD@)Ch=`C`5DRK1$$v>AYxYH0GO9zM1GGoeREY^0tT{K|Y%r6C;N3{(!QEUW z`%cL9NG(DF_K!HD>?NG5r3{KLTgX}9mgKkkO#Tl{CVUD)X27j^i|t|%pZ683{cswQm;TjZ;2zdX3ZUZ*AMj-GEabE z%(`vA?Ky!Vbg%=5t4?3~4=^=RE>FrHi4eE%Klwzr>W#Y!OY8B8RN$`PkMoZ6QzqJd zwXx~B^ta;f)Xv^P-&UymS)P|(ifMWYLy{untK`K2Z)kYXPdewzqkIU8fUYUhgK=RC zr$pi_-E49e$R5}l?5Rl|C%V17zaJg2I(icT1Cw9y^iIo!}wR+AT2$J%|Ye9A6xBh~hXV1f(9uueTpu@Nuca%gT%-?8Bi zKAOupI)5_Q+0R_J$GLb=QHfHYH&2Ygf#Qp3Oz=cznc5p0>PF)dc=Ip7FU#k)d=u-3 z^^z+!7)Gdg!+|CVUrRAU$F_Ab?%R#e{`te$!J#y6Cz_V!8;O|eZx(4{)qlb%==TBb z7HTx*zN}5_-f|^1?N@K>=!2etm{Z~4vZ*pEnO<|7q}dwKcrU8YMu@|!9#eKoDtuk3 zXDiMaf_;I5f0DKhPAr2J>C~l@aRS z_XLxnaCY5aR71uf?qh&Yf3y-QNtmEl+0V!OK{g3TY*hFpO+2}?NYGuyrGLd=zyHz^ z7BXx6s{PiXfFu`}L|YR7`0jlC%c);CqE6CA$awOx^G0^AOb_Cwd!&>7H%LfA? 
z0;LylCxXRuPk};}KkuiI<^SpaQJ;a#gKe&`mdhnG17N0pnbbMF= zzt}asWhKI*#OJxQt4qV^g{Fi{;xj)0)`Pz1%R8I$La=hCYJ3Ox0XTqnR`+*-9uJa0>Aby?&O1C+SxmUSchnvd*Qw?Fr>fcSy0iHE%6KBtRU`k%`7J5b0P~jl5(ba#f_xc4kDK%(^Yfp&J~v?Tp=_}K>HMk z(hsB%9@}EciCL%&DMn)%<7JduS=Y%ll}!IS!6ZP+bq=r8v>`~RkO*u1Fw3WCzaqBR z54(2em|Tq#0}dv56G0v6tov~K5ERCZs}De54C>hat^1-{voPiA)yki57OxZ<7P8s; zU-_3*)qvT^rLn5BaWp%R^LeACBfT$oGQWt2+3&y%rL=M%ga+!dSKY-ceud_ahX4Zv zo{@s7t;}f{ub0Nzl{sq$8|Xjq$)N~JrjmL3M;wu`aL;m;cU=gmWJ7>OTKZ^hL<_?# z*X4eObSXCi#CEp2`p$ePDuIF`8hTS^ubiO7zNF?7-FhZ6-76Z*@$s^WWb6WNc6zSO z!!Kg@`6&n)INp#bT{HJw`u?|A(pDp2==IPG5r~S2au9z{bA2arj4L@enU4!w(cxdF`@+-5ypBmpns@Nu%TKwUC;&_wS)(ko&XxpN|3|^WYUM zu{i~yk^plJafgz^Xkjh6pY1|0!K?3mx|i^n1rJi1f%Mo2N{P>hUlmSn^>>jlcDR@9t2mb7?E@dVe%vqR$egx7CK=NSAr1IYl_5zw6u@KGor-}iT--y06wqdnQ zV1|@I2hMqKvvF{88(X?7n-bIJF7fjDI8ZdrxBzVl-dv~cd&^XPd2hhZm}!KMF=X=d zj&reipS(H4*ChM-eT<{+^tVig_LrgP2@uPnRW>W>KCw?FyrUjy%D{7uL1~_oHoDsl z;Yzix&H?25N4oc5$}wu@c}cyCW)w6;pj9JJe)iwr!-eSv4`#lI@%G-tq0IOzyl0Ic ziVB3s3+MdPedoSJs`*N{9#?LmS!!!!oMd}LUx#2i>#01TCP=^9P?{kczq0Hx|41#> z4VP?9w3>TvZ9o3z{6Sn%u_|ARO02C>qX*mJ0zE4rc;I7$U(W22$NwY zfogO!sHj+^%e{qyFK|9~h(SGPA7So2w2SW>JUlBJ0l7vMcMq@e=FB1SF$b*J6 zmwCD*L-PC2ZEjWdwVB3*{gmQ>X1^m*#LI*5s`dZ=9*fA!M`C2267mSHF-$yr9zFu< z-Hi)huJXqJcs*Vh5E_=?)W=y!c9gn!V$0j`)ID$0y~Dy+uh3zuxyX;Zzsi=C+HP4m z`;Jk80e2*wuiuM1fvWN4283mLe{25@;4MKf0$%Rjtw+y}&4h-m!)B!1JCe@ea^F@4 zCyhZTrJ1D=c`J-^ez7+m=|E}wK^^9(!0i)eA72Bbg)O#+%l-zSQ%4;vq!%%KtAB^@ zR3E%^n#qBuTeA=lS0x=xizmbA(c|HIU9E1oZ#nylkl|Ho%ps4bYupyIv?_y0EkD^B zrZITI>TB|fg-`_0TtN0*NY%*a(Cgs3^?fZw0)`2Lr0 z0KfMXauzx$4V{b4Wa6W;eJL49QhP3ODAkX9DQI6Xv6Q)3*hU18@V+Qp_sSJ%&7;=B z9}QWULx$-EPf@J_DT+0MJ}$Z*PGbE58Zm9F3QK7 z(fAGZS(LDJ8WxXWxFPCtS#)#JNoGuZIHuD}&hZYqNL^D&_hWxtOm~3ggzqpG{{iDQ{?}{|Z!pyO!rb8vVOGYX`&bpGw*j;@ zM3Dn01v6&I)7xpPQkd%>iOi#7^RflNe-JC_d2-@CfWH^E>b@cF=HmM(gXh^bx6j05 zmmrV@V_ug{0Sa(=A zU(_Ip^mA(m@y-B%yw5oQ-R4VrC}exz9@y z!(Y0y%aiFCIeQH`6Bh<-W+p6Z6<+NP+;6OBjaDvuiy=1OBtvY8RyaCBr zAg7{QK&)?w zh#A7X%$2)__AQ*=X;@)_6X#^I?DRjk^>4wZJf{zTWtl4roGJ`I!kZ7@5*cW#U|vPg zZ|GDA@dn<+3tCMe~&(%XqFJM_Fg5w3n z?wO`6UxcImZDV=~*Y8wa7Wi7846vLozS2~6F=(9LUz$2HRmnYSsOG0HfsFQ?M}o&( zNP(<-A%57gMa#0Pi}h#+$Tvbtx%&XdfK3!X3L+^MKEAy4`{4RNkM!%$T7!iaJmTi$ zcb4VMnrP~cx_|k!fS?Zy5Fl@(Ef>JPlH1hVyO|2}PvD$py!ior(kr&j6hwcUNP|!y zi=j;UKW5k)8PEC4Em@qdk;RH?6vc}_m#4WCl*H5hqWIUM$Qs4)TifB->j}zgXjdk8 zXHiM^%emD~qI$w^uC^S*wHPw|P#Z~lLXKV}5mDsYow`|gR3$7^L?9y zH+2X+|0dIjiH2$Ew~AzIm*~gB{=%mwxY&)ya3rv-fMZ;i06&oiU3@%^Ba(0h;0BUv zffVTGUh`)X)bpYgc@q=-vOG(D>n!*P94|8chC11x)Kn3a7bI#%RP-&!Z)cTTw#sMt zcnE0!XzdNhM|4^Fd|z4Y|FConlK@R)CZ5j@)whz4n9}+6jl*XMbn(Pl`}<@h^$4=P zEsW!=^^R|8TuuyBMVad?y8EIY9$m;s=n_Eb-2!d?>ntB9d5bbE} zz^5ok5-`2^3{6%K`_*KP)9*iHAEMxK@nrd3{B}C?EfP6m?%s7$=5qRXIdb%h52)Us zo9S7rUuEm8?fPN#7LN+WFGq%!P953FPQUanoEVk=@36h}kEF`vIh%OxXA7 z%B#}8Ge zWpANdL_q-wSy(8ko9+1i{jL7+e&#iks4>~uB)Z$G=$A2s1}@3irQLn35C;?G!o93N z)c$iM{&>(M4apt6(r?zyN7#rnq~PwD?6O>n2Al!nN=mQEiuAHUD&Y3j1+FK`MS(#H z;PvRW$<{4OPa@Op^G!P377v~jV+%+cNmb9o5H;!-{imD9S37Lo9IVcTwrb!$R?Zf8 zDOE#72(Ra5@SzkWDIom=lI*l|I=h4HqESrE@m6vAJ1)6&weGm=Y1fWH$QH}Yge1c5 zo}RNv9h#B13rk{lE>$?G4eyeehSX&Go(?@&!1_^a<+;%W1y z$Nrw=>f{xC>L+Go*c58lRo3B})O-$I93rl_4+2FsY(#>k<<)$brC^)r}f=)IF4#=K$0OHRPb%KL}%+; zfLraww0411h7M)#4b{`L5Y6#>dPN}&0wLBc&hpX~2E2?UA|=;X?7DZA+7+^K%sLcc zeIVaD=u9wRlLIN`(Rkfwht&kNR7q;9v@##QR}$g@*6mddjRSVTkyPe(U@=n_&08W`1LIJM zq{o5DvK##6&3m6Nr(|SoU2XUlj>lNB5GX#I!qm3^mh*b=L#G->0Ihdx%AQg{79s2L zc=s;H^?eS=&O;7XXw2WSvN`qOub$j4rPLrx-tGq&MyD`A-xcydf!vT;$2xJ2US*T(P>rwnCbH%Pw7~`(1H8DJfyU=vgeADBq+P6Hb@hxtC248yPrK z8(V2W5bu3Y9%ZAI=t{4kj`GUNASmMglOE@z60bnJ3eYy-?xLw0w@E}j7p+`_ 
zX9fcFoC4Jo2eJzt@K*!yXKvatYF&uCm!w#Sq(@3HM!q%VhrL<95H-=p4o@JaQn}njOV3Nw& z1=?JR@n5>`VA{mGHklL|C|!xAJj!{@PsV15&y$ATolPlWPvksuPF3jL$QQW~sZs$F zD9l*X6`uYHtrn(Daen+W*KA1I5}j;8l74i<_v1Gg?au>HGz2XdW%})_`TaY5b{)d& zT7P#5eMh_>%*zee?Y&gIyfuC;+bENpa94hmN>pEy1D*d25dl6xitytfMyk(S+tS(l zTGS+WKn5v%w_q~BJTs#do#i8>rM|Ij2B|D9UxA+a%$Gg6v0zp@x-S4*BcHJjck>hD z-Oc+xV`fk0pfm>#^Zf@~5i zDCE3O_PQ^C8eWY0&Ci>2i9Ih(jQ2KW`{^l~4nAAeVy_h4_4V-qdZ>eleUv88Xe4q_O9^M=9AvH(eU z78WexQtoB~;?T`;K$hNCC_Fa6emPA+D66MAyBdIt3pye2IJ-zerz2 zCLLLzj=9CM%ekqgIf@L85=pGm3b{pvDZ;DX}6d)kNp1 zbg8Q{wr8p=L2yT6kMB}0)eVE^y=Dr7Fs_`dt5Gj%XoXL#C5DQ$)s`P>9!58T3sjs22KV6uWx(I-vSFV zebmW5O;4u_mT(FnKD|rl@nm*wj7r2SK*57Ohxd$o`C`HNbD{uG?QxncbwI+`x#dpM zX?dESiQqRwIxrZxL8N^`LzrFi(7jZ5876=L>6T4mt%wy^Q%Cg+bulfCwnl)`wLxSm zRFFra|gT$ToZT(5hXLp;7)?BWwGK#6RH6MWejKHC{9m*?Y`v88z zw833DnE*Q;rYnUDS~!kBr3NE~xZOJ@+Z^U|j(dF%q>Vp{hNWLpcy?gC)JO5lX-+K0 z(MfkwTzxaXHDft@Q|$6f>gVaEvk&j>r*%c)$ezcOp-Q;^=C_TPSZ&`IOn#i!4$02Z z{y%c0APnX|zbxp4?GS~GXxu=OLlWxxYz}#SY?W=omz_f=c1yfSG1<#D!~2}iu%2H0 zpRzA^2i`CrS=eKXRfXK-b*PlMb?uX zSW<{&^yz^1;46mOR&m)ETWY40zvy6eP~z89uHWY& zI09Lp2c*~cW25I{Z?m)`L7a#cd@1?&-n7dXqE3WIyySq8e9@Ea7FSjGcvmVN6c(fu@_A)?(<;M03BKG_3|Sv{kGHH z))p|RzI)^cT91r-8g{ALw`(JQ=&Q@jWH94JYM%mNt^{VcD045o`i?FVfG|EdrI9pT z2zf*T*VF@x6k7HOkK-LLLjBEB(djX%kmJWgDFdqq6D z=zMion-C8%vX{E@(z<@oW^?sZ zqiFeEh09~ZS|~HJA8TalH>y=Tr$C5X*ZJt~$1A+01pVQX6Na~2NsiLWOxXF< zT6W4@#$!>(=h=r#T(OV4uE>$Cb=AVfg%YY$IOXAAXa$kpALiRIaQsieROSo3Y=CbU z6K)+f)5y7lKB{c`zCTk|Bad}gEBY)IBig=YyL>+KjjPnOrK8yJ)#EVN6aAu*D9WeP z1ye*-_Y8MrY6W8KSXbRI%L+NL?#|Q-QIo>#2T4Wv@k4ypXF~uf8zEiGYqO#oiFj=O zvtHELAJ?ZZ=&rV;`R02uTDhm$+}*iZ5YAhCs8S%GiL$>}SCp7g?Y`FTs=}Vn>N#PK zz!Uo88TY^skemiv;krUD?tS9REuA;x0_NtO#*WsWs$-9M_&*MO+!8h#r(rG&qqq-b zIpUoB{9h>L_mAh0bRY!QhiMvImP$q@^8($4e$uLo!Z*xox6zK4f3a?}C}mbQZQd8X zXuNf(96jjb)e}(plr`yxSa*{9U_~Rdcb!S#!+Fe@yYg72)Tj>Q$*b`2aG^wXFq@wP zoCJx?*!_C%?mXfWf^48Y7_lIBU0~OZjrvrsiSER#a2s@;v;iw|&oqEmuU$n-^x4IL z!OO(iRhLtiJAEztsHV0mietq?t80y(e$r7jrv9adHZIY;V8O#O_P0Ny)uZyNXVLc= z$B-$C%5zdJV#Lv@lzJul$gzM-Z((smMI&>&2Ex{bVw_%H&{30~`Ec4I<9*|K|LgMP zHf%BR?4?NZ1{+#Dc}iRLbPaSB>s{hLm)p*qG5Mn>I*^vH(SR$i*3a;&u3^cH$JLm$ z?Tg_tn|_8S1B{{y$dm)wZyQYOeSV?_Gi1Y%Cz8Mvx0mj{_I>X2O@Ep9V-7b?vuEdvpJKVj;BF z%wU*Q!(l8-Pj1|{b=}cbbh*i@s1t_0z)pULu_FvuM`#{EaR#Xg;reZDC@QA{y7bwC z)2C0wGT*grRWmlt!X2{?);*b{pBC;_h;p}Eqw+A`BHh!T1^zRr+(5`ZW(sk_CJGKu zJjJSkd|}pN9(3h{T2}oe?JJz#PGjIyTeY)6(azT=+at zFG3OJlNo(@M?FaTgYI;{l*ORnFeCYq9}E0Zuq`(V4lR)jC2u7u3a!oV`PARJ#@D5w zCM|D$Y(G8J%QuhLJi>iNHV*^wGc{kDw^eQ&frU35Y!36QpZ_~>e{D^L0Wk!z;}E?E zN=inPxnQ!yX_p#KA<98}l2_hnlFwk>cAfFqNGvQveXP>B#JFmrcrdz-&upSGu9C92 zkD7wK_uDote48!m$+1tRV{AFjHbpeezH<6E=Qt;WJvM5p&uv07qAnFo>iTUIK6~Kwh?x?AKrX}2jU}w(02 zdc7lu7`<&JNpKj!!LT`1hlKq9Spx5MOZYaJ=MYp9K0TQ34DNDM>)X-nhDBJSM*2E! 
z{#uHregs3!<8OqXDjXj)LQrWwQoPdMLz*rh{Gv3cWbZuiTyi<&-|UVL&x&21Q4t!( z%}}V1QQ8gmygf^MpjCUgJre;}OTgyxOmOfE`*8NOwdANChX~LM~R{S{#DyS$KE8Y z?H2dsWjUs5 ztV4e@PE-8onm*%Z2GqySX}YKuI2dg5C`wk+w{ao=SHBUe}*pxM{%pYGBfbBUPeJi{7K zyHJs3eO58&z4u~STBBImJB<70g>qX%U8TjggSOWMnF4Y)mX$t_7cv~L?3+&in<!-?(E6UkjC~`?k_<}e0%k#`F%CG(!#|@1}ADy%>>fobGjZj=XXmjIXY9;q-O2qw5GIt=3%D+~qZy z7L8phPQIz7sxDctNG<*IY`yn)OX>P}wBrXrceZG=O=X^Ep>25uOq%kxF@nb)YrjtQ z=QJ5PHTC~y5Hvlhq@{8AC*T+ysj>Lv%4nKvZS%{9nI*T$Po)-(MvpzmBZ#h2yR-6y z%x*m^@F+Pv9hkE#a@e+MpLx)n{4T^Kklb%p@^tzOyZX?OSI#(uhq0BejC2T1}&o`@wu^ zE`Ro&!NRpxvz_DHYX+E|-BG6R5@jcXa)Kw`%pJwClGa zH-nj3z};S&?N`l$WiN*63&S7qY5(6fqHymhT;CNa4*x%L^=M-oT;f$r0QZ5x1zkDL z6t$%Jhj70cA$f^Kxt_3X8*(=o-1UY0DCFAFI~^(nj{o~Te;_x8wdAegBlmr2r6h>J zPlP^&*P57EQyBhhf*&txXEA(6WrER}hP=SZVh*oviXyw}%ZJlP$2*b!wX;u5T2AfE ziW=(DKER~Hv~>HuUWqLwUmp3qZFycfzte4vLC7R7GA@Ce(zZXYQOtH`cL-3EMpZt9 zE5~Fn!sRmH-DsQKo*TfOPituTg8V#VuQJn(0hec0W0SxtJ4>?+DLwk{=65ROBmUnv gzW=}PMSby)RW;#R$eu8`uizgwWlg2hn-)R;2kQF!3IG5A literal 0 HcmV?d00001 diff --git a/recognition/s4581053 VQVAE OASIS/Images/Reconstructed Image 5.png b/recognition/s4581053 VQVAE OASIS/Images/Reconstructed Image 5.png new file mode 100644 index 0000000000000000000000000000000000000000..129515b21c08eaf6298a405cfa6b99cd852e1992 GIT binary patch literal 34694 zcmdRVcRZEh|Mz|D%t9PnNQrRly|WTU+56ag?~D}TAd-=tq>>dU*(3+qlvOzP-g`gS z`F?+|=a1)~=lS>PbvmJQ?sMJO^%?K+5%W-6je?Ys6oMcM4RsY=2!fe`ujfdJz_YZ} zVI%lQ(&wI`kDj}okH3|dE%d<3$HT?l$Hmc_-Otv`+tJ-km|v7%h>zXD$H&84N>PFrWL`H-A3xnQf!67JWS3^bdaX{Akd7#hSSRf0@Bt4A% z>PuNX1`Ry*7c-nLbm;WwXD9$MuqLzY)L!o z*n}+7C>f2`h)dOqO{w{;d5%S)CD7>P>#G3QL^(TqV-MhBt7yPXMd z521*cM{Vwbo56>5Q#&X$9VPD1DgKmVZg^y5WNQI|y$=gBGxNc<-X$ps35hk%tN~vS z506nbiGo!u7F(X^p*qsi(qhFnjoRR(M0Sg#5MdXr@+Jrjb6tC%xgaJaZ1Gaw9d&$| zeaW`WJQ9}Je*O`$Fw+p?C!bgLcY5Hta4jnwDQAreY=P?uVdmlyVK%71utjt(0wdED zJ#`neXcdm&c0(OkW74l9wZ`Dyo$y`x9baeHQRCos^90wDr~iyk<5|OUSDC8=6WgEm z8V4JjwNfCvZ*9yQV$zL}sC7QPH@y`X?b2boa+jN6g;U<4vSq;nmZ)PrOnN3J{WD@= z7n44Bck=L&Z~R#WCjGK@_as*YW4hKIBZ}-UgCpOadQxnvN)#0LGpJuOG&d2vA`rZS z-Gq(S$07gGCY%I_|5`$MjI{?&KPd7vOn3YbbWy;oQ>!qY^$)>%Kj11TjHx9jH327dI~{o12KX?mhBv)W*gl8opff5+A39^a91c7mfvTY2(u;fIun<_Y#kOhtDir#z zecw0x{-XO7Y)kz2=4Jlw#7!uS1iRfkb!ok5GU8ZAqnk~LcpZQcEAN8q_%BTtBBgI+@4OU^7&G0+j|IG;6M^A*SfJsf&vJ!a=` z_N=@w{o*;Iiv9(g5atX%u6ia6sX{WuSaInoS#qpA-jOsE1v4ewn*Pgt_!qi}KrGnD zBX0S7vW6+K1k*HEL-m8l=ataKn3*X}MU>cX>^s-+%NoQ2!~O|;+`N-~JWAWLRh=^hMBO-DP#}<)!Ch zUDzC5^!X#GiIANV>px&lVufOWwxk^d>@=!!r*#5)iyTnL9d{E>CBnVW+Khv*yA}km zP>#TrG6ybW7dP1un3R5V=)$>*UN1U0T)38Wc-$9aQ_P$=+k}KL=~JSZ_nTV-mo_Em zoX)OY#&w?2hZln3*PYkp(Zgsvfp=oA=jy?w6Cu^PppXpb6seNPy75(Pe(Q^>+F{wV*8iwns z1z$V93FR6r9=c2SdC<5noTMPQzx`cR{)>NNy~ujb2=RYoQ~Dc!8D`?lG(3L!Qml*4 z(0O)v+~n?#Rn=Iv>J?XO59tsh1v*zM^{|oMZ+8W>U`Jv3J)JheuTa}vLst6 z^N$v`xA^4DETm0joP1YNDeRIw{Ian1S**i-IFYNEc>a`g3Hp0ajfwN>@Hn^uIWqPa z-+0g1+^)Y;G`VwQMK+Xb^CVVkDS&xugFrOSL4dh+pf=_rTqBZ_XYhNV9cxnI@t6(}y-UgckpL~-6a zi-XF5-RJ7oT*9s5oXzWo&h!HZgQ2VZy={su3=@BwjZekKHzsJ#hDs(^jvGiV)z9TV zT*xQ-30jM{*9=tzJ#}Msoe(Y!eFjby;wy07gFzU0L0A)ulzFoNL;{nj&FuRpfI`3^ zP$zVt_tMF|^aJK9bH6t{(#^{*NyFtSu(OcU@VNWLvgsG}0}v(3CL@>bQehW*P$;=8 zi+t2KB68o*!=WGU49zx+4A<&{^jS=sDV`^z zO#tsGGyC+KqDftEeuLn}kL<=%{;j)Jk>4&$5Fg44{)A!qI z{4Zi67jH{R?eaRfu`0xZ`r@UqL%Tp@5dGpz9v0;${tJ>L!QMk^U3WzWv$|GOLyb{l zk%kD2jk|YNSb|@_^>tVM{ne0s;U)PiShaq?y8`$}5xrfYc)It(cQrw`jTS4uotNEs zz*JoaIj?U*M`}=L@$60x@z$qaXCp{H=G$~fJ_~eG@Rx;hUEPnaS$DDiQfGMl*Lv@? 
zV!n6T#hvl5sxVi(<2wJqbWn~redbb|N-r*op-FtK+wQxje#tcO7xMs$gC>9W&S;%o zLNde08+}tfX+9zgeR&O*^aGR^=k_1>+O*}?)!es_$sATsn-cs4g`TlQ0hF-GguM8+ zh?J+$fl4CVq5A#|ny}{w?@sAA7J``lEZuHQNJ!KdTcM5}p|Ant;NqoAWnyG^9CQ(n zK1icFF=gIhNm!PW@e;(4=l7gkR7c9C{cVBoB9PsNYUW3kP|i>7mS?RuUI^DxCeor1 z^3T7i*2hY#i&DX~3$mR}R7_@0KmnM?m*tE;je1QF01iMn;EMkjgj0%xL~=hE%!rAJ zqn`rg@*l87^8XHz{y(_D1~n$V$rW{cDYtaEF=IFk_nuK??rNEyApIY3m)D0B53q?Z z1-%Z1aEBwEaImbqNeuJTeLvKBUBJ-UfB)g;LIY)@7zzR4pf6IZP?+*E8^ImYzc}A! z&VPYPAM4#dJ(xM7Vq(=Oa7GhB4>)W!uEWE2IWSoOd);g-81YKYUk5diMkPOi<7WnrG>y+b?r^PcO3&3njh#PsiZG1qr7b z$Zmj807P4W$vk`7!fEdV3lRucgf1Ul4j7xj03^&^J6mptLg*Bjo6A-Y8Ii2`Tlb+0 zk#Ntdv7I@V#zRg>8~;cm{IV17{cZPmwmKb)XT&3|-B0Lic?b>}JPmVo@a@OiM zqou+w!|;yqp(~Icba4|hbZ#o!J<%H;FGn9#OE)F@_PP=^Tl{Ogc|im9y^|$=4P9`D z(?P8Alad$JAa99q;FUc58q^0n>gg4wERYSmq#WN}xD!sbIdjTJDif>f4_6pAuA7)e z&-boETL6D7mZ0xCyH4yj%2Q$wzo7d}oU5TT9cT^Q@TL*2HG4z7umDo%3tGU$xyiL; zVncA_@_KXo#`+*LC6AmtiU&WPxSR;PiFYInH9C}>TicUg&_h#%TL9HajAM?$+sIhl z*E6$`=Z?_ju<>au+KQv zuC!u`Zx!)~M;X3Ez4JDxb|m4O#2!TJi}Lu`Thi1D18_Z+zb1^tf)woTC3p8Eh6%>k z=)wa$Bz=OdW!pK%yN95o1jNEK#6sW1biMHKc>IJJN7TT9VA;mbxiEA=@4J57Tk~Kh zd$(`qV0SfiGj`$$OlFhK5-vH35gZR}j& zQ1>`mHPqYeYluna>_hwlq5d( z(l4>yGr%8fk;h^X09!{KC}ZFN8I;r1zx_JQkAL*ED&G2?aMY(b5U#nR7#Ua8mArMy zhS8)_H!@_mj-QV$bUV+L+$4L-0LEO`dIJPp{q97hLmwFwj{Zi7ox(e^$E3&fiaym@ zWERfe9B97;;3O5Xu-#kJ2I77!aGflB^H{4Ld0yyj29HzyOL5Jw0&u4-juP&V+3hJ1|ib zy%usv>}!h1mORySa4vT*kUh9K?Oklb!!AN0l?n&U9(q~TNdOu`z5 z1J@T}XPWI3mo(K9hoFAgLl!U(hHwL#i&bb6+M*u!YLUmou6B4|Ld|Tx370njv#=udQuo#xu`1gZ6f`4_CMI;% zS>3Ni0sug8EBZAZGbVPgemcFhHS(vM4KKh2tQ!5JQXf{g*r#`oH@?5B zdm7LDzkgJbdG8;>ELx4acT|d*7U(JgT+=p$(GAURbl(4k6s%XX>akc4vhNCSTg@%S zd64|C7Pi!X5DqdQ;Nj59Kiu=$byLshk;+sOA6Cl^Jld{NQp#dcC9MGV+v1;EO8siI^Yw)vKnyz^k)0*L7Vug>Tv?smDXP!VXlO$OLxNm+&d#fr{AV|QWYe+!=WYLD;+gdy z>QxQDYihC>EggADT_;*z9UL$JFJ*foz=DnIKfC1#r%<-Auo$^SM{u_89fyFc!PP)z>#fdjPvzK*ONOP%pBDeY>klQ0S4HM-OkP-u$Mxng zPF!4sdis(y+}T;!%@5EqtP>0=`QzkU^6?~~xwmmwE?1{9Gna}ea%ppu7$S&N1Pj7_ zoU*bq2~p&ZKjo-hWlT$p;&Y`NDje62&O^bX9^bjnclYj%H<~a=Ok5n$3jN|6nLY3R zIOUJISO*6)4I6_sTOpD7J$5wls`w@*COeG87L{WutBdE;p8E|0_!(g%BP-{b7pP&O z{ikdDMIZxjDR>EVnN@KmP_7^%btyo%{v&k!!oh zR}L?@@G15W6Nw`m{pr&TmICNwt6f&C+=qT+nrnPMB+ks(|N$0fkSGe=IC;i;0PqqtQ2MX7oo&%W})M{U=QrJzVqlPhl^o*KD-{H;<(+ z|19EH-5RkP+F?CVc*}Ze6E70Q{CC(GoF&(54G>Kvo-f;H)3dUEc6X!T#(C#o@FSqN zJ={CTGT`cS!QZN@+a$buy;wyVDfSKz3qXA7e(Ersn4DAKSFhyYwz4mjk2SA*0*A`6@w4>*h!#9@+rqYv)P~7cE!>Cw8AAhSVv0o5k&%&s%>$40+}!q_ zOmT#`cyvk0UC&QorH}5sseMKZ_Hfu(rQnh0<;uPk_$fkGHvaLQ7Y}bgcMypMkFhZ$ zaQDBRM8A}lzI%Av^668SLftFQ&d!Dw7VY46oY0Km)*qIQ)WOTdegsX=Y@V2J37d2O zUEMrEoAZsH&Nf^|Q&0u7KOk=T*mD3jrKyPxoZWx}Ed~xgS4&$vb{ga+oQ0+B=TCyD z=xBtXU^s4#r(`eidb6dYL>BIzOVi7FM_J{zG>KEUR~!@(XG%pl8>1Z2@M&4JK=_!keh{lQ zKls+r5D(5>zxYK&1WfC}15iw_FytGIO-)fb-~IgO4%kY~LTaxXW)LZ)FYv(Q&Ku3N zh{DHelD*O!tbUwd7EK$>Hzmstx|i_Q6=xop4BS$4h{7ChS$RVb%LeiBA#g@*ZC9dO ztzBqh2ojhjp9RPy*a*eoN&rZ85LF%XUIFL(BNgT4?2%|tXA}#`l^Wh?g$I>45=~mJ z7=Xi~_Q9S;M@7vs;zuZqT%MkuUW;pcrCoH5G8!Q#m!Mj312sL4cD_v)2Ob?ZLO)nL zxc4LjjLOo^{c=8?6{E#^?2bZ9^rp0T=VyEcByJZlkrUnB@_?KEB4!p&SqPD1<47U_ zN8LX)HD&9Q$R^K^61z%*{q=^|(9~3=!Z4+#5^Pjq;Z2YfF7ECH=4EkN*GXT@)de{o zO%k5!kCCVFt80U6fE^A90NeZSmayue6Z%!(Vp6Bg!xDX~l6#*XQ}f`GJvi9pWfC_d zRWSC!R2M#?HbMcy$HzAdTSa^A5ArMzJE*oD_Lp2Cic~Muwe|r$9U&uw8Y0=tPw9Q4wX?UsIhxjK^Qy-V?0L8;e{_hPz}kzEmf5{QTlBR*ec@q4Yf)h% zQadHw3s$zZRIdjLBMOUD3YbU;1jWRnDk>1qTdjUtBS1$(B_vn}1u@2`;6Yo%ChpT` z$Ge~yx1F5S21KDaUx_;LB$$sjJVR!BzXy9ZDFgBx1X^HxORc5gOBId?4+#`#Gbx%k zK|Q>Yq|Tn!6?3ihj}ypbC4Px-8}^=40?Z!=L@_V(r#FLURyBG?U_KQhd4C^#Xj$Lf zR09=OD1$aiT1ezuKAzrP9s^7iBtE^Q;Dd< 
zZJ0gwZNnpyY=~}Gk$pg0P&7>m5T#D#*~x$4Ath9^RI&H1y%uq5Rg*@ydbzlUz|H2doUQMtE5Bv;G)HTH(`>~cJrD!r;#WyJE@3_l0*q$aC(2Zc zb#GJTK2F~&cm(pnv?-KD-0ffU&W_O6F}}}#oapryYXWTv%2M(u62+?OSxbpB{sJxv3mZ6jBYa2`;=@ zX(*(IZnFKBB_+V4Ufezo0k|!hOe^N@RT5kYd80{KW2y}oXs9(V;ogml!?bcqu0<_x zIQ~kvd+j4zpQIt1)Ib)V7Wkso3WSKd@kmDpNdi#XJ3BSVDJiX;oyl-BM#l=NEJhrx^k&Z=X!Ghtm9RmZcj`GDz1yMCU#Zkq^CE}&Dj_<`@6YO7V2tX%KaMLt6!}& zEsrq9N2%LQBDCG(y^Vy85?FeK8gx@x-JkQL@=V+Lh`w4eE~;}q16c-glLO$&hqu+S zi8(Bl`~g}Q!LMM|=3ZHt90s!<>-|m0Ekmo!?CDcx5VfWHLK!NcZT$XCi!0Kg7XrZi z@4F{HZg2kmV+OtC;it#g!_#Y|FMi(p{P@m0Vs{r8$blH|qaw^zJSH;!vMB$dBskPwG_ zk#_cv7i9Dvy9*CG+vcABCVDdEz3wZx{-DX?a#eJ^A!p30Sy6+GISf-(8GQRq;gF-o z?z;hvq9K3=aQIIymtH5TFT5|#`+0lAAK(3;2YvT~TpQ$CyP3>7ReREZ-eXI9p-|s< zetu5!LTS^o5ER{+o5^juepaD^=rxd~E@E*YfP%I#m?*e8qsc-q8|{Y8)}e@w!Rmce z$p&K@gNL#ik}n~3g2Rp^&X=kMFWamoJZA`Sp#+K;A0Hofu<$-;7(WLGsX;2>kl{nv zRas{bkB3}TLgY>+%;l&ZHy4+@$9FX5(oXm?B+Y6J`Jx5}9s#(;kPbM5S3T&&V1E?Y z!{g&AXyfiNEZGc5mGmy|-hh*HFjE%j7i+j7YfM!I;VQRjl>jF|sunx~oB~9c704qU z*?n5MgNnrIo%t>tMlQ8zN&ws5`Df2*@xmp1+v{^uHzb@aJbIq-gwN-Z_&ud*SvXnpBvxI zkJ5uqOVIcWuX~l)eb=-B%Y~a!N;g0+MgnvS&|#;I?&mQHbw`rGyWOX@75Mm;-JUiW z{LO26frf82=oI}F?`BAe?uO59OW;L70NW+7%Em4)I|7DNf3i{>uv$`G{_PugkU-g+ z!xw$`E+5F#xL@t9Q(xJwH<7vXmOnlo2nFm{6)Y@r=Mr_av@D&RNSA#WF>r85U&_jm zwzh=3ySuk(Nd-TcktjO;a`tJ^O%g}%=;zV5hiL>z$fx6Bb^;diot2GB^Z{m_ugAf0 zKDs(xen~M=&XE=)%;Tz}PUqP6S+IAy;Xfty@4Ufvp2EVy(mkP|_r_lN2DNP1!?n5( zWNYMACXW0;y7&`ok>vg%AUo= z^f!~#)pZD+?0)Ojd=?UZhM&o=DG2|$wIyg>kzOxOOA7dCjB2sBr{qV8SLx5)P>NjC ziY59$)#Vb*mtjrhq$Tb2gli%1Wu?lbOSZr;wTGa0YZg8WkxRsZlZMTw0vHU&G@M;pUrD# z#~S#ZgpoIo{y06kqY1Fm&9Fd_ZF5n871ngfe6u?Wj-Lo=&+%s%dQBJpT~*}gnL(wt zQ-z+M9P87(akDeEj@B zfH)BnV){8i!cwvm@Di@XBwktKTahkO?7GE&0NuvzS(Z#7^(~#8pTd1Ik)VO#lAy+m zkY2z%g;uJpN?G||S9*|{X*Rcidbve}h&$txu&^fhKhVHFCC?cenV7WB%y5vtP)Zxv zBl;EYc15ws=J-66=Xrn4QUfZnk*Z;ahdH{t?vK&EGZ}g(q8j6pRN5;^kAW(XbT+C| zWXR_?KeMtRal$P5E#r;i=BxP*u$UCp$6VKKrqIR@=BQb!CV964nbp}J-R8qQ1JSs-054uOjfD zMc^tMXuBLBra=A!CPGeH9RAVXQ(@aMg$XPyMd@n**t7dSspI_perXP8&S*x!YasO; zZX*g}>##6+K*C<+4?Bs1tHf^4v(MRsgRiKm;YsfL>)wRxw^}+nvl;d|EvHkLp)!a6 zyk~g8Q9#m34lIES-UEHfZ{5o@TU(!Z@BaaTTqGLyp1|7=%Rfcmx*KxkL{n+i;5sEx#7onn1 zOUpc72eQ1b(UmV(%rkpMC;$CR!Ey|#0^&2cECNvFhoT~&tnhoXrm6`yH>9Z^=3CY_ zOBYN939Z7KTO@^rIaO!QH^bz?s}~j*x6gL_W^pYA;06NE(yZFNXQi>B<;+}}nVEy9 z;kmY%wIByUFZHx_P*=`>Wpf+AF{|%Bakn1;fX=Ygy*(e>%`HJrBG?wsK+gOy%hG55 z`!na5h&b{%g$^g5KtZU}3jmj9!0ClD$N{-)+&pch%02pd%VXs?PY4(nYwHqdLMi3< zi?U0O?R)QP|1%%E8clfG<`coD0j~jv$K-N}Z7(7fEuq1ZL?fBd zCX0pggYN;Zznewls)jY6I0CK=#GIw2+sU0=32gF|kc8J92~H#cU=7GpT^0VVs++|A z$wCLemsbA7@y1s-6!^IUZ40L>0J;JM4VqipmVSP8-rn8;8#Rkt3z_cLwzl_1bInCR zJ^K3l)gLKl5s^Wk!wkn}5}{pI-Z!K`;fQVH_=5fsn_-m~uMPC18q+8=9e@l%8h`*B zU~lC16&ROjY8uI~(jSnmIaHrbD-g09(E=d59bl?EO&#+*B_g%U7EpF15HfZmc=R65 z3S;v0i>Lq9cmr+@t*>0@r@h=e20lAu%VX^T%M zm+zc>lDeg}HC}rnKrkM@zSlvm0wF`KL-VEUDMPlaeECK6>I?}Z`I#02x3?RC6spPh!9dEftY(oV4Q))p%gdYG zP+0dGNX@z(MLPGRH6iEP{FQtNo2-wSpPw)7X0^kv(J?RjtRJ_=3dyOt z`ots5o(A|5#c`{Nd;|nvH>G*=h8`h%PKI6ABrE5aMr3MK!u-Fu_V>1OVm;F#GTsa1 z0Ak$v34q09a~kCK%Rev zz7@vz)8qiK^Q_@adP#npsME(cUAv3{4!^2waOcg>H+Z#?A+>n%_%EmOUsraDorN|L zU~q)B53T~J(T>@d`t7eUb9*l8q=Qg^$%+}I%^qdvu_m?`>JSvtnOD2@U#t|D0Wo4a zlI@IL?qWL;7l4<`^5?fq8-kwrD6GdUowgG$=FtpnUZ31PWX~6MKfeSrHjeIte8<2+ zJ;gPs;^Jbn7b=G{l7jHU`I`xB3o~DX;xC<)-rwX|=)`jsE3S-KvUTD?f}UJ2^NWJ~ zqvN|DyK^$GXk)Qf9Ps<>Gqk_zyYO|)O_+cZe zOtQmS=a^ps1rY}ofoRzI`j#g+IvXK|ENOpAkPd()3ccLJw;?s;NLupp|2Q!C(95IW z%wBtacsNz$t<_#y21&OE)~S!tUr5&d7Yd>37iO840Fz=A{6P5R&h^eiiWf>|g}+2x zy0glMV^!7S#};=r1V}UiBnB!g;Om1|vm<07kPaLf<@uuVmz8~TaiH>sY{yyr#6_5{ 
zzlbz;=F@(4Yixt%^4oKSKw1yR-p=kSWpv>)FJIp$^@2oM8`rzNZJ~6eo^yq1Rzu() z)B^{6mB<=2`h|4`iDmT5{-MDZK}MaP5XrtJp5*sSp)JhHdxJiSFDAfQX{6%Mvj0w5 z7WK}@`Y$o>6BDu=e!qSYzr3U3V_ii_`aoOT+Qo%jM`oK~ayRGD($Z34uidMLFY-3X z7Fb;SED(>(Zbo(6If!K@$1A9IXDJ_WV?96r`CM)bSQ$__JaY}XwJ6kyEBAZ>RgOtm zsX{P56uS-Uf0LN5S*T)w)T8+jejFlaL~tZ4(hXQ?Z!Uh?fU{~jPZMZ7SNgXZ9dJ#g zH&YJ~p+O=3%^~6`FL$;ktPci})&ac0^LWR$ zhM4X0s2wPq+ckYpDFOL@MX12UlP%>3r`R?p8uU?-V~htR%?cd#r^lpBOiV!LR|ArA z!N9BSf(bYNyx+c#E|QH7aLJ7Z9{m%^wpmX|I)3ojD9BGs?S1~RqDZeEqcCgq3rU+a znN3h-Z-}FUUlA;!u27h>4e>OzfLUrA$tB%$bN0&lEKp9qho0A(?#9sEdJY6+nCW*9 z#rhuNH`?DaYsv)zYfo(IA8i6551_91Lyj`XbFYC_qQalnO5b#J4`iqIosz23pUpId zgtdRQTHX-=2ZZPEA@mBX#7EbTX1rZoo(|fE#rsR%NKbl=E^Ch zLfvJ}7^{91n8tb8o1$6x{KZPJx&cLa`4e+CVlrZKdRC|J(;kL{zChu^shI#;O>_er z%Qif8zu4lQR5qqa{@f9`Eg{on7Ox;~#o>RvN$2I3W%Oi7)LtMf8(aHWr9F;~089#~ z?y`}Bhv$4fyA^h(vm4JT4Y+%FA5EV-!X*McqkOcGB@mb)$F0N zP+p-3WIej<3%Y$0>*(h5?zEpTF&4y*pL~VMo{qo3Ug}T>L)xP1dx5g;&dH^~6 zYQmej@Nncl7d6MV{v};r?$?V6pKD{If^tXBkqH9Cx9B+zg7YRzHm}{k+4*>BxI%uD zzHy5XpvaH~;auRV{oK5~*o^z&Z(f0*0d|#6q$QImfKb&x6cEN-|4?o7vJ8K4d~^YZo3KoI3#1CbrQ=2q(2>KezS@4 z3GHlQ%wS11rj+gt`*b_U_2ovc{ofsuA38+dUr@vUyl=4J?pW1UN$G zf%Dhvj~6n5)ZwKUD4af}W)?lpw=bxwmeGD+JQI_^dy`E_IvL^nJnA_EA+YY+tgm%- zb>XU%TyENxd&4aUaSA4k!0`oMO)@nG_MjlUYu?6MP(GPgh?pFW==oTqDSSVID3w>5 zcYf3~db1EwSlp&-%}A_T4|)REI}?EN0EC~(oI2wMUdDET{7FH2P7a-GQk1{=GJ=Yi zb+5)b^2fF?0RKQ`c(2O?G@AOo!TS>a8(9R3?u2GBm@LQjSiNNHo7RIJ{AE8=qaevivx?QH+1?OzYEQC13$H3l?*Z^~dB6oRn!Jen-V`=KI|H z+P3)K*L;##=sYPYiC-sy;GVtrt9|=Qqc2~+SOTODydC?#9NQWCGak4B?|@VmWrRUpN_I z#0yTrzlFtjL_HhAvPycX9EdT2CI=3X=)q*X%9WJ@DdJ&(kgV|NAJUDy(h$ph0JH?K z)1QPmTWvq!K?9*R<*#W2PABn}tj93vrTq3Hc_-#O#ttFJ(wX7|^F^r-p8f3G4Pwap zaE+VO{$b{Po6UOuL-l9V_0dwYueCC#+G<3^#fhQxw{L$86sRHO0Me-n?D&7n@__p9_uO?<;U zZ=Uaac@*mMIKKQcWDYO9C7t)Eh!WT+w@l&$r8I}(@v62{LSHq+B&;l~4jw}6V^;yw zc_ycamDoBKC|(;~inQ%no1-NmsNK7rt*G=glYu(u;iJDLBdgQ|ra*vU1Na>~T7f*; zX1nyn9cQ2Hq3KBP*XRb09%q-0eZ%qjc;MGFF)4$#w6#^fci3T92~hBlJLuvsdW~?} zp=B)@3%XyxR~zs+)|%b^-VI|-6QUPp*^`;$XhG;CMqhhs*X40h{x7G+<-?0d$p!zg zcRvrP8Wf?H!_X!=?)=m-9U-ishX8v6=rG)M_sa1SyomQb8_ucvB4%?{)65rv-Hro? 
zSm2f>3#^w2Hl%HPSn?prZ*Or0;E&cz0;U-d0LQ>_n9NV~5AV%#y(_I8&!(rXj-@BA zPO3Aq(V0FJFMt)n3KrfA^L)JAng$9g7?kK#wD}>*cE>=)Ev55rWkmM+gCkjT>xacd zC2M<^)z#I7W0VX$dhURtKHe%%e^ygd`!4=V#5E70+vl>T9#z}(DkYEa;8bI=%z)h< z@oij87KNP@ZFlN~QZM%9wvUe+8w~%j5~-zH`L_?h#mNks_uSn3y^gzYG{H{d$lKSk zFXfUw8y(ge$sfmV$726N&n9E zG3WTn)$Q5#?VQ7UHNTviDbg-{iue1}KhHy(*6=*{mLB28kp9PY2dc=;!xXBlsINf1 z9}E*=ysoU|UpU~P-W(_E?0Z!^?O^4^oAPn?=O5wJk#=Cr1-M!IR*a8UG+q9X_-qDf zR_7Qq-h8cJeDkmE(AJ}cy!$^lIV7$I>iAUJYlo~fHEUN8(UWV>0UP}80mPo!1LSr$ zR#}3Bv-1^$rdXYj8JeU1Xu0P_IYFcr$G;3}$@i^N;HQjiA&QKipQNksKohCSsTd!}OE*;UeQ+SYV%X31T|1>Ipv$Z6Q<8$dHC&gyipmW> zz!I)7)Z!v_a+&;fHZ=!t$O-TV?FX4LR^l2-z|`qvH1JD0r$RmQL+}Qk^dGyu6QAQU?fsdCX^8}+?@Em zRIx3)#;yKihT?26v|O%%Kg!Q~GN-QCz93PyONR@(pUOU%o_ z%#VesF2BJ27VH4GzM)Z0_;m6F~kNu8O1dGJG9-S{8iEpIYDO;WtoKhnKb1J|q>k zIHQe$?Pta$psSP@bsY?!AP_eQAuxII>I;XUPP!WS>SAfbb=T)UVr2UULC?7qQ+k%}B3@xO2d{gN zvu~fKf9v%8U{spAUBS3o1ykoM>1&{S)na)W*!(?o=H0n zVOPAYS?We9B>buPUVYf7{eTwS=LF3IB$`2~DVQv%e;htw>9Nm=ELiO${^4-*Pyngy zzEGuWXE`WR7X#c|tKc5H!$}>}yCvx4OZE8FO?-A$y~^v~9@>Zt3hA+Q;QP@*=D7*% z-t3OZUoD@A=^!H&iHGSMKQ!JM1W#-|4}4Rbno}zx%%^W)V5|F;>K+)3nD0uPm;NZv zswzt#P|kzgoAddsUt^kj-V4m@S|L4&93EN3>!`p*)A|hRHWrPe?v9s|n57;BoOmrj z!*~IoJ@_@kAOOrM0%s41b7q9}lh));g}?QKC&brptf0uw1H z1cn_RU$D`W#~={T8+Q1i%xm)DAQKy#nha|@V7H?;o+i|_L=>U6xjF*IQeW`~11c5I zBo4@`t?gjTUDe~U^eEl%L%Ubgc&|_d-wL{Hga9n2-tJl4rG7sfK6qW9oZPh5=L#SL zKy<`uUtkK6oV0e!yT>*LPn;Zq z@MJ?KbXD`(_`StTFz&aouz<6wuLza^<1w@6S8oZZ^|gM8kMpkZp56SocP5HXhq>fY zZr9AT4vZ|XASh(e5_W8m^lKCuaJt=$mlevgo$b>hXBL*IDIPU8cJW zse`^B)C%JHE5pF-9N=$0Ir!rk3E&v$egd*@z4Ib@@a%%q6zHd9$9yc^E-4dCRQllYF0K-jI@{JD9)&YzItV;QT_sBO6*hHi_zt zx({D0Cld-*WIo{n{sx$(GaE3F`wgmoaJFX=f$B7vs5CC21G7j;nuRv6)k+OR*i3E; zCF@`e!IaJBn=g@(86cHht*pVIG~Pzc9EoAG&LX}L1nc1b+V;1x<|3^v*e1s z&Avzom^m~1Tkf$lc>X)eOn{x)RO*qrT5BiY$24J{P+woP^L@J71{dqEV1n2__bHt~ z!=A1I`}U(qyT|{X^#u@^X6!{2ij3h3L40~|n)od1GAl9V2ZNU? 
zccvWnd`Pbw_%Yfr8eU|duoyU&$a-1|+P~J=AeVX7xwPYypLWvA}b$KPHXY7J2(oFi_t4L zelxQ6W+i|X=pn$)3s|dYFE20mXlG?;$h$IJQtob2Ua9qp}?PKfK;V;Kd=R@`zX12 zNl8hOTH3ENm$819W#=#58|=9FNTH`|sm*GJ^$LyR`z1;x5=dTw_Z3&Uj~CB zpWz^Nbk3$!abz42wp(zHj{DP96TnNnLhIThgw5`nS00*I^3j@!(klk^>kuAms#2Vea?u6jNS#H8a#(M(ON|0m%lU9D%;q z$wYlT)G_LA{28J{D-to_$mSTJ=<2S~b0M>JvC|N_EE;^xF*%n)bC!`8mM~;`eXuSznJ)@z|Nu$dd5H z1Cukd^xvhYwQDyT_lEws0E<;4(DBpb(FWdwxp~pZ&Ptx78<|ordo8 z0Uqb^p+WFFAiCH}ARk6p2bqXy`z)gCRSPs`zqAu)mf|lZe{#5OG}uiJPaR)T;G>5n z^Ka0Tthub0nyBg~PuJgAhWyBR;{`J$_0$@u+2g=G_UEe-`s9=MDNiDZ2A5U_=j~`y zs1i1wiA$ttabHVpZyQav^?bPGDh(!62!_7n2?_nm>{aZj5d{R1ZYk;RmQG2L@XmF9 z?|bjx*TEQPoPpwY?Y+Kh&SyT)XFlEk1%}$;;o%=~yoRZ4o*+iU9kM;jttnxw4 zi1^zW7Y#Gkm{r)Ge~MP&1^hH?19<+(59BIlOTuM1 zc&Cr$%|cYmv!ke4N9MpjqeKBYtxkLOwUn_+!Y5#gYh+4}PkSs%6yVoMfcE!RAiHo< ztcW6c&X3ysnQOwRMvA`s`RL%FJ!`n4pk96KPR9+JwofDL{X$vV9Pc@!)pM(M9V-0n zH5~Db8K0$z(||nqdw<{o*uIV2+;lQ(^eS||s}6Tt4PkRj=%Eyhh^PF?P7*TV0wydba4lt?9uSHlO-tB~7nmggZi z-_!{n)v9|9XAtVfOr?tr5_wX{r7QU>RvmEyoxjoNA>7N?@m(`3SRu_D@pNiKy3E&Z7z(SGH z_wmU-IF$(Xta9?}QfKn`t}69cvswL>*gKB6yyPHUeg1cSWqrb8&b@5mr2cWspUbPf zJ1qnE7n^QnW;gU6m%kpyjCH*LuA8TA(~scTL@*8if|6Hw6|W;ssHO1(`$WCio_;PsJ0muxzFHD1MN1(nvT zn4#+jkKKJ)=*Rh(*R!Ov-q?P*Hs(9iZ1$1J2pNjM6s9^(vcL$3mh$wmJsY&Bd9_!Y z8jmv=TO+(YS?}oXuIpY5o&^JIYm!9=oqlSMr*AEa@i$zFgUNp%pckT-rIkamjd>w9 zpg6VpE7wPRb?g>F4 z0qy~NaASZRD2#t)B3`;)UM}|fUWz%PPfMJ%2tCUU*#Ue!AM*-hMVMl~)zrn>k_=xO zlCf9;l1p9s?=F1UP#49lS*~l zg1M|aD5PlBcjZfwlOkKC^65Pwj}VuYuX!Ym!<2h#s`~NVXrsiZ8G?Hb#bnnk_evY%)&ul)z{Y zhlI&1u&m#)FaUoRM5U-6+-IyC;TsTmy*w3kZ9w93bdS<)YHb{*r|H$`>4}NC`gqQw z0iEXqn4CxU7aU7jvwN2J9$jx{>W274)*Gzww~zCluw2ZVy3G(f^M!q={NzF}JhBQ$ zpf-oEsteSEDIW{J@?CYZ;I$}-(^c(9eH}Kd*@&%VWWC17;4}&T`gh;Ej{31iz!oN| zdo+*5P8p<6Jg&=*qS@x^347|9U>z0WanF2up~>8pk!4_U4`Ph5%?~6M&t$`hMhVp| zNTPdUjoxpt!-?-UP_J9{*i(wlw^Pk3iG>E5EN6Y5oMeygDQakPPLrisoaqO8KJ@4l zOjeKY+icNqlhx>$Qip5UVPF5f<9!ZE+zx@7Lbrcp8^1{5_zleYOp7nBv=LN|wXu=_ zDhhKhdPJVqaLPKcMF>Kx*C=M{#H7+QCn>JdvUeiiM zZMn$(L91SuCXBC3H=gPIO26xw7zB!q)w#oaNQ@4co$E&TyekI-K1OkyH^=B!P1LKC z;pFh~U`cn`!(mk4tx5m#w6SH0?i;@V-Q@UfjpwH9VwKb!RD;1!sw@UQZf}(2;sinT&27bXzAj%!it3lkM<*m}^7Y+?3`^w+5cJ=0t4tl>0uqm8#~F{(i$g zH{`>}L2LI}Sruc%a;f<^NqW+P!B+Z~$foL!mr2{*i7SD-riMw3m*Iu16S!Jw=wNCH z=AzN@@kmRyDUk=XrANltJ!Q*mQi_c^~`H@Z-!iK6(S+_f3k1< zO)H~@2envLLojYs^~}7m1gwbShO2z>!VSTs-kJoqG%jiMz44c-Sy9t_H&x4qRpKS< z9xvl=QxY&JEhyUs%=wSd_FJs=>{AJ_F#hKt!ND@L7O|6;m1}CnqGNxgO)PyDEzMn! zmBeqHK56hMHz6FmC@UiW*Z6n>F;?<)=D`Dl+`2;3@uXnS(HcGd^JJ3NLcI!Ra&&Fa zvX$Tg04XZu$Mpw^{rX;_xk&ER&A`NND;HFFF=Z5q`WtXtA<{qe^JHT7o^^J0yBFdw zZ*$#P&_yv>81#{e-_XpmZ|E?zaKp^x`i#-`i)cb_(}fLK2ZxXi;q|rRgIba zq;d86)HNI5Q=Xe^*?Lq>DdlS1P6O|<$z40C#~nAG{bK3sTSDU>YLW%MTK)ln&~p0) zd@!ZUon&w{Tof!zH5KIlc~K9y+?}s9uu~;>(!OK;{QSIt)NJy5Zeiinpm?%+2bOqn z`SYB7{XfuxFFYWK!@RlVq$1EdcV?>bz9sf3?uWF8+22ty0+egb+0~+^L09$9{r!=e z*^e6FKPw+_dpc;PGR?9UE>}K@UH)-&RB2Yt)j}O{|MqrFhY8`v%_wC=YVI3~ym<2- zN`=?3`&Hy6l~*+mHl^2XN_8W-=z^M!1r6!VDiY(GlP`3f@EWz*9AIUo5A?}ZUUKJ? 
zkQjv7am5>VBCErN)4|L4T&6WI~ihcJ!3>IARW56#czeY+z z+{)DYq%cs;HlRRw;kusKF$XvpUD58fpv}GT_C~v3506>r)k6tT5OB^K9|Q-w?)BEt z;QYN8>S$`H@gSR_yP~04k}{}0E@y7UrtSkZzg2cwNAas1E%#K>Mfxf8w+d6w^2%r@ z12?x+&o0D)y`2*7>-R8G z?~81suE-V%Q+$IYA7RoZ`_l)z?N{%Yg>n*dtuZB*{$`=$g`<;fVL}}0&r%uZEhs1S z6%sBNbp{lQ!kM1*ZFBEmPl*Z-(WPTAdgT?!l*NkgmuwrYRFH5UUjmbS6ZW*Qb z-Jh|Y`|-3iHIdxMOM!`N57K@pF32wAI+<^_e|;1@G7)s$Bc!lfDJ|lcTX8JmNtE66 zqxWf@1RMORFZf)O9TYQF;{!P2Qxd6?Xx&x0DcJOX?J^Z{C2*2(98g!5)T>kRY-258 zWmD$kJm;qr$(GA)-$`FFm+YmN>&F%~W#$&?bksqkThqBvP@fl?9 zkW_L0kc>pC;#cjB{%7r<;`?*%S*y@&Txj-ab9}C&BMozHu+DH_H^^ngBViSlw5M46 zyEZ?-^ZPPWC=dOsJ_a({#a^o+8ZP5H z7D{sc#>lArGoKF4c|8=-rfv_HHQgV3VpL{F-Y1QOyB)NZ^#0DFHhyzK)gSQ%?@j? z`VV?)+r`a|X6)tn^=im8yl>8Usrjk}CS^Lp|E* zo)qr`FQK`)xrRZ4>~6U>&qrC)?2|G}H(#UVqal9E2f?sQh*Ccn8+Pl zKI3enhxXmSozOUYZ*N3g1QR>1w;KoArBwGC&AS-un)+h}{baXw-S?w+zY6V0p|s5K zFT@cjhD1o?e#z9fYEDk`g=}PGay>DQb!VRJZ$mpP#)9D_rVVRIeGWd=RW_1Yi0oOm zKR`*BqH{m>NVIsUNY=E|1Zr-Z#d~;BoQg+Gp~c z1V`W>zP!JBbYh}z(Ui4twOXMI1Lqhaxg$=%&?OeM`@7U3<6 zXMbKL<25rSPbY?Fy>o2$cx4kHir41r)r+y9i!bk?d=DR>gc&dyk{4IFT($L?UiA?S zV!J7#-HcAEZpR$Rq+f>c3-ya)OU<&?S-d?b#0QO3X0n6*c=}{E>dHs*prcp1-3L>{ zd!dgtYYz8odg9TB71zx;DAzv`naMpZF`KqEea(HZZK6Li5>K)#%Tq7)gbts9YsG$wy{)l=)vjWR9jj( zYCTXk=T{fbBBcb~AhA@@*-esdIG8Y;0~SJhKO`U()^?70JG3KlF(=<&c0U>>1UrL| zAIyaaP9u@>#bYedjR^8rHAJs_me+scjO6Jg$U9*F65ctX;Y-(=zw2bpkLoAVp>;{d z1ExTjF)!PFb8XKb&3?T`A-@SJC{P?&^S>=xHc7ZYbSxJ5;bZCRs3cwKOwYc~a?%Cw z7b7(b%d;GE6O-G5*V1y8`}h6&`D39{s=GL5mDy#n^AlC0sNv%j#&hLbU#R(getpirHiWxCQ zEd6Pku{$*~ONWpBm%VNhqM*VCqpCQ?-tnoZL!660*vM9+ISp7%{F^Vuay`e#QZ|85 zGtAE@^tmX5z@7;Bg?<9QsCHly5ME(PgwB}iP&pHL00v)TCLa~stV!_KCsC_?eer|A zr)UC-X#}4^5qQymJNwwqB!}~-yA8_VaC68)ud~g-x|UP+mxl$WH0Xg*A&#eULkl(A znn5N5FVjzP7@|T5OA7MRP4&tdBv-=d;T=@F1zw!%=%1OS)3#fRiZ_}&sf$Y<<*^)R ziBvqdJ&@xHLF?VV+8A*|Q7%&tjouQ@b5ko8{%Hk)ffNXu!BD)AScnyr;&#Sw5Srj?=j0478&WEO zyZi9)`@S|f+%obG4>mE2WVjb1=M2680p*%z=LUW;z64A1EF%m7K(>dz4jxo}?dr)Q zH-`*{b^9W;9SAq}2Zy$<6y1pDUJmO_RZvr`s=-mu9n{#%`O0dQJ%J}h?Ke!UGbknO z$9dHxC?@6_v8+Z~BBKL}HWnN_};fzr!CS#!b4L1h#qd;|1_J2j`yfXu? zAH#C@xuJfa54Xx)SFGmEFyVBg%5(E^h87|wRQT_NCBf}h<-B!J{4z(xGAwvG(}=}= z{rb{3kM>JOtWcW1kV-N`Q#j<>XZMs+9qJ7%5;{KB)zy`O0c>w?_>*^i2ScLNy)m=X z%HSS_89PEqS>98+74?V&f1`*5b*uP(v6?dg_q6rWNQnGzfcyvF?00$z5gsrtJUVMR z>aXdSr}WgA20`ZC3rYe4Tz%=yNS-La*37;v!{tjoMmVA#brI5K02C_n44IMD=Yc36 z%%?T;+pz}ncpD6gI>O;7g~4Ejpjd8G&iZs=b()jsQUQ!;VMbyIo}g{F9*-R`9KHL{ zwyb>X=kjO4D3*bR8skcqL?U`|_p31Lg$!r{rV@wA;o1PV#kaKscdfnKrUFIer2Y?2 zy(1|(2Wu!<&Z-1Mr77EAxzS^eg;Ty6_BUUvMsHcl6!)r5ENyqD^{s~0t3Rf1p)Y?I z&hfikjKMJ?s-2}kufh)=jY>EERyD|8QOdOcxx5#bnAkKvjzVHYh|vwoF%%dBUe7yX zUBUExEORgtUuuBty^^VqjgSmZL%)G@tJy7VCxflqpYO3NgtoS{RBH3!T=oRzJyp)O zqJF1D%q;msNzK$Wc}zi=De*>8)&UPn<-E7Z(I8U(t3)z3HZN{km-^$p=wF&^`qx*+ zcPHCIL9a4U9)qm)EYMouZh&BtwvJ9jbYhb!h7osQk*`2AADq)*bc9&Zd`2y3?FjoO zAn*|{^L}I3sa1^;e~DK43c#e`4Zsqm4K5C%TO>qDt<~|j7*}@QnWdg{HHl$15%3Cg zH(@ZJ(xmEgeU*#7{D;}`h#@ElPaKz1t#Yz*#Kq3Ox1v{fX6Q(rN@OL#uhrOY3zBG! 
zYin!bA*(oMmAD68YPbC13%v1&qn=egsUD2Bzabcgq_^F#m8GXA)jMbw1RFW4D6z%MZdz zDIu-=s9gKBUOo<6-S&!CW&8elTc~exq)E4+L2f*8AW`c*s*8mWS7X$(O|Z`G)Ohv# zJvh3(k}tI5g)Bedt?$tY?GrBl>$ZFDU^p;dXj(O0VEg&y0<*JV%TaxNf#6VpbQT0p zXCd!gxKaxD9`eL6H~8&8J)%1E4SH@!#_<#<+{6(*m^U^bd|RHd<~Z;<;iVjvXfM{+ zu%@-OJ1`5veCQ9HukZe;UK5z7c`D8jMwDn=d2LFabOR*Lk=~czk0ci=;NRMAYA`F` zaU%&Ak|-EVaRR7rzb^@mne)57YGW3PsQ74?`W<|A(7x+cyMDDQn`n9?ntd=y$bW0U z-n<-PSHT1ixG%FCVN3w9u74-lFc z;M$H?Q}{+ts#2`>6W7=eJPd2!4lEe~NFg9FoNZzu{(<5G79rEh?LvmE1h;-B&bxP& zdjde9&wgjzJ6Vvd@S&odSb)1Vj$M+3&;Rxp&2{=3x@wzL&X=?HZmAU_k8NhNMizUF zdC=CXv2uG(nw4I&IE+|W)u+21<(|jI)YzNq43v|${JI3;uGJ6%0>#7#nmK6SkWd$N zv@rP17E)B~ck*XVwpD+lYLrX$SVC4p5Z_E#0doy}*$Bf+TJi%&Y=;Tk=U&?6wWMdL zd)E9;IV93#(JTSmLqWntR}6+oNFL-+`>k=6rOAG6){K^OawzI70Ak>2z+`;)gvDkMtj@BB1C=%zLQk6@QMian+dn z!cR34(k1xN+CTsP{X2`76|kv9V(gh$WpGmX2v9yQr}-U8Iz0tiM=*)8sLs^kB5{N9 z>Mx8}p-^B&8&vCJzkggoUI+pj`7f=cufHi$rbyx*F+g-RP$pd?+)S!HP&jjXO3r6X zytaG?XG-)_?|N@f_(r(Gt~0NX06thAv_W%#vm1F0d(yOe2DzSxnh6CG5|Xr+0zi}c z<}nE?3}oKC)q_Q9x@vKr4P%!84nF$l8DnKcnm^h z6H?aUX1S>yHukf-exb&@*I~HH!NrB3Iv_HNByT-tPSA+G62iZbPl-+Z(Z((BO?ltE zrR~=)l4umwfzdrbx&z|i1+^O)QW$WaUuGM_?66B?H#L86( z5@Vyj!chpdqh7zNt{xBYmCmv#!HRh zMFOuin92d@U|(i*QV)GKZ=O>gz;fUV`BuEzJe)3nRum=Q14<0eFOK*4q(0P~OO$Ka zgi;wb8>LbT(klrr>J+3D42tVdjK4(RE5{Qfxb5^*e=$n!R^P}7h+RPl6@rYciF#nD zD-W21U{r*6k>cILZ=bCM56w<&ABB&zUv#!3?=(s65o8trNZE!4M zF}ZcYM+wXYu7_i0Fx6ob`**tcr`x3#9&$HqK8IdpyftadbYC`X69&jZ3KGDs0IFY- zxQFIE4QAlLExdu(wRr;-s%jMcE4=vK8{#K?>MGABhdxaF=^b_G8}hxI!cM}oco(JL zg0+AAC^%v_&T6(ZC$jPhAb7pf(e32rOmOgyp9#3|3;!pSV1wIbWz7~Eu(uF_2 z7nE`b{@=uFmGga+%@h%7(&`1#()om8@SZ?oXm+z~rUgI;DQ-(NVka9VhzoO{z`&c{ zmM(CNt`h58ER0J3=# zD(;M(1{V@^o>4alAZxq^|Ddovo0$pzvWy92V}yz~9z;VA5^@ zx9qK(P`wGc@83@GI@Cejr$}nx$Hx^yN9vnz2q26uVVOVF2=>;?KL3LTz5gHz&0bF~ z75ltv?om^nJTrM-+aLqEwCt1op{xe6fs4X#%B39Ci(9c;QF~)-WxvaH_gDin_?M*&#e?p_TAU2}#lpvtAw(~HHkB<)_++?f9?jX5X zWCFdt_@_L!Gf8x{(!&ySuL;lD1~)$q+r?8PSnf)NIV@ zfr&K>Ua{V}U45_hu7nWE1I?(fTpS8dU$AFzMvjn})JR@gFQsk%G94eiwRda>gX!vj zXWNPudMzV$=T3n?)_p%x1%T7N8N>E;bvYw3A2cv(BLum?lM~CSA32qHuxuW^64O_b z5y1xWc-5cVy^@1bI(D)~nN(D$Cn;s5nvXk-m&jTxY8CZXfPxqSdi|#=CCyQd_e9k; zD{{7v!mQ6{s^X&fArHzno6a&|St>`7wejfo)yh#36XE0wDuGBwTrrdm8w>GyAa&Fr z){gf~K9$R?FaLS|u`C3`|9)=7r{wPZ9lEbuO7J_P51nNyG={)r!EN;iOs^)xL zA0Immv3UYounPMe$zqy;3C)20{_nPo)r_xKyv zyp;@QP4%RB{`MYGs7W{puAcPDd?!X{PLBSDu z23HPVg;2)3?0xTeS2~vNzI^(ztoVB3z-!!53w4_>F~NjWFOwD&qq5Nk*KtfPbK+Lg zgbDaWw^fs*SNpyQwWsvnNr<0na_iWJF%s`G+m~MTmF8fUI{OCp+}W;l&2i3La)MSZcYt>Y6e>okE~``2WD zul*5vnoKkHp?5l3-dW~N*ECU5(O`X^3twu)NQ2BJQiZiq{v~6=&LdcLf$%wybL6z1 z>=di^Y}0n3nd*78%tdjUnu29RzgxIuYo`@|Ky42ffRX1=c>KAl2Eb$A&Y9{~n}0vAPCb8YUm?^l4^1*sz#&sG!>vHEm+`-}mp zyL7I8#Ib7epvJNI;jhPy87A^?pcCa6fiyA>7805JHib?#>z9A zPv#)^O2w4M4Lcm8DQ?u&O`X1iTprS+!TXAUdXRS%NHWaW*ekl2Ej)B!7S*ANph3!O z^>*f}XJ|B~*WQ>}syxt=!211r^}#FMM}+`M$-WcdR=T0BP@_OaZfCM6+fN+Z=cPjE z`H^3Y{|Vg+-6V`VGI}P->vh@htF&>_=rIwoda&HcQPsSCwWzZ44u`j=r=IqO((fQe zZ9FAV*1jxrs}_3Xm=8Mi)i(GHP`?w{0~CU{Y*y0TweR9nQI=w z-+(uUvi>BaCa&on!8N5D7~^E3M*@k>L-|^U!3}L~@A#hp<4efvFaNabLP!BIEf?GM zXIZ8yvOo6-oP$Xu!NQjkHT?Kqy1`pMl$_=Zi>77?Rygg`$=cpbbA`x08o0t8IB;_fT`08_FC|~*X|{6&z6p$Wg+V8 z;ao^Vg!(%@bLpgi{9(!UvmFc9)%O81*X^GoY(&OCzKr3I>VSL(m@ziyK|5`!h9Hw? 
zqB(CZc#_Cs@km5i!&`f8t`ejQZ179ftCx__FB0SX>r0n`T8dQC-wX6Phdd@Q zK-EaHD%>y|KeHX1QtsZQUVH)oFgBXS*Csz=V-&%+LCohE>g)Ug;T`|SXF@&&aH)QG z*?M0=7;g!lwh33Z8w_Kq7=Bf(-!&M$`tIy~s_Q%<6qTT(1I%Y0(!B_g3zWV9@->f3 z8`05@4FfbHuWjD{Se7ZCHF;BQW0}*5Q2Y8B;~fYVSy3Lm3&0Mcxd=dkM_>1uJGV2X z7lksvH5XV^GvtU|xcne$LHFo6le;({>XntX)-d1*8{wow1U%xS04j8Sh2wJ8%6r^R zX^T^r##;XBKC;*Ua(ySjtO4ZX*fP6(_Uh4jo2kAgC=s`=U5Kb{8{zt z3*@{(rX^EYUXb4b6#$j@_+;^jTcb%iTVKS+UoL0usO;ogSV+hm!K)pD-v~0h?E6x7sboIsl zH*sGHDK1msC0oo|=QN#6cI@ohMvvWQLrzC%IYH+8v*l73gd{MJJsGenv)axYQ)u-I zi$2FkI2#p@z0@kW!$*r8>j;K}xz@O5(1>OT*ZI2xnBMgB?-1afuyEg2w0>mE*fkpq z&sFPqDm6_cJN+iu|BW<3*Tr^c;WmqJZRos!FNKAwOBV|LAw)YX#p-BMVn$j3Bxv~T zDd+cj(S4-i8&xFpeK?8antvc4z5Q&iIfU`Vsb9(BenUr?N_jJK-BIl2Xour1>{+kO zV1p>NB`xO2sqGS`nI_x2no?<+3m zzPzTb3{u7aZDxa^JUU8F?_05p`xf4?)EIKr=*+Y7JhRi>c8)Of3zrO z$&bn)v=XSEh?b*QW1yiS_fG^!ITTCEMt8j)9R8fBpoffA=5QkaAHiC6FRD>Pk6~T0 zv;6_)Mx1(hb>++=<@Im7r?G>#`nwE^KuuLP=8so{t&i%dcPtzT8OR5@@|(ngeH4!V&2Apua<78llk^3enD+j{dsTnbnh&&#KZzcG^4y z#l$!~49~tw9<~BUb|j0k)1L`$mv8 z1c05Ihop=mbRF<&joZuxlUpl?PW#X(Biv5GBkwmYA$xCvV|_WGb^B+T@#DAZn+~7P z0(yVxYB-*zvezgjr{;%n;>>qGI6~>4flPa>KN)H6;6MRp5hM=x%(mv{0`1E5-nMkp z`q)up?N!BFpTW8o2vRss`mM2vB?!)J#V8et#pkY^C0GKH)}LJF{nUUJFrxXdAGoA*Kcs zQ?8a*;Jv{5G5QsC-$iJ_$BYpdYgc%!_^bBk{22Os{3)W_>`wZmX?8-BPsItov1*GX zQyO_d%LS%vm~#Swa;AQ-M>4FUqM{&}nBn;m)h$=fXi-wWL;qFF6%|$^VnF%8eBd(N zy=ChqW<42w=Cgok)?a-%9T2`#tmsXqbE}4ugjX#kh_icE&m`itsdZVT82^=frmS~^ z@dp!^n!h$p+Se{?x|beR-J^k|N3m80>7NuJKfpM#Wo_+K*2g|d@ps>f{+Dan#P5vi z3yz&->Lm-vZX6e{by&Ov0__gMHvpx>^A8+=GzZF2fas(fdDITtN6w> z0+9nVhOkwx&I|)Q((D*p@4du7r^}LIpk4!-q?Fx(bc6fiCgrZ%>ZST78_W@08162W zqb5K;$#5c|B7i<1=Tg6`xs{cwb6C-^%0FY762&t?2re=It85IOzMjexc%i7N|M(ou zl$mYF5;Zchvj3uzF8_1E`nAqAC+p6tn-+mUwn<2kfh9X|znHLp3JAc94|C^n@0Bij zHV1OjpzgQBk&fBjGk23*qC%@;*v5kR5*YucGdAWu&P9ey2v>Ptp~$KqXl|J97p%<1 zvI4k)&?`KDzD;qM7ji+g>h{V!>~A=9?wj-vS!^&)=7=!P!SXltT>0#jJ5fmcSKI2Q zl~0s!-Ck0gtImCerS2DD;0J4e0woV9f%0%%Bcw}EhE!#*<4<8U9!v6Vm-H2f#|F@~ zYy_N1*lv37F+4`iUvLLt4nn#D1$I6jTnm%cZdWSwwhHP>VktizxQB|?^cEL#wXT2h z)8_vs~VGa!{GGvgDLHrIvxu9jiEa3FVgHxnXJ>MbwcX_!x88%2D123pfAQ=iMM)(A4 zsdHL=uSwwbf5b7Bojw#LXPbIo6ZRxz7h-$Ja;G)FqFFVW!?~veYXZJarqqtTWCnkY z$B&aX(noq&+TD3$yKupKElUx7exW6Vwm*Ngo$Z~cAd`u1Gy*jwLZ@u zioDO-vB(Rdq*E_QMDm&f9|%k=Kpq(`eT#DJcX~RPoHk|EjW^YbGumD?9O7i-#1Z)h zbUs2UM=|@_sgASj>p8hfVc|!i^!4y#UcQrx_xsWEjI#Q9527>vWvOP(^5^3hx;lDb z=ITgAMU}13@Ju#E!b`Vb#(Q7!)m5Yt2E+o=q9LnF0IMC&Mi~@n)%3-l`km23d=j3Z zJ8`S^wIsvi1#O(R9AU~$a<}rev??EplsD4*xN$5^=~{g)F7|`dSLB}6TKp3J#!E#A z0?Zwq$R8FB&x3gqgyGit`HXKdg9*%au{IyI1b>~B{$4Cyk@lqKqavkn?<^sNl^{_* zcA0(9oR#nAn_R(s7*}4x`c9MU+EV)|Ns(ZD6kbiMVrr(*w1zNLonK(GT2EhxWU z!$cZtdqe05L5Xmk4`j82D}eAAApu-Gz+IF{#8Zzawa=PtUC%_>OX^@Rv(oH~M{Qi! 
zxtUFwQ}~dU5C4Iy1lH^J^`kYpBM54O1Xe*X>E{6>WMuU5-yRv{!H8U3&~Vyej2L5+ zCU`}fK-?|mZ^gkUg^5C@L9{^~jsCu{6|0$W0mJ0qISZVnOsv4A4jMm5CX>>tn#4PcrhDH{J}<2|4yZRG=JRpC2Oe+A_ncww8WHk z?81qDiz9Uca_W0a-9=cH-BD42Fro!J8xjNu93S$~Mc5y}cfYFKm}oIOG*qLY$n{Rd zN>0(+DPOHN_R103;EVX>e0ta|V#lpkqosf?ev?=1!%J2QLJJjG<%BRR;T0lEFFc1E z_sb`^AJcBXSv~lKuAzNdA=Al?{{5r0uRl1rH9G?QS1@CvkW#r71$r}hF_13WN5BzL zo;lIg?|`Vmro+!nN5^mXa_e&$b$Z{N+wQ&L&n3kQ1PmXYEL{EH^Pj#f{v6iSNrIZw zxXqIMDK4bw+qc(_%iYK|gkkooC93Phlf7xatNB0$fBIA}d4m*_EA2*bZey>Z;9_px zCc!~LXu@n6No)>SIAvimJ_n?BM`nUoFDdtyX>$O57Z0g&EKNecr#zM_JNo!VFTF(8yXWpQe!lwZNsKM)qtov*vTXSccl!7d| z?`f-7E6zdj-NO8gS0m#rV{Do6lcaBH_d=j-1R@jIQ6kPsZ0pVhAf|%L3)ROh-`L|d z!t@)x{Nwy`?j1}uJhwC%3W}LXj^WTu_JrqeV$Y)=G#VtzYfPRB|0N%RBwJ)XG zy1I=p^Ml|tOLoLxYcAUK_Qs|QDLuZ_`e)&ytx-|CYZW3537;R8aBPyU-=Qw%shrVc z19()qCDi7ixFO?Kgl;{g1#>Iclg%;^=>R*Qo$Eh5g?WIk?C(@!S{QE8E^)T{(nhtB z+0oF(xMbsC4+0GE=wJd0B%6A;p@9z#_QaPDd%g@*n6KPnn3lauwjvcTP&w}EE>5)V z*LJkZgsM}ut???eQz#SsxQMf2i8}uRR045>7W(Ag#A;%p&l{?U0BJ6%g^M)@9E;R7QQw$IEQuNc=MQ+G)KkO|U)=p{K|w*2j(r%CLxTeK z;aF-{zSi28!uYHjfToR+zjT$|99W*nb6OVWouv$G1^O_vued-C?3RFyB#1=L;?Ae+ z4H##4LGFz)a@r3+%V1a$JY(&P_CVbu+E zb}(O}gtybYblgj3{&)EXu72JV@Q`Kwmc(iw{vCR=@|=Tm_osSUee}EyF z3Cut6^dy8F?nOs{^}6@2CDVWp1Huzo9IntP0mlzMPhjB&Lkq|h2$B3%Y;5dJ>68YM zN_%wdPuf4b?&P{7L$5}PFO10*n)RzLEF3v7PqO3KtGT!s;1i|3HH~>G(?jtGc-&z5 zHEim{7^ad-l)#Bp^9h>?R?MLbJ8XtI2yzrU2UXl3OuB!9`UF-20pJaC>q-6#kts8< zhXg)|P`9&WN7XRi%1xNES-a^*BxXGh*vETy)e_r|W*l^I&B}lX@P#ktK(N<&MINvhq9<=mWP3pAV z$w|as8J{O`HQP^ZowKU!e5864jkE6L?{Vf`B{iBn?ZUnvd3(RqwPzVU8@IJJ7vq8% za5SAV_loI{Iz$09-qmZ!vNWH7FXWbg>qess>-W{Jxg7FE>Jg$O8yll9F|&?^N%KwS z7&1L%f#elZ-2rhmg%P5kF3~00db8I17dDhMjLSN!`%*rJR=N65PBWF4`GvIhHVf8F zZ{Ostq&ld=Cg9|P?Vq28f;oN?6+ccEmKDgGktOu2vnmYf)#0qE<}7@>K=RRotosDD z$fNml^{8f|h_<2&HdNvSz#hu!`=cZSHFclO3Es-SCrDQm6>^;mp*`?}Z!su~bqZIK zzNLaqmGlG1c1>8!`G3}8!rsjPR&Lta*^MgxZ|5Z}6ZOERQgVb%n6Pyd7G(PJ6TE#^ zkF3Iky_63B?WO#0$7cC}%vNwg(+xtim-M8Ux>rQRBJGO(kOiGV75`faPIeJiZ2sT9 zo%Pwl|KHM0wULH7N;_nc=AbtVcTZdZpBt}DUH!AuyQ;P{3___1zLm}It(8eLS^zZUhUQF zQx!F&&|xp8Woe4h#D7al;s3i#_tG9T&FQ~`vC2gA>38_Yz^}OmSQiQk*Z+@&=zsp< Y4CddLNteuH$AVwV^6GL0G6sJC4+oT1-v9sr literal 0 HcmV?d00001 From 63a9a1839272ca954ed14a1ba9530b1ca0b54e58 Mon Sep 17 00:00:00 2001 From: dapmiller Date: Fri, 21 Oct 2022 22:13:28 +1000 Subject: [PATCH 27/29] Added Graph --- .../Images/VQVAE LOSS GRAPH.png | Bin 0 -> 12029 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 recognition/s4581053 VQVAE OASIS/Images/VQVAE LOSS GRAPH.png diff --git a/recognition/s4581053 VQVAE OASIS/Images/VQVAE LOSS GRAPH.png b/recognition/s4581053 VQVAE OASIS/Images/VQVAE LOSS GRAPH.png new file mode 100644 index 0000000000000000000000000000000000000000..b81d27e4a57f518686c138670420a815a1637842 GIT binary patch literal 12029 zcmaKScQ~Bg*RC>3bdeBIBT^*#=rw{MS`g7&Fo<49Zz)6#i85*uEutIIMkgeU5`CCq z1QBhF=teou5_sZD{{{{Gll4_(AP_97%NT{Jh*e{oGuh@&-8i__}y{fQ2Q6#RYkt z{rtRq{}mAS^l=ioBgLZ*{Ky3_O*3B-5?U7GA1O&z&J_|8;b&TEDvtwmHZdWA z#(TL3rw7tbnq8eQjHvl@K%}mb)ci9|2J+A!_1NVPaWpcydaj-oV*wDkFZ#3I8wy}| zrx_Qn&9T1V17XwTFB1mZUuK@U4yZgbppE6EV`8`l`c6tM8@8o9`EdFLh|Iz11}cBb ze>1*y$~>fE%i5u8`0RA@JDQ7`S<6z(fQ^@r?}s5%Y%ISFMO3V2w9)_caaR?xe;rH+ z9{1Orf6vQ=+z0|AN}ey!4!Y&Tie6M{4{ksqPh24rko^Rh?uEWYDfmrr_DE-Fo;dtG zHt2NBYsO%+7%_{yI#@3J%lp&YEZRY&eeqo|yeen$b1W@5+X=GlopW9R+3%aS!-Z!f zBESp-&9lhxNz6`~vj8>7mfb5JW`=ncIiQ+wFLY*>f}Q5VHl^XMkbM)eotA#2)GMK43}pYn4u5*}Tf9r_(#!Vpd@a+y=}nW#zpj*>kjNNf>}^Z>(! 
zAOYWyxGk(krwa^42A*XaT1>01pY%SzQu)>#JnIHAdC~2ZjgkYGT{!1ygBA~@`Ihi9 zp-*q)LDl>a{H-V`=2QsGLH&+Qc?-L#PjU_pJ-hVA6_U}LV52jAm0E~`n}VCXI1^Ql zJEVh1v1Ky2(c6fC+Gs4v>Bz0=GvZ;6G_KZrrY>IlQK_ zP8prJ*xp3A3H2m9yL_9*f`W%?zli|Fv?{qRlwX*Kg1wRvThN1t*nHWVw6QBmo_+_H z?{SIBLeP)F97{MrD~^)Vur!47CQfLfSw0N~`mo1{3zsP;B$ou~mt{xg0}{A!y2kkT z6b4togAM4@H;65e_03|(33X|bcAo<_Oe^ytqb*4%V=RO>*A`RQ90Egl(#}s&>qx*Y zp;f*I6HbkmtCTckq`c@)oFtX?wlQ07UGy?|(vkak*V>{;O5EK zqRuZf%&Cv)vT)oXM~6Aec!b`9&#E%W~F`hV)zoS;Tvf{{;x2nkEh}Zye$FbjfCsCa2J_G@_B|BPwxYD z(E7$~$6LFw(dU;y7hPrX%vetuZ*m6wW!q91#sz|cNJ*gCkL;p7*hmx_JHPrwi|jFB z%_8wr-mcmOw?lPqIs4abam!iEleKZyKL`h*9U)^~%ZyU+N!}2KD~=EwYA6GczByj# zf{rPqTPAFoY@S3fq|srTEgTM`XFxo7Ms03Da`|2oj9zKbZIavsh6;j6rD9F|%BHBB z7up;lFi5%`B%=qZJdYfegKZUb*uczEDCFDDFPkpn97_n6xtcT?^T(REs9Td^sLOV= zTU(L#9S_0TZjg+au?cdG3sZ?O%u`5r7i-T4uQQ=rLIyt{s_?pZ(RV-OWeF_CG%8=* zSVFwAeeU*9L!)(@{+d3`u3CSQYVJYRe6{6+9 z9CKfj@pq#=%GWnKYyCfwN@7&2{~HuasrLpH}ZopQ{9-JWE|&2di3Gf!W5$Fe&>ev*WZ zEe(UP&gxDb&t*hbk+<_6wrAh-l9)o7t5u76hPnA341$C@&iQ`dIRW7>Ks|Yy> zGi;aMafNze@}u~BYW$t0>b!iZ*kjALO48C(w6wGZ!m4V8`a!3AQ;dy->ZIPiCf?hE zf)1Ewe}jx`e4{0qsHCCZQddpjRRL->wTn5?#9!HK3Ip+t%1cN|8CqNGg@uKU7ilIW zB+&iY+0oL~RXpvv29)!hv_F4*YTv!*xq`kj>{d$RwEkA%A)5Ls+ zk5NX~Z`-toE1KAZNs?32VdL&r{@zurYWYKBW@d(A)y&n(WL0?*OfxmuzhdIKRaCn_ zy*kg!7dBMUFxT(ifa%SV^*#KP(T;SjYJH%kbpZ@PzOSo$H<&F=-buN?$iwK79D0C( z?v(0>#RcvB5(_^G#I+vG)gL)JI)2er_MB6wMBwC#bzohC((^j&Y1t;_rk(Euj4~x% zS)p@5PP3u`w8%NeB=6^?(F zzK7y`^W|4%PNe5U-_mnPy~)VvyOCkpm4CK-D=t3%Xv0Z)faeTP)tkuC_fb8**>VL? z`{*Fz97-bGx@-8P$~qCYjeXAeCwD6}W-I&vfex5;zg6d_bclVh_KI?N=&Tqao>k~)6czVx!`zyKXaNzC zl~)`dwp(+p5)u-A!3_6IO5}~a>B+#^`d&Pv4pm?Fx39Zy3M&8kDnK)VJ- zeLcP6($dxPVp$f?wxgSDmR_VJ`R6AqN23{SJ<$}jfy-$ojRYJ*K2-m&76dIf$oGy@ zkThLBji5NZv}UZ402J|Wn0RWB;M0M?iLtd>f$r}-a*>XA2Y$BRW`1qz?=M-PkxYp# zQ9gBP7>)qk1z3+$hSJK|(pxOHo@Vg6V4{K?IR`EY`kzpzDpxqI`*hTAh&*I8?* z6s9T8Y5tp=Z5hH7dO(R4vt1AgKCVG zq%0~F>WQ;Gaf%!!N#K$Xm`vizowNC%&A{biJJf!b?h5&}<2hv+sJC-b>v6V}`?K{K zO$UE}e+Ng$1spn@u&I+J@meH&J3eJ{{Kt7BtQCjj;s(!J?5EFBE1(RhNg4^{IO`4E z*X~#TfhAwS;tsY~P1q8Dtx7|U!U`92HTd|fwikKMGXWqg*3EtZ*gOAp!>K@cHQ_6m zI)L2%sr5=sx17&)#Sx)Pu39dn8XVb2N9%}SpBlc=;n3}P8;8^5JvVe?N(@hUW)wNq z1FKQ?cqgWEikmQ$3|Ra6^tAF=I3GBhjE>^T*P8UVIs+WYuP;^2+MZDf32m=(v#f6Y zocJQ>wB>fD|C1Gjn**UqTS(l<^1Ke{ApyxSp;*}N1FTpwR^*wM)-`!X?uCObc=er@_+vP`4VNT=K3kMxVZSx?=Wx01yLxLV6LN{J#|4LXUh`2ADlE<)TWCtBf zzosTBEiJ_aK+4q08aO2{!2pRHlBhuEi6*YpJA9)iH$s`{hKI2Is|AOEU1UPb3Hmb3 z%&VIhm8eP5(^(%`%=Z|11_bz(#cG}x@sdE2xcYa0JU4CotfO%+R`QyPt!{IFTD zBZs1r-;@V@sZ5tZT;Cq*FU-ISjwWV9O3M{l{bl+1yo-2$_xwz~e>KY@Fm!Y1lQuK+ z>$?;xw{9{F|LlJ2bA(#Um5^*uBdUzPO|54KY?$zD?P#z!?yo2ZMW=wJlB3L#DwT6d zP3*2KO|;Q}C1?=TMi)T**q)csSP3OFg;%@p_|q%K_hQrvBMg~xdPk5FtgOr1yV;0R zrO_)ybufA=BNW;XX=G=nk;69gmx$V#+lCr*;5CHWg;U;IT z&;I2ZNx+XUIFK@{DNv!#(sSRnNqdPF!k@Asjk-8l$xVBCtP7F@5;2=U`45?=h zWIv>WR>hg>%ma#IBWEe#!pbz1DA$$Rv)|<`Q16x-;8k5Rq zD(fE#JvZfgj;&%o3Tvn>mEWcw`XLF{cneEM`e3luYR9)JI%WHZG?$yrjuZ~|tr^C- z;)!d4i+O)j0?ft5*<+z*D&T=tTV?AwOnz^d`0c&5NXr^{mM~$G1{v4+Mje4y z{ZzNFj2fl--PLAZHM#`CI6FhG(#HP@Tua0}=5bFTY9kE14Zi_EfV$}J6zYNjNDf=?ujEV3yae*D2DITa);K&(ciyTJOx&%> z&7Yd8M#OCPd#J*sp{-5xslxMTsWemru#RR4ASzL2d)r8Z{^?OaSL5IJbAg8r5VW~D zG5RK3B1DN1A_pu3r%(@~W6$jOu}XWa7A!!alH*u-(-*LW_4K38JXMVXqPO^IqPe{W z&}8Q5AcsU^#bID3w4a&UJaaNDbl{VBpz;hbHH!xD)Xu%P`)7PbZAD)3py06 zFw{81%`h}3zN_!~_UBtb@#-B-y+1eOAG?QOP$%MGc3?du67Y05GSl2Tc8dyryCG4r9z0Xa%g*9ipFw%9NR8Y1&A5T$!6T?CZI?d=FDY zj33gE^JXW4kACBL$gDx>{t_v$uwLXO4AtD<-xdbXxN(C*LZShGXpZSOR11y?r0n$I z-De92rY(sGlctb${Y9!MGp@mJ7#^>Kr%(}YDK}HgxJ?0;7$@J=9?S6hU@F}~A!4vy zoZ>a5)e(n#L((bH)Cw%}QLx0gpeB;(OMSt?iNG=&6_qPr6E73N=wdss*TGFJ1C}D1 
z{^UP$7@CEm1c%TzvNTFs@9o|+u@sH|p-Ne;1Js~GHqF;7NK6<-^+!y#XOBWVZR6cM z$=wIvwR(koX%F{zwrYcHgx+dEG^|o%`Ky5(zE#d4!mS$oFx8xb@1iV)irOnkaSisP zLSC@F^n?ZnF$6AVM)Tg+)}|#EgSS-~#6aU0rBHeCZ`^-QNM|J)=Hmz6*y+?%vi{FW zc!E$EK~0u<714YN@Tv<$Yhee#`6yUXZ$n*=*Yd8x5~K(o>S#u62BX)W#bX)oy@DlD zhLa0YhF`7vcu~vz;YH~|_JO-w#)bh{FCf4)$=04RWg>V;D@ntD)0sPB!Wx zlJGk%Iz55d0*njgIw538xGi-aCb)c!hsPIUQ& z!ZIsWBTOr0bX<|vcgR{z#sckr(=?*TC?V7P4V$?FN3zv0ZK$tfrxP`LIl zh^LY?&q(gFyk|XhIQ&F3CAD18@ca(=SEEqfILr=h>N_j7D z)E4sFe@Bx|Zprmn{F9!$zJOE#B*0~z7K{Lm*a><2ZI*|MkEQwp5c?VdIKjQ+6!HWD zaqI}|a9Vtl_9i!_UlP0%gsW(MJn_^GK;|)DtkFU!WKdc?(Cl@jbv(9B zf$47AHoIXxqa?RK7b#~d1&f0F7#2PlJJ!`XE4Rk_<^O|)-L5Q>L@1Qc#ASChL>7L0%>DVS9&etA;#o@zs)?|oUac}8p6FnYv zk0){iCac8iqm;5KCR9GSy zE`fc3g-xd5tT%B@IP2fImmSY+qYzc`_PSwnu`2ALe#e@5rVa7e>2--Ae0uNy$!)gH zJ@^{G)V;|%zL^*9tN+)(OjGdZ{|5~H#5Y?4Cr9&vEMAT_AT|&Q>hM0>;`Q^_-%w+) zuVHV1#4RKy?w=U~)n7NuP^j>HsdkolPF1)Dm_pWxCR*v)J#%qsONCb}(i`@kUp#n( z_?+}rMq)7HC6TO9ZZkL-D0_vBMohbQ_P&%pM@tI|$1{3Kl9Cz#_Ur>MC{`!ewWqhKk6&wD zuNBCB)7@PL9T`Vo1i1HT4DdLmsOaTJY2`|h?%#qF1`sBmrlEU@aM6u=#Tp(N8TSlU`4y$*R+F7| z+zZsz6eWNZFbW9?ENQBO^(*ft6Gh1Z(UF%&&tYN#)c-URe~sfq6CJ)bIeLFi*(+1o ztu%osx(tYpr;TZ(WT#cGGrs=jB0zmy3asjprT8jCddo>;Vu84dwT{Y2GDe~#(gDnE zSawfMPX+j%$eGirQIfp$+8-e^I&z7)XKmp1CZLNOAD7;qUq4U-D%q|fuq^Ip*9RST z*kEm%<6p`o3j@pu273qz0VMHD%l;X$txU1L{olJ4(K9JoINo z1MuzZ>M^Ntnlb;4&G=V;i0HwH_#Nfh(c2EQ4&#klfgD!`|_RF>#1w%4IfHHi5w97|*bsBQ9x1Z1BbY~ci1 zzjN1rGY9^qu8{$h-X3!i^j3Ko=S4yK-$#Sot3p^axR-TQ4+{2a-;M z6c%tC$PI3=1(RMt46*Io>&S>K+WxFTHZ|1mWNThaY=GPif6DqoZAiz) z1~T5#_iHW#TrcRUm8?zcGa6a5Rf7zEKIV8}tGg`#NtclLf}ftTD#y1Zn~5i_`R9?KQQvfM_F**e{yuJ(0^le8k-zYA-Fhmq^S- zW&#0vmgjJ`L6{YPUa zxLki?S|#1k7Tvvyo$c8N6F%juhq>DT^xXUYg7abT$|E_k7=!0jYNp;#4Vk`a(fEir zx&849uZP@1v6t#_rhF}2Fk7PMzGs-KZk4xtA2-yNb!=2b=dYw;JJW#R$2@oKOl@4# zFSD@`n*G;53Kz^2 z)ohFDCuY#m2j?Rxiy7h~NcL~zEh;)(lDAKwqSq;l>?gP9wY1}RaBb#KJmfppHjT?s z;csT_bxCrR`Qk?}G`-XLW`}F!q-50DOc_Q22lT{FdjYs(k<;JVP49-W>6<+DcSG55 zoLCq@@82sd;TN+rWukfe%1O*SQ0nai2LrD;z#LyVK8uto%>)`w4W-A)UCnx4nF|bj ziW87Zb_L2oOT4wC!xiukrA;rDw@U3M>hBPhnW=p~E?sX+IrfK=qEl2UCcZBhIyCU` z5)IjE01(eA_a1{tYi`iRVq%i&b>C*WB@7QfTQ|?TM5$({chg0HCU&j|BXEwr&2cbU zEdVL7IqTd$A4Zo;2s;yAD#yA6J-?0ozL8Sx>HKV=;*V-M`YNb9Tb}X92>QNHs<*+J z?ZcZ(kL0dpm{F(_2Mr;LVJXq~f{_dMx^883_w5GGCOC}nxwK4gQzHqz zlqst4Hp=DMS$-lTgEWYPkcrE!AgnvnOdUmzI}|7M|Fx|{K)%Jie_xiVML@OC6{(HV zPQ}AeU|KtCPB*qgdBY7_1njB;pyI#)>>^0@>vkCP7LSaM{myp#;K>K_uG}${9W9@; zmV=XX-!+iD1vJd7aC0hbUF1|zY$Y0KX1M(}d)#(ueZ&dNNoQ-<6U)QfsV4Y(G*X*5j=U7?FuyJexOR@uv z#YI)hVs+E7&iXK#E_lg z26Ypt7|1)XRrvO;yy$$>{ld;Hmm4)cr4ID(Qap#GZ|D?$fUOhv_ z063H>peJ+FVd|Ny_!sz4DdO3o2zFMDq7IjhWt&KoJCu?kOl~Bca`2{FDx>1Z`h> zykpGt4d$%4xc8zU^UfN;rqZ*H7fDlcQ+)1=>d+l%0IU4okCd4iK_UoxZ5v%F_*eG? zw0e6M%x>*2+lW46QUwz{p(dU`ac*VhuC7AmGu9IUwdh7PvVOo00FPB8jJp9vr+-H% z^xW?-SWL7Jv-R8Im=GC;I}_nbAkg92BWvvYhGuFO-?eJ+l`P{PPjb1go5x|yto$24 zxHlu~;idWtsR9wsQ^E~3?e%AesD_1hM=A!E{SBZ$^BHA814L3&-(X75lR0E^=d-&p zK#sQIdAE|kF+_i0%-KV)KL-d2DUReU2EpU1tdwpJy69=_-`6Ev+|1IPwLisuK8Dn! zA1+RkO%?FOlhpHka-IF{Ib+cs)x7v2RoSltF!{-c(-KBGgSxyu^i{gBV+u zp&(&k(4nhZFn2JkBS}hP2bh4)1Sqv*I?aZQSbQ6Aw>u37C;h?yD2v=;;Z!9odeg_mKStY=!`*CWsy>5Bfx$Q_=UF%vQJkLTThItE~=YJm- zt106BTaz3ZMLO*5zrVZM8YjmqvmP{bxYHFP=67y3TtX8)2fwAN#!m`N>3V@V2244O z-|M7&C2jVzX=`g-h7<+jp~Y}8!;sxK$F6E*BbVuWR^URONbSnVkNO?9CmxC;0is>N z6uxc0hSt7Hj{a#Ikdn~t#%mCY!uSka7qDO zmSko#|2rzHHxo4gssDkL%0$fvi0UR+RS8dC{(9Um3ZFu5R8&_+;B2$F&y@mdDmB}Z z7i!-QZs>wTDxZ5}+S|~rHj0rBwj83|n&fk2#lUGp1gSDj{yeq99H7JHMv-X$eYOrA z>~L;EJI$e_ZHJ|;u1KjXH{O!5s?4ARi4T&(TT`7UE4H;Yo0?o&ABZf+2i>Wr%$)Ve zy4{v+ReCpd%f4|#gf!W2_xy75-UN>Y2p&~|Yr|kTBKsm=ZWfzS3)1E*U$C@;Owz8! 
zy+E9k$^^z~v62laR$smH6jBhZVx>5QIcO$uEbN%uvW%R0{QPIJWZEeet3z?g&^qP& zMSbnbR^@juLed#S=>n_YYBh(2?e+0qq2GuLvKsns6I<^BaQIJ@KESV$OufQf_&Zpg z8aKA8P^7tGw{tcHk*c`G#wqpPiRmOO;qvOSoVpcL=P?aJc9NVB1)N!r?M*zN@2ecA z_z}v}&KSz>yy_6zP*rty1r&yY-}``wpbkt~EEVAjd5%36U?V-JP{0_`O2GxOY#nUq z*5Z=%5V;b-7<94UT7tE2OiwFq z;MzBAQ#B{2R@dU5g?$w~XTpn%wqrg4z9^}<`*CaWMh6-A?)BeFZD`=qsYjNFjm@bT z^(5I_q+1TGhGrzkum(jRw5*U9$l z8hG|$HTX|25cRo`dJ<=v^=8RAf6`1;0Z8NBQkI!4ULPbs>~+9HdY3xs1&W4~^z3uS zxIY(O7VJuvL?*p|@4XF~Am!WzLI!CYO+sSn-MGDpwC0bdHU*K6kaQg;1^q^XuR#P4 z!qkL+zAvdKBy9B}3xFP#$^rDRfis*(|I>xxI(x{Vm`RSMCzqwbKIbnh=Uhu&WBxm* zI0dp44=;(9-AK**z>TQFcw{*e*=7&fGW8N7yk5ZH2NSFUy=&*QP}5$A)ZmaiE#GD> zAWyCi-9aJ4-8%j85iEJAI~Un0qP~5ntXsd@d<|mx1z|&t*6>Qxq!z~33%1Chx;t7I z@F7w2F>FEE7u=|0^vQ701DB3#*qO%gt0`k6u&;cZnP#~NCx6ivx<)jnA%-n*b_FL3 z2_U($j7wa=x8m|#A!+x*kuL|#MVL=NBJ^o&&ab~&!<~ Date: Fri, 21 Oct 2022 23:08:38 +1000 Subject: [PATCH 28/29] Cleaned up Files and finsihed off REadme file. Commented out the pixelcnn stuff as ran out of time to finish it --- recognition/s4581053 VQVAE OASIS/README.MD | 29 ++++- recognition/s4581053 VQVAE OASIS/dataset.py | 11 +- recognition/s4581053 VQVAE OASIS/modules.py | 91 +++++++++++++- recognition/s4581053 VQVAE OASIS/predict.py | 118 ++++++------------ recognition/s4581053 VQVAE OASIS/train.py | 126 +++++++++++++++----- 5 files changed, 258 insertions(+), 117 deletions(-) diff --git a/recognition/s4581053 VQVAE OASIS/README.MD b/recognition/s4581053 VQVAE OASIS/README.MD index 5b59bad248..2b761b001d 100644 --- a/recognition/s4581053 VQVAE OASIS/README.MD +++ b/recognition/s4581053 VQVAE OASIS/README.MD @@ -29,13 +29,32 @@ Before the data was used, it was normalised through residual extration and resca ## ==============Training============== -The three data groups - train, test, and validate are split 0.85/0.1/0.05. The training set contains the most images so the model has enough information to learn from to produce accurate reconstructions later. The test set is used to validate these reconstructions. The validation set is not required, as the model is judged by the quality of the reconstructons on the test set. The model is trained with ... epochs on a batch size of 128. -*insert image +The three data groups - train, test, and validate are split 0.85/0.1/0.05. The training set contains the most images so the model has enough information to learn from to produce accurate reconstructions later. The test set is used to validate these reconstructions. The validation set is not required, as the model is judged by the quality of the reconstructons on the test set. The model is trained with 5 epochs on a batch size of 128. + +![](/recognition/s4581053%20VQVAE%20OASIS/Images/VQVAE%20LOSS%20GRAPH.png) + +>Figure 3: VQVAE Reconstruction loss over 5 epochs ## ==============Results============== -The reconstructed images achieved a mean Structured Similarity of ... -*Inerset image +Below are 5 randomly chosen reconstructed images and its comparison to a test image + +NOTE: The below images and Avergae SSIM rating was originally run on 15 epochs. NOT 5. 
+
+![](/recognition/s4581053%20VQVAE%20OASIS/Images/Reconstructed%20Image%201.png)
+
+![](/recognition/s4581053%20VQVAE%20OASIS/Images/Reconstructed%20Image%202.png)
+
+![](/recognition/s4581053%20VQVAE%20OASIS/Images/Reconstructed%20Image%203.png)
+
+![](/recognition/s4581053%20VQVAE%20OASIS/Images/Reconstructed%20Image%204.png)
+
+![](/recognition/s4581053%20VQVAE%20OASIS/Images/Reconstructed%20Image%205.png)
+
+>Figure 4: 5 randomly chosen reconstructed images and their SSIM value compared to its original image
+
+The reconstructed images achieved a mean Structured Similarity of 73.4
+
 ## Dependencies
 * Python 3.7
 * TensorFlow 2.6.0
@@ -51,3 +70,5 @@ The reconstructed images achieved a mean Structured Similarity of ...
 [2] Paul, S., 2021. Keras documentation: Vector-Quantized Variational Autoencoders. [online] Keras.io. Available at: https://keras.io/examples/generative/vq_vae/.
 
 [3] https://github.com/shakes76/PatternFlow/tree/master/recognition/MySolution
+
+[4] https://keras.io/examples/generative/pixelcnn/
diff --git a/recognition/s4581053 VQVAE OASIS/dataset.py b/recognition/s4581053 VQVAE OASIS/dataset.py
index ecb9d05c38..35eeab7b0f 100644
--- a/recognition/s4581053 VQVAE OASIS/dataset.py
+++ b/recognition/s4581053 VQVAE OASIS/dataset.py
@@ -110,4 +110,13 @@ def process_labels (seg_data):
     #print (np.unique(onehot_validate_Y))
     #print (onehot_Y.shape)
 
-    return onehot_Y
\ No newline at end of file
+    return onehot_Y
+
+def codebook_indice_generator(data, encoder, quantizer):
+    encoded_outputs = encoder.predict(data)
+    flat_enc_outputs = encoded_outputs.reshape(-1, encoded_outputs.shape[-1])
+    codebook_indices = quantizer.get_code_indices(flat_enc_outputs)
+
+    codebook_indices = codebook_indices.numpy().reshape(encoded_outputs.shape[:-1])
+    print(f"Shape of the training data for PixelCNN: {codebook_indices.shape}")
+    return codebook_indices
\ No newline at end of file
diff --git a/recognition/s4581053 VQVAE OASIS/modules.py b/recognition/s4581053 VQVAE OASIS/modules.py
index 0ece4cbbfc..302a566212 100644
--- a/recognition/s4581053 VQVAE OASIS/modules.py
+++ b/recognition/s4581053 VQVAE OASIS/modules.py
@@ -3,7 +3,8 @@
 implementated as a class or a function
 Based on Neural Discrete Representation Learning by van der Oord et al
 https://arxiv.org/pdf/1711.00937.pdf
-and the given example on https://keras.io/examples/generative/vq_vae/
+and the given example on https://keras.io/examples/generative/vq_vae/ and
+https://keras.io/examples/generative/pixelcnn/
 """
 
 import tensorflow as tf
@@ -77,6 +78,17 @@ def call(self, x):
 
         return quantized
 
+    def get_code_indices(self, inputs):
+        # Get code indices
+        # Compute the squared L2 distance between the inputs and the embeddings.
+        # For each of the n*h*w input vectors, compute the distance to each of the k vectors in the embedding dictionary, giving a matrix of shape (n*h*w, k)
+        similarity = tf.matmul(inputs, self.embeddings)
+        distances = (tf.reduce_sum(inputs ** 2, axis=1, keepdims=True) + tf.reduce_sum(self.embeddings ** 2, axis=0) - 2 * similarity)
+
+        # For each of the n*h*w input vectors, take the index of the closest of the k dictionary vectors (minimum distance).
+        encoded_indices = tf.argmin(distances, axis=1)
+        return encoded_indices
+
 # Represents the VAE Structure
 class VAE:
     def __init__(self, embedding_num, latent_dimension, beta=0.25):
@@ -194,4 +206,81 @@ def train_step(self, x):
             "vqvae_loss": self.vq_loss_tracker.result(),
         }
 
+"""
+# The first layer is the PixelCNN layer. This layer simply
+# builds on the 2D convolutional layer, but includes masking.
+class PixelConvLayer(tf.keras.layers.Layer): + def __init__(self, mask_type, **kwargs): + super(PixelConvLayer, self).__init__() + self.mask_type = mask_type + self.conv = tf.keras.layers.Conv2D(**kwargs) + + def build(self, input_shape): + # Build the conv2d layer to initialize kernel variables + self.conv.build(input_shape) + # Use the initialized kernel to create the mask + kernel_shape = self.conv.kernel.get_shape() + self.mask = np.zeros(shape=kernel_shape) + self.mask[: kernel_shape[0] // 2, ...] = 1.0 + self.mask[kernel_shape[0] // 2, : kernel_shape[1] // 2, ...] = 1.0 + if self.mask_type == "B": + self.mask[kernel_shape[0] // 2, kernel_shape[1] // 2, ...] = 1.0 + + def call(self, inputs): + self.conv.kernel.assign(self.conv.kernel * self.mask) + return self.conv(inputs) + + # Next, we build our residual block layer. +# This is just a normal residual block, but based on the PixelConvLayer. +class ResidualBlock(tf.keras.layers.Layer): + def __init__(self, filters, **kwargs): + super(ResidualBlock, self).__init__(**kwargs) + self.conv1 = tf.keras.layers.Conv2D( + filters=filters, kernel_size=1, activation="relu" + ) + self.pixel_conv = PixelConvLayer( + mask_type="B", + filters=filters // 2, + kernel_size=3, + activation="relu", + padding="same", + ) + self.conv2 = tf.keras.layers.Conv2D( + filters=filters, kernel_size=1, activation="relu" + ) + + def call(self, inputs): + x = self.conv1(inputs) + x = self.pixel_conv(x) + x = self.conv2(x) + return tf.keras.layers.add([inputs, x]) + + +def pixel_model(pixelcnn_input_shape, residualblock_num, pixelcnn_layers, model): + pixelcnn_inputs = tf.keras.Input(shape=pixelcnn_input_shape, dtype=tf.int32) + ohe = tf.one_hot(pixelcnn_inputs, model.embeddings_num) + x = PixelConvLayer( + mask_type="A", filters=128, kernel_size=7, activation="relu", padding="same" + )(ohe) + + for _ in range(residualblock_num): + x = ResidualBlock(filters=128)(x) + + for _ in range(pixelcnn_layers): + x = PixelConvLayer( + mask_type="B", + filters=128, + kernel_size=1, + strides=1, + activation="relu", + padding="valid", + )(x) + + out = tf.keras.layers.Conv2D( + filters=model.embeddings_num, kernel_size=1, strides=1, padding="valid" + )(x) + pixel_cnn = tf.keras.Model(pixelcnn_inputs, out, name="pixel_cnn") + pixel_cnn.summary() + return pixel_cnn +""" \ No newline at end of file diff --git a/recognition/s4581053 VQVAE OASIS/predict.py b/recognition/s4581053 VQVAE OASIS/predict.py index 43d540db6e..ccf3a60769 100644 --- a/recognition/s4581053 VQVAE OASIS/predict.py +++ b/recognition/s4581053 VQVAE OASIS/predict.py @@ -4,17 +4,42 @@ import numpy as np import matplotlib.pyplot as plt import modules as mod +import dataset as data -import warnings -warnings.filterwarnings("ignore", category=DeprecationWarning) -# Show how well program performs +""" MODEL AND TRAIN VQ-VAE """ +# Load the training data from the Oasis Data set +train_X = data.load_training ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_train") +train_X = data.process_training(train_X) +train_x_var = np.var(train_X) +# Load the test data from the oasis Data Set +test_X = data.load_training ("C:/Users/dapmi/OneDrive/Desktop/Data/oa-sis.tar/keras_png_slices_data/keras_png_slices_test") -""" MODEL AND TRAIN VQ-VAE """ +# Pre process test data set +test_X = data.process_training(test_X) """ RECONSTRUCTION RESULTS""" -# Plots the original image against the reconstructed one +latent_dimensions = 16 #dimensionality if each latent embedding vector +embeddings_number = 128 
#number of embeddings in the codebook +# load model +model = mod.VQVAETRAINER(train_x_var, latent_dimensions, embeddings_number) +# Create Model +model.compile (optimizer='adam') + +# Train model +history = model.fit(train_X, epochs=5, batch_size=128) + +# Plot Loss +plt.plot(history.history['reconstruction_loss'], label='Reconstruction Loss') +plt.title('VQVAE Loss') +plt.xlabel('Epoch') +plt.ylabel('Loss') +plt.legend(['Train', 'Validation'], loc='upper left') +plt.show() + +# Plots the original image against the reconstructed one def plot_comparision_original_to_reconstructed(original, reconstructed): + plt.figure(figsize = (10,12)) plt.subplot(1, 2, 1) plt.imshow(original.squeeze() + 0.5, cmap = 'gray') @@ -28,86 +53,15 @@ def plot_comparision_original_to_reconstructed(original, reconstructed): plt.show() -trained_model = mod.model.model -idx = np.random.choice(len(test_X), 10) +trained_model = model.vqvae_model + +# Select 5 random Test images +idx = np.random.choice(len(test_X), 5) test_images = test_X[idx] reconstructions_test = trained_model.predict(test_images) +# Perform Predictions on the test images for test_image, reconstructed_image in zip(test_images, reconstructions_test): + plot_comparision_original_to_reconstructed(test_image, reconstructed_image) -# Return the average pixel value for the image and the reconstruction -def calculate_mean(image, reconstructed_image): - image_pixel = 0 - reconstructed_pixel = 0 - - for row in range(256): - for col in range(256): - image_pixel += image[row][col] - reconstructed_pixel += reconstructed_image[row][col] - - image_pixel = image_pixel / (256**2) - reconstructed_pixel = reconstructed_pixel / (256**2) - - return image_pixel, reconstructed_image - -# Returns std dev for the pixel value of each image -def calculate_stddev(image, reconstructed_image, image_mean, reconstructed_image_mean): - - image_variance = 0 - reconstructed_image_variance = 0 - - for row in range(256): - for col in range(256): - image_variance += np.square(image[row][col] - image_mean) - reconstructed_image_variance += np.square(reconstructed_image[row][col] - reconstructed_image_mean) - - image_variance = np.sqrt(image_variance/256**2 - 1) - reconstructed_image_variance = np.sqrt(reconstructed_image_variance/256**2 - 1) - return image_variance, reconstructed_image_variance - -# Returns the covariance for both images -def calculate_covariance(image, reconstructed_image, image_mean, predicted_mean): - covariance_value = 0 - - for row in range(256): - for col in range(256): - covariance_value += (image[row][col] - image_mean)*(reconstructed_image[row][col] - predicted_mean) - - return covariance_value/256**256-1 - - -# Return the structural similarity between two images; measures the window x and y of common size. -# https://en.wikipedia.org/wiki/Structural_similarity -def structural_similarity(mean_X, predicted_mean, stddev_X, predicted_stddev, covariance): - K1 = 0.01 # default value - K2 = 0.03 # default value - L = 255 # dynamic range of pixel value (2^bits per pixel -1) - C1 = (K1 * L)**2 - C2 = (K2 * L)**2 - C3 = C2 / 2 - - luminance_x_y = (2*mean_X*predicted_mean + C1)/(mean_X**2+predicted_mean**2+C1) - contrast_x_y = (2*stddev_X*predicted_stddev + C2)/(stddev_X**2+np. 
predicted_stddev**2+C2) - structure_x_y = (covariance+C3)/(stddev_X*predicted_stddev+C3) - return luminance_x_y * contrast_x_y * structure_x_y - -# Returns the structured similarity for the entire data set -def structural_similarity_mean(test_X, model): - structured_similarity_coef = 0 - - for i, data in enumerate(test_X): - # get reconstructed image - image_reconstruction = model.predict(data) - data = data[0,:,:,0] - image_reconstruction = image_reconstruction[0,:,:,0] - - # Calculate structured similarity and add to total - mean_X, predicted_mean = calculate_mean(data, image_reconstruction) - stddev_X, predicted_stddev = calculate_stddev(data, image_reconstruction, mean_X, predicted_mean) - covariance = calculate_covariance(data, image_reconstruction, mean_X, predicted_mean) - structured_similarity_coef += structural_similarity(mean_X, predicted_mean, stddev_X, predicted_stddev, covariance) - - return structured_similarity_coef / len(test_X) - -print(structural_similarity_mean(test_X, trained_model)) \ No newline at end of file diff --git a/recognition/s4581053 VQVAE OASIS/train.py b/recognition/s4581053 VQVAE OASIS/train.py index de4984a1e3..046e882cd6 100644 --- a/recognition/s4581053 VQVAE OASIS/train.py +++ b/recognition/s4581053 VQVAE OASIS/train.py @@ -69,16 +69,19 @@ model.compile (optimizer='adam') # Train model -history = model.fit(train_X, epochs=15, batch_size=128) +history = model.fit(train_X, epochs=5, batch_size=128) print("disaster!!!!") + #%% # Plot Loss plt.plot(history.history['reconstruction_loss'], label='Reconstruction Loss') -plt.title('VQVAE Loss') +plt.plot(history.history['loss'], label='Reconstruction Loss') +plt.plot(history.history['vqvae_loss'], label='Reconstruction Loss') +plt.title('VQVAE Reconstruction Loss') plt.xlabel('Epoch') plt.ylabel('Loss') -plt.legend(['Train', 'Validation'], loc='upper left') +plt.legend(['Training Data', 'Validation'], loc='upper right') plt.show() #%% @@ -165,7 +168,7 @@ def plot_comparision_original_to_reconstructed(original, reconstructed, ssim): idx = np.random.choice(len(test_X), 5) test_images = test_X[idx] reconstructions_test = trained_model.predict(test_images) - +ssim_mean = 0 # Perform Predictions on the test images for test_image, reconstructed_image in zip(test_images, reconstructions_test): """mean, mean_r = calculate_mean(test_image, reconstructed_image) @@ -175,34 +178,15 @@ def plot_comparision_original_to_reconstructed(original, reconstructed, ssim): """ structured_similiarity_rating = tf.image.ssim(test_image, reconstructed_image, max_val=1.0) plot_comparision_original_to_reconstructed(test_image, reconstructed_image, structured_similiarity_rating) - - - -#%% - -# Returns the structured similarity for the entire data set -def structural_similarity_mean(test_X, model): - structured_similarity_coef = 0 - - for i, data in enumerate(test_X): - # get reconstructed image - image_reconstruction = model.predict(data) - data = data[0,:,:,0] - image_reconstruction = image_reconstruction[0,:,:,0] - - # Calculate structured similarity and add to total - mean_X, predicted_mean = calculate_mean(data, image_reconstruction) - stddev_X, predicted_stddev = calculate_stddev(data, image_reconstruction, mean_X, predicted_mean) - covariance = calculate_covariance(data, image_reconstruction, mean_X, predicted_mean) - structured_similarity_coef += structural_similarity(mean_X, predicted_mean, stddev_X, predicted_stddev, covariance) - - return structured_similarity_coef / len(test_X) + ssim_mean += structured_similiarity_rating + 
print(structured_similiarity_rating) # Calculate the mean structural Similarity for the reconstructed images -mean_structured_similiarity = structural_similarity_mean(test_X, trained_model) -print(mean_structured_similiarity) +print("The average structured similiarity rating is: ", ssim_mean/len(test_images)) -# %% + +"""Visualizing the discrete codes""" +""" encoder = model.vqvae_model.get_layer("encoder") quantizer = model.vqvae_model.get_layer("vector_quantizer") @@ -222,3 +206,87 @@ def structural_similarity_mean(test_X, model): plt.title("Code") plt.axis("off") plt.show() + +""" + +""" PIXELCNN Hyperparameters""" +""" +residualblock_num = 2 +pixelcnn_layers = 2 +pixelcnn_input_shape = encoded_outputs.shape[1:-1] +print(f"Input shape of the PixelCNN: {pixelcnn_input_shape}") + + +pixel_model = mod.pixel_model(pixelcnn_input_shape, residualblock_num, pixelcnn_layers, model) + +""" + +""" DATA PREPARATION""" +""" +# Generate the codebook indices. +codebook_indices = data.codebook_indice_generator(train_X, encoder, quantizer) + +""" +""" PixelCNN TRAINING""" +""" +pixel_model.compile( + optimizer=tf.keras.optimizers.Adam(3e-4), + loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), + metrics=["accuracy"], +) +pixel_model.fit( + x=codebook_indices, + y=codebook_indices, + batch_size=128, + epochs=5, + validation_split=0.1, +) + +# Create a mini sampler model. +inputs = tf.keras.layers.Input(shape=pixel_model.input_shape[1:]) +outputs = pixel_model(inputs, training=False) +categorical_layer = tfp.layers.DistributionLambda(tfp.distributions.Categorical) +outputs = categorical_layer(outputs) +sampler = tf.keras.Model(inputs, outputs) + +# Create an empty array of priors. +batch = 10 +priors = np.zeros(shape=(batch,) + (pixel_model.input_shape)[1:]) +batch, rows, cols = priors.shape + +# Iterate over the priors because generation has to be done sequentially pixel by pixel. +for row in range(rows): + for col in range(cols): + # Feed the whole array and retrieving the pixel value probabilities for the next + # pixel. + probs = sampler.predict(priors) + # Use the probabilities to pick pixel values and append the values to the priors. + priors[:, row, col] = probs[:, row, col] + +print(f"Prior shape: {priors.shape}") + +# Perform an embedding lookup. +pretrained_embeddings = quantizer.embeddings +priors_ohe = tf.one_hot(priors.astype("int32"), model.num_embeddings).numpy() +quantized = tf.matmul( + priors_ohe.astype("float32"), pretrained_embeddings, transpose_b=True +) +quantized = tf.reshape(quantized, (-1, *(encoded_outputs.shape[1:]))) + +# Generate novel images. 
+
+decoder = model.vqvae.get_layer("decoder")
+generated_samples = decoder.predict(quantized)
+
+for i in range(batch):
+    plt.subplot(1, 2, 1)
+    plt.imshow(priors[i])
+    plt.title("Code")
+    plt.axis("off")
+
+    plt.subplot(1, 2, 2)
+    plt.imshow(generated_samples[i].squeeze() + 0.5)
+    plt.title("Generated Sample")
+    plt.axis("off")
+    plt.show()
+
+    """
\ No newline at end of file

From 09450f87bf39387ab431d1974e3f55e38ab3dd75 Mon Sep 17 00:00:00 2001
From: dapmiller
Date: Fri, 21 Oct 2022 23:12:41 +1000
Subject: [PATCH 29/29] Fixed ssim to read 0.74 not 74

---
 recognition/s4581053 VQVAE OASIS/README.MD | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/recognition/s4581053 VQVAE OASIS/README.MD b/recognition/s4581053 VQVAE OASIS/README.MD
index 2b761b001d..bfd809679e 100644
--- a/recognition/s4581053 VQVAE OASIS/README.MD
+++ b/recognition/s4581053 VQVAE OASIS/README.MD
@@ -53,7 +53,7 @@ NOTE: The below images and average SSIM rating were originally run on 15 epochs.
 
 >Figure 4: 5 randomly chosen reconstructed images and their SSIM value compared to its original image
 
-The reconstructed images achieved a mean Structured Similarity of 73.4
+The reconstructed images achieved a mean Structured Similarity of 0.734
 
 ## Dependencies
 * Python 3.7
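
For context on the value patch 29 corrects: tf.image.ssim, which train.py already uses with max_val=1.0, returns per-image SSIM scores on a 0-to-1 scale, so the mean over the test reconstructions is naturally reported as 0.734 rather than 73.4. Below is a minimal sketch of how such a mean could be computed over the whole test set; it assumes trained_model is the fitted VQ-VAE Keras model and test_X is an (N, 256, 256, 1) float array preprocessed as in train.py, and the helper name mean_ssim is illustrative rather than part of the repository.

# Illustrative sketch (not repository code): mean SSIM between test images and
# their VQ-VAE reconstructions, assuming the images are scaled for max_val=1.0.
import tensorflow as tf

def mean_ssim(vqvae_model, images, batch_size=128):
    """Return the mean SSIM over `images` versus their reconstructions."""
    # Reconstruct every image with the trained VQ-VAE.
    reconstructions = vqvae_model.predict(images, batch_size=batch_size)
    # tf.image.ssim expects (batch, H, W, channels) tensors and returns one score per image.
    scores = tf.image.ssim(
        tf.convert_to_tensor(images, dtype=tf.float32),
        tf.convert_to_tensor(reconstructions, dtype=tf.float32),
        max_val=1.0,
    )
    return float(tf.reduce_mean(scores))

# Hypothetical usage with the names from train.py:
# print("Mean SSIM:", mean_ssim(trained_model, test_X))  # e.g. ~0.73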