From 0aa9ad9d698cd7e4e0e33b5fc2d61250559d1d1b Mon Sep 17 00:00:00 2001
From: Richard Yu
Date: Mon, 28 Oct 2024 05:45:38 +1000
Subject: [PATCH 1/8] repo recloned and files reworked

---
 dataset.py |  59 ++++++
 module.py  | 549 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 predict.py |  48 +++++
 train.py   | 101 ++++++++++
 4 files changed, 757 insertions(+)
 create mode 100644 dataset.py
 create mode 100644 module.py
 create mode 100644 predict.py
 create mode 100644 train.py

diff --git a/dataset.py b/dataset.py
new file mode 100644
index 000000000..3ea1a61d1
--- /dev/null
+++ b/dataset.py
@@ -0,0 +1,59 @@
+from module import GFNet
+import math
+import logging
+from functools import partial
+from collections import OrderedDict
+from copy import Error, deepcopy
+from re import S
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import logging
+from sklearn.metrics import confusion_matrix
+import seaborn as sns
+import matplotlib.pyplot as plt
+
+from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
+from timm.models.layers import DropPath, to_2tuple, trunc_normal_
+import torch.fft
+from torch.nn.modules.container import Sequential
+from torchvision import transforms, datasets
+from torch.utils.data import DataLoader, random_split
+import logging
+import math
+from functools import partial
+from collections import OrderedDict
+import torch.optim as optim
+
+
+# Initialize logger
+logging.basicConfig(level=logging.INFO)
+_logger = logging.getLogger(__name__)
+
+# Check for GPU availability
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+_logger.info(f"Using device: {device}")
+
+# Data Preparation
+transform = transforms.Compose([
+    transforms.Resize((224, 224)),
+    transforms.ToTensor(),
+    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+])
+
+# Load ADNI dataset (raw string so the Windows path separators are not treated as escapes)
+dataset = datasets.ImageFolder(root=r'ADNI_AD_NC_2D\AD_NC', transform=transform)
+train_size = int(0.7 * len(dataset))
+val_size = int(0.15 * len(dataset))
+test_size = len(dataset) - train_size - val_size
+train_dataset, val_dataset, test_dataset = random_split(dataset, [train_size, val_size, test_size])
+
+train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
+val_loader = DataLoader(val_dataset, batch_size=32, shuffle=False)
+test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)
+
+# Model Initialization
+model = GFNet(img_size=224, patch_size=16, in_chans=3, num_classes=2, embed_dim=768, depth=12, mlp_ratio=4, drop_rate=0, drop_path_rate=0.)
+model.head = nn.Linear(model.num_features, 2) # Assuming binary classification +model.to(device) # Move model to GPU \ No newline at end of file diff --git a/module.py b/module.py new file mode 100644 index 000000000..e9f204d33 --- /dev/null +++ b/module.py @@ -0,0 +1,549 @@ +import math +import logging +from functools import partial +from collections import OrderedDict +from copy import Error, deepcopy +from re import S +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +import logging +from sklearn.metrics import confusion_matrix +import seaborn as sns +import matplotlib.pyplot as plt + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.models.layers import DropPath, to_2tuple, trunc_normal_ +import torch.fft +from torch.nn.modules.container import Sequential +from torchvision import transforms, datasets +from torch.utils.data import DataLoader, random_split +import logging +import math +from functools import partial +from collections import OrderedDict +import torch.optim as optim + +_logger = logging.getLogger(__name__) +class Mlp(nn.Module): + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.0, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class GlobalFilter(nn.Module): + def __init__(self, dim, h=14, w=8): + super().__init__() + self.complex_weight = nn.Parameter( + torch.randn(h, w, dim, 2, dtype=torch.float32) * 0.02 + ) + self.w = w + self.h = h + + def forward(self, x, spatial_size=None): + B, N, C = x.shape + if spatial_size is None: + a = b = int(math.sqrt(N)) + else: + a, b = spatial_size + + x = x.view(B, a, b, C) + + x = x.to(torch.float32) + + x = torch.fft.rfft2(x, dim=(1, 2), norm="ortho") + weight = torch.view_as_complex(self.complex_weight) + x = x * weight + x = torch.fft.irfft2(x, s=(a, b), dim=(1, 2), norm="ortho") + + x = x.reshape(B, N, C) + + return x + + +class Block(nn.Module): + + def __init__( + self, + dim, + mlp_ratio=4.0, + drop=0.0, + drop_path=0.0, + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + h=14, + w=8, + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.filter = GlobalFilter(dim, h=h, w=w) + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + ) + + def forward(self, x): + x = x + self.drop_path(self.mlp(self.norm2(self.filter(self.norm1(x))))) + return x + + +class BlockLayerScale(nn.Module): + + def __init__( + self, + dim, + mlp_ratio=4.0, + drop=0.0, + drop_path=0.0, + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + h=14, + w=8, + init_values=1e-5, + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.filter = GlobalFilter(dim, h=h, w=w) + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + ) + self.gamma = nn.Parameter(init_values 
* torch.ones((dim)), requires_grad=True) + + def forward(self, x): + x = x + self.drop_path( + self.gamma * self.mlp(self.norm2(self.filter(self.norm1(x)))) + ) + return x + + +class PatchEmbed(nn.Module): + """Image to Patch Embedding""" + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + + self.proj = nn.Conv2d( + in_chans, embed_dim, kernel_size=patch_size, stride=patch_size + ) + + def forward(self, x): + B, C, H, W = x.shape + # FIXME look at relaxing size constraints + assert ( + H == self.img_size[0] and W == self.img_size[1] + ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +class DownLayer(nn.Module): + """Image to Patch Embedding""" + + def __init__(self, img_size=56, dim_in=64, dim_out=128): + super().__init__() + self.img_size = img_size + self.dim_in = dim_in + self.dim_out = dim_out + self.proj = nn.Conv2d(dim_in, dim_out, kernel_size=2, stride=2) + self.num_patches = img_size * img_size // 4 + + def forward(self, x): + B, N, C = x.size() + x = x.view(B, self.img_size, self.img_size, C).permute(0, 3, 1, 2) + x = self.proj(x).permute(0, 2, 3, 1) + x = x.reshape(B, -1, self.dim_out) + return x + + +class GFNet(nn.Module): + + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=1000, + embed_dim=768, + depth=12, + mlp_ratio=4.0, + representation_size=None, + uniform_drop=False, + drop_rate=0.0, + drop_path_rate=0.0, + norm_layer=None, + dropcls=0, + ): + """ + Args: + img_size (int, tuple): input image size + patch_size (int, tuple): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + qk_scale (float): override default qk scale of head_dim ** -0.5 if set + representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set + drop_rate (float): dropout rate + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + hybrid_backbone (nn.Module): CNN backbone to use in-place of PatchEmbed module + norm_layer: (nn.Module): normalization layer + """ + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = ( + embed_dim # num_features for consistency with other models + ) + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + ) + num_patches = self.patch_embed.num_patches + + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + + h = img_size // patch_size + w = h // 2 + 1 + + if uniform_drop: + print("using uniform droppath with expect rate", drop_path_rate) + dpr = [drop_path_rate for _ in range(depth)] # stochastic depth decay rule + else: + print("using linear droppath with expect rate", drop_path_rate * 0.5) + dpr = [ + x.item() for x in torch.linspace(0, 
drop_path_rate, depth) + ] # stochastic depth decay rule + # dpr = [drop_path_rate for _ in range(depth)] # stochastic depth decay rule + + self.blocks = nn.ModuleList( + [ + Block( + dim=embed_dim, + mlp_ratio=mlp_ratio, + drop=drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + h=h, + w=w, + ) + for i in range(depth) + ] + ) + + self.norm = norm_layer(embed_dim) + + # Representation layer + if representation_size: + self.num_features = representation_size + self.pre_logits = nn.Sequential( + OrderedDict( + [ + ("fc", nn.Linear(embed_dim, representation_size)), + ("act", nn.Tanh()), + ] + ) + ) + else: + self.pre_logits = nn.Identity() + + # Classifier head + self.head = ( + nn.Linear(self.num_features, num_classes) + if num_classes > 0 + else nn.Identity() + ) + + if dropcls > 0: + print("dropout %.2f before classifier" % dropcls) + self.final_dropout = nn.Dropout(p=dropcls) + else: + self.final_dropout = nn.Identity() + + trunc_normal_(self.pos_embed, std=0.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {"pos_embed", "cls_token"} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=""): + self.num_classes = num_classes + self.head = ( + nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + ) + + def forward_features(self, x): + B = x.shape[0] + x = self.patch_embed(x) + x = x + self.pos_embed + x = self.pos_drop(x) + + for blk in self.blocks: + x = blk(x) + + x = self.norm(x).mean(1) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.final_dropout(x) + x = self.head(x) + return x + + +class GFNetPyramid(nn.Module): + + def __init__( + self, + img_size=224, + patch_size=4, + num_classes=1000, + embed_dim=[64, 128, 256, 512], + depth=[2, 2, 10, 4], + mlp_ratio=[4, 4, 4, 4], + drop_rate=0.0, + drop_path_rate=0.0, + norm_layer=None, + init_values=0.001, + no_layerscale=False, + dropcls=0, + ): + """ + Args: + img_size (int, tuple): input image size + patch_size (int, tuple): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + qk_scale (float): override default qk scale of head_dim ** -0.5 if set + representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set + drop_rate (float): dropout rate + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + hybrid_backbone (nn.Module): CNN backbone to use in-place of PatchEmbed module + norm_layer: (nn.Module): normalization layer + """ + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim[ + -1 + ] # num_features for consistency with other models + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + + self.patch_embed = nn.ModuleList() + + patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=3, embed_dim=embed_dim[0] + ) + num_patches = 
patch_embed.num_patches + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim[0])) + + self.patch_embed.append(patch_embed) + + sizes = [56, 28, 14, 7] + for i in range(4): + sizes[i] = sizes[i] * img_size // 224 + + for i in range(3): + patch_embed = DownLayer(sizes[i], embed_dim[i], embed_dim[i + 1]) + num_patches = patch_embed.num_patches + self.patch_embed.append(patch_embed) + + self.pos_drop = nn.Dropout(p=drop_rate) + self.blocks = nn.ModuleList() + + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, sum(depth)) + ] # stochastic depth decay rule + cur = 0 + for i in range(4): + h = sizes[i] + w = h // 2 + 1 + + if no_layerscale: + print("using standard block") + blk = nn.Sequential( + *[ + Block( + dim=embed_dim[i], + mlp_ratio=mlp_ratio[i], + drop=drop_rate, + drop_path=dpr[cur + j], + norm_layer=norm_layer, + h=h, + w=w, + ) + for j in range(depth[i]) + ] + ) + else: + print("using layerscale block") + blk = nn.Sequential( + *[ + BlockLayerScale( + dim=embed_dim[i], + mlp_ratio=mlp_ratio[i], + drop=drop_rate, + drop_path=dpr[cur + j], + norm_layer=norm_layer, + h=h, + w=w, + init_values=init_values, + ) + for j in range(depth[i]) + ] + ) + self.blocks.append(blk) + cur += depth[i] + + # Classifier head + self.norm = norm_layer(embed_dim[-1]) + + self.head = nn.Linear(self.num_features, num_classes) + + if dropcls > 0: + print("dropout %.2f before classifier" % dropcls) + self.final_dropout = nn.Dropout(p=dropcls) + else: + self.final_dropout = nn.Identity() + + trunc_normal_(self.pos_embed, std=0.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {"pos_embed", "cls_token"} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=""): + self.num_classes = num_classes + self.head = ( + nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + ) + + def forward_features(self, x): + for i in range(4): + x = self.patch_embed[i](x) + if i == 0: + x = x + self.pos_embed + x = self.blocks[i](x) + + x = self.norm(x).mean(1) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.final_dropout(x) + x = self.head(x) + return x + + +def resize_pos_embed(posemb, posemb_new): + # Rescale the grid of position embeddings when loading from state_dict. 
Adapted from + # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224 + _logger.info("Resized position embedding: %s to %s", posemb.shape, posemb_new.shape) + ntok_new = posemb_new.shape[1] + if True: + posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:] + ntok_new -= 1 + else: + posemb_tok, posemb_grid = posemb[:, :0], posemb[0] + gs_old = int(math.sqrt(len(posemb_grid))) + gs_new = int(math.sqrt(ntok_new)) + _logger.info("Position embedding grid-size from %s to %s", gs_old, gs_new) + posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) + posemb_grid = F.interpolate(posemb_grid, size=(gs_new, gs_new), mode="bilinear") + posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new * gs_new, -1) + posemb = torch.cat([posemb_tok, posemb_grid], dim=1) + return posemb + + +def checkpoint_filter_fn(state_dict, model): + """convert patch embedding weight from manual patchify + linear proj to conv""" + out_dict = {} + if "model" in state_dict: + # For deit models + state_dict = state_dict["model"] + for k, v in state_dict.items(): + if "patch_embed.proj.weight" in k and len(v.shape) < 4: + # For old models that I trained prior to conv based patchification + O, I, H, W = model.patch_embed.proj.weight.shape + v = v.reshape(O, -1, H, W) + elif k == "pos_embed" and v.shape != model.pos_embed.shape: + # To resize pos embedding when using model at different size from pretrained weights + v = resize_pos_embed(v, model.pos_embed) + out_dict[k] = v + return out_dict diff --git a/predict.py b/predict.py new file mode 100644 index 000000000..00816a531 --- /dev/null +++ b/predict.py @@ -0,0 +1,48 @@ +from train import train_model, evaluate_model +from dataset import device, train_loader, val_loader, test_loader +from module import GFNet +import math +import logging +from functools import partial +from collections import OrderedDict +from copy import Error, deepcopy +from re import S +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +import logging +from sklearn.metrics import confusion_matrix +import seaborn as sns +import matplotlib.pyplot as plt + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.models.layers import DropPath, to_2tuple, trunc_normal_ +import torch.fft +from torch.nn.modules.container import Sequential +from torchvision import transforms, datasets +from torch.utils.data import DataLoader, random_split +import logging +import math +from functools import partial +from collections import OrderedDict +import torch.optim as optim + + +# Model Initialization +model = GFNet(img_size=224, patch_size=16, in_chans=3, num_classes=2, embed_dim=768, depth=12, mlp_ratio=4, drop_rate=0, drop_path_rate=0.) 
+model.head = nn.Linear(model.num_features, 2) # Assuming binary classification
+model.to(device) # Move model to GPU
+# Train the model
+trained_model = train_model(model, train_loader, val_loader, num_epochs=25, learning_rate=0.001)
+
+# Evaluate the model
+test_accuracy = evaluate_model(trained_model, test_loader)
+
+# Save the model
+torch.save(trained_model.state_dict(), 'gfnet_adni_model2.pth')
+
+# Load the model for inference
+model.load_state_dict(torch.load('gfnet_adni_model2.pth'))
+model.to(device) # Ensure the model is on the GPU for inference
+model.eval()
\ No newline at end of file
diff --git a/train.py b/train.py
new file mode 100644
index 000000000..a66926dd9
--- /dev/null
+++ b/train.py
@@ -0,0 +1,101 @@
+import dataset
+from dataset import device, _logger
+import math
+import logging
+from functools import partial
+from collections import OrderedDict
+from copy import Error, deepcopy
+from re import S
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import logging
+from sklearn.metrics import confusion_matrix
+import seaborn as sns
+import matplotlib.pyplot as plt
+
+from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
+from timm.models.layers import DropPath, to_2tuple, trunc_normal_
+import torch.fft
+from torch.nn.modules.container import Sequential
+from torchvision import transforms, datasets
+from torch.utils.data import DataLoader, random_split
+import logging
+import math
+from functools import partial
+from collections import OrderedDict
+import torch.optim as optim
+
+
+# Training function
+def train_model(model, train_loader, val_loader, num_epochs=25, learning_rate=0.001):
+    criterion = nn.CrossEntropyLoss()
+    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
+
+    for epoch in range(num_epochs):
+        model.train()
+        running_loss = 0.0
+        for inputs, labels in train_loader:
+            inputs, labels = inputs.to(device), labels.to(device)  # Move data to GPU
+            optimizer.zero_grad()
+            outputs = model(inputs)
+            loss = criterion(outputs, labels)
+            loss.backward()
+            optimizer.step()
+            running_loss += loss.item()
+        _logger.info(
+            f"Epoch {epoch+1}/{num_epochs}, Loss: {running_loss/len(train_loader)}"
+        )
+
+        # Validation
+        model.eval()
+        correct = 0
+        total = 0
+        with torch.no_grad():
+            for inputs, labels in val_loader:
+                inputs, labels = inputs.to(device), labels.to(
+                    device
+                )  # Move data to GPU
+                outputs = model(inputs)
+                _, predicted = torch.max(outputs, 1)
+                total += labels.size(0)
+                correct += (predicted == labels).sum().item()
+        _logger.info(f"Validation Accuracy: {correct/total}")
+
+    return model
+
+# Evaluation function
+def evaluate_model(model, test_loader):
+    model.eval()
+    correct = 0
+    total = 0
+    all_preds = []
+    all_labels = []
+    with torch.no_grad():
+        for inputs, labels in test_loader:
+            inputs, labels = inputs.to(device), labels.to(device)  # Move data to GPU
+            outputs = model(inputs)
+            _, predicted = torch.max(outputs, 1)
+            total += labels.size(0)
+            correct += (predicted == labels).sum().item()
+            all_preds.extend(predicted.cpu().numpy())
+            all_labels.extend(labels.cpu().numpy())
+
+    cm = confusion_matrix(all_labels, all_preds)
+    _logger.info(f'Confusion Matrix:\n{cm}')
+
+    # Plot confusion matrix
+    plt.figure(figsize=(10, 7))
+    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=['Normal', 'AD'], yticklabels=['Normal', 'AD'])
+    plt.xlabel('Predicted')
+    plt.ylabel('True')
+    plt.title('Confusion Matrix')
+    plt.show()
+
+    # Compute accuracy
+    correct = sum(np.array(all_preds) == np.array(all_labels))
+    total = len(all_labels)
+    test_accuracy = correct / total
+    _logger.info(f'Test Accuracy: {test_accuracy}')
+    return test_accuracy

From 46824c26b476846e106ffdfc8e5fb041d2058af7 Mon Sep 17 00:00:00 2001
From: Richard Yu
Date: Mon, 28 Oct 2024 06:05:43 +1000
Subject: [PATCH 2/8] added loss function

---
 README.md                           |  29 +++++++++---------
 __pycache__/dataset.cpython-312.pyc | Bin 0 -> 3024 bytes
 __pycache__/module.cpython-312.pyc  | Bin 0 -> 24186 bytes
 __pycache__/train.cpython-312.pyc   | Bin 0 -> 5345 bytes
 predict.py                          |  29 +++++++++++-------
 train.py                            |  45 ++++++++++++++++++++--------
 6 files changed, 66 insertions(+), 37 deletions(-)
 create mode 100644 __pycache__/dataset.cpython-312.pyc
 create mode 100644 __pycache__/module.cpython-312.pyc
 create mode 100644 __pycache__/train.cpython-312.pyc

diff --git a/README.md b/README.md
index 3a10f6515..b74eaf94f 100644
--- a/README.md
+++ b/README.md
@@ -1,19 +1,18 @@
-# Pattern Analysis
-Pattern Analysis of various datasets by COMP3710 students in 2024 at the University of Queensland.
+# 1 Training parameters
+- Image input size: 3, 224, 224
+- Training batch size: 32
+- Learning rate: 0.001
+- Epochs = 25
 
-We create pattern recognition and image processing library for Tensorflow (TF), PyTorch or JAX.
+Model is trained using 1200 images, validated using 900 images and tested using 300 images.
 
-This library is created and maintained by The University of Queensland [COMP3710](https://my.uq.edu.au/programs-courses/course.html?course_code=comp3710) students.
+# 2 Model Evaluation
 
-The library includes the following implemented in Tensorflow:
-* fractals
-* recognition problems
+Final accuracy: 93.88% of the unseen test data is classified correctly.
 
-In the recognition folder, you will find many recognition problems solved including:
-* segmentation
-* classification
-* graph neural networks
-* StyleGAN
-* Stable diffusion
-* transformers
-etc.
+The image below shows the accuracy as a confusion matrix.
+
+![Alt text](images/confusion_matrix.png)
+
+# 3 Training process
+The train.py module trains the model. Data is first loaded and preprocessed. Then the model is trained using the training data. The model is then evaluated using the test data. The model weights are saved in the model directory.
diff --git a/__pycache__/dataset.cpython-312.pyc b/__pycache__/dataset.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d811e77f981b8a366694222564897ae1318f5eba
GIT binary patch
[base85 binary data omitted]

diff --git a/__pycache__/module.cpython-312.pyc b/__pycache__/module.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..02df82672fe3c5872c0b4315b27c64a45d4dee14
GIT binary patch
[base85 binary data omitted]

diff --git a/__pycache__/train.cpython-312.pyc b/__pycache__/train.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a2fa2cbd618da196909268f156c186a9f3b6e031
GIT binary patch
[base85 binary data omitted]
diff --git a/predict.py b/predict.py
index 00816a531..697ae7640 100644
--- a/predict.py
+++ b/predict.py
@@ -29,20 +29,29 @@ import torch.optim as optim
 
 
-# Model Initialization
-model = GFNet(img_size=224, patch_size=16, in_chans=3, num_classes=2, embed_dim=768, depth=12, mlp_ratio=4, drop_rate=0, drop_path_rate=0.)
+model = GFNet(
+    img_size=224,
+    patch_size=16,
+    in_chans=3,
+    num_classes=2,
+    embed_dim=768,
+    depth=12,
+    mlp_ratio=4,
+    drop_rate=0,
+    drop_path_rate=0.0,
+)
 model.head = nn.Linear(model.num_features, 2)  # Assuming binary classification
-model.to(device) # Move model to GPU
-# Train the model
-trained_model = train_model(model, train_loader, val_loader, num_epochs=25, learning_rate=0.001)
+model.to(device)
+
+trained_model = train_model(
+    model, train_loader, val_loader, num_epochs=25, learning_rate=0.001
+)
 
-# Evaluate the model
 test_accuracy = evaluate_model(trained_model, test_loader)
 
-# Save the model
-torch.save(trained_model.state_dict(), 'gfnet_adni_model2.pth')
+torch.save(trained_model.state_dict(), "gfnet_adni_model2.pth")
 
 # Load the model for inference
-model.load_state_dict(torch.load('gfnet_adni_model2.pth'))
+model.load_state_dict(torch.load("gfnet_adni_model2.pth"))
 model.to(device)  # Ensure the model is on the GPU for inference
-model.eval()
\ No newline at end of file
+model.eval()
diff --git a/train.py b/train.py
index a66926dd9..0ef70c8f3 100644
--- a/train.py
+++ b/train.py
@@ -33,6 +33,9 @@ def train_model(model, train_loader, val_loader, num_epochs=25, learning_rate=0.
criterion = nn.CrossEntropyLoss() optimizer = optim.Adam(model.parameters(), lr=learning_rate) + # Initialize a list to store loss values + loss_values = [] + for epoch in range(num_epochs): model.train() running_loss = 0.0 @@ -44,9 +47,10 @@ def train_model(model, train_loader, val_loader, num_epochs=25, learning_rate=0. loss.backward() optimizer.step() running_loss += loss.item() - _logger.info( - f"Epoch {epoch+1}/{num_epochs}, Loss: {running_loss/len(train_loader)}" - ) + + epoch_loss = running_loss / len(train_loader) + loss_values.append(epoch_loss) + _logger.info(f"Epoch {epoch+1}/{num_epochs}, Loss: {epoch_loss:.4f}") # Validation model.eval() @@ -63,8 +67,18 @@ def train_model(model, train_loader, val_loader, num_epochs=25, learning_rate=0. correct += (predicted == labels).sum().item() _logger.info(f"Validation Accuracy: {correct/total}") + # Plot the loss values after training + plt.figure(figsize=(10, 5)) + plt.plot(loss_values, label="Training Loss") + plt.xlabel("Epoch") + plt.ylabel("Loss") + plt.title("Training Loss Over Epochs") + plt.legend() + plt.show() + return model + # Evaluation function def evaluate_model(model, test_loader): model.eval() @@ -81,21 +95,28 @@ def evaluate_model(model, test_loader): correct += (predicted == labels).sum().item() all_preds.extend(predicted.cpu().numpy()) all_labels.extend(labels.cpu().numpy()) - + cm = confusion_matrix(all_labels, all_preds) - _logger.info(f'Confusion Matrix:\n{cm}') - + _logger.info(f"Confusion Matrix:\n{cm}") + # Plot confusion matrix plt.figure(figsize=(10, 7)) - sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=['Normal', 'AD'], yticklabels=['Normal', 'AD']) - plt.xlabel('Predicted') - plt.ylabel('True') - plt.title('Confusion Matrix') + sns.heatmap( + cm, + annot=True, + fmt="d", + cmap="Blues", + xticklabels=["Normal", "AD"], + yticklabels=["Normal", "AD"], + ) + plt.xlabel("Predicted") + plt.ylabel("True") + plt.title("Confusion Matrix") plt.show() - + # Compute accuracy correct = sum(np.array(all_preds) == np.array(all_labels)) total = len(all_labels) test_accuracy = correct / total - _logger.info(f'Test Accuracy: {test_accuracy}') + _logger.info(f"Test Accuracy: {test_accuracy}") return test_accuracy From b448425a12f1a879a36824229cb5e7ef9d6074f9 Mon Sep 17 00:00:00 2001 From: Richard Yu Date: Mon, 28 Oct 2024 06:20:35 +1000 Subject: [PATCH 3/8] comments added --- README.md | 3 +- __pycache__/dataset.cpython-312.pyc | Bin 3024 -> 3334 bytes __pycache__/train.cpython-312.pyc | Bin 5345 -> 5966 bytes dataset.py | 47 +++++++++++++++++++++------- module.py | 4 +++ 5 files changed, 41 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index b74eaf94f..99c03088a 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,8 @@ - Learning rate: 0.001 - Epochs = 25 -Model is trained using 1200 images, validated using 900 images and tested using 300 images. +Model is trained using 80% of the data and tested using 20% of the data. 
+```
 
 # 2 Model Evaluation
 
diff --git a/__pycache__/dataset.cpython-312.pyc b/__pycache__/dataset.cpython-312.pyc
index d811e77f981b8a366694222564897ae1318f5eba..92364fe500afc9848d6c277946eeef746add743f 100644
GIT binary patch
[base85 delta data omitted]

diff --git a/__pycache__/train.cpython-312.pyc b/__pycache__/train.cpython-312.pyc
index a2fa2cbd618da196909268f156c186a9f3b6e031..03ca16915a8803ee4f7ac9989bfaaa8936d2655a 100644
GIT binary patch
[base85 delta data omitted]
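A note for readers between patches: the heart of the module.py added in PATCH 1/8 is the GlobalFilter layer, which mixes tokens by an element-wise complex multiplication in the 2D Fourier domain instead of by self-attention. The following is a minimal, self-contained sketch of that round trip, not the committed code; the shapes assume the 14x14 patch grid that a 224-pixel input produces with patch size 16.

```
import math
import torch

B, N, C = 2, 196, 768          # 14x14 patch tokens from a 224px image, embed dim 768
x = torch.randn(B, N, C)

a = b = int(math.sqrt(N))      # recover the 14x14 spatial grid
x = x.view(B, a, b, C).to(torch.float32)

# rfft2 over the two spatial dims keeps only w = b // 2 + 1 frequency columns.
freq = torch.fft.rfft2(x, dim=(1, 2), norm="ortho")   # (2, 14, 8, 768), complex

# The learnable filter: one complex value per (frequency, channel) slot,
# stored as a trailing real/imag pair exactly as GlobalFilter does.
complex_weight = torch.randn(a, b // 2 + 1, C, 2) * 0.02
freq = freq * torch.view_as_complex(complex_weight)

out = torch.fft.irfft2(freq, s=(a, b), dim=(1, 2), norm="ortho")
print(out.reshape(B, N, C).shape)                     # torch.Size([2, 196, 768])
```

Because every frequency gets its own weight, the layer has a global receptive field at FFT cost (O(N log N)), which is the trade against quadratic self-attention that motivates this architecture.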
From: Richard Yu
Date: Mon, 28 Oct 2024 06:28:52 +1000
Subject: [PATCH 4/8] comments added

---
 README.md | 7 +++++--
 module.py | 2 +-
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 99c03088a..7fda88f02 100644
--- a/README.md
+++ b/README.md
@@ -4,8 +4,11 @@
 - Learning rate: 0.001
 - Epochs = 25
 
-Model is trained using 80% of the data and tested using 20% of the data.
-```
+Model is trained using 80% of the data and tested using 20% of the data.
+
+Total training images: 21520
+Total validation images: 7200
+Total test images: 1800
 
 # 2 Model Evaluation
 
diff --git a/module.py b/module.py
index 378e8c69b..3548425ce 100644
--- a/module.py
+++ b/module.py
@@ -170,7 +170,7 @@ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
 
     def forward(self, x):
         B, C, H, W = x.shape
-        # FIXME look at relaxing size constraints
+        # FIXME
         assert (
             H == self.img_size[0] and W == self.img_size[1]
         ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."

From fed2e9c2458b6b07e3c79608425e0f4e2be86b2e Mon Sep 17 00:00:00 2001
From: Richard Yu
Date: Mon, 28 Oct 2024 06:37:53 +1000
Subject: [PATCH 5/8] readme updated

---
 README.md | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 51 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 7fda88f02..312af519b 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,56 @@
 Total training images: 21520
 Total validation images: 7200
 Total test images: 1800
 
-# 2 Model Evaluation
+The following code is used to load the data and split it into training, validation and test sets.
+
+```
+train_dataset = datasets.ImageFolder(
+    root=r"ADNI_AD_NC_2D\AD_NC\train", transform=transform
+)
+test_dataset = datasets.ImageFolder(
+    root=r"ADNI_AD_NC_2D\AD_NC\test", transform=transform
+)
+
+
+val_size = int(0.8 * len(test_dataset))
+test_size = len(test_dataset) - val_size
+val_dataset, test_dataset = random_split(test_dataset, [val_size, test_size])
+
+train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
+val_loader = DataLoader(val_dataset, batch_size=32, shuffle=False)
+test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)
+
+```
+
+# 2 Libraries used
+
+The following are the main libraries used.
+timm
+```
+conda install conda-forge::timm
+```
+
+torch
+```
+conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia
+```
+
+sklearn
+```
+conda install anaconda::scikit-learn
+```
+
+seaborn
+```
+conda install anaconda::seaborn
+```
+
+numpy
+```
+conda install anaconda::numpy
+```
+
+# 3 Model Evaluation
 
 Final accuracy: 93.88% of the unseen test data is classified correctly.
 
@@ -18,5 +67,5 @@ The image below shows the accuracy as a confusion matrix.
 
 ![Alt text](images/confusion_matrix.png)
 
-# 3 Training process
+# 4 Training process
 The train.py module trains the model. Data is first loaded and preprocessed. Then the model is trained using the training data. The model is then evaluated using the test data. The model weights are saved in the model directory.

From d343a20ff7a61d723bbfc9f31931d3f84371d464 Mon Sep 17 00:00:00 2001
From: Richard Yu
Date: Mon, 28 Oct 2024 07:03:40 +1000
Subject: [PATCH 6/8] module changed and final comments added

---
 module.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/module.py b/module.py
index 3548425ce..a128d6375 100644
--- a/module.py
+++ b/module.py
@@ -170,7 +170,7 @@ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
 
     def forward(self, x):
         B, C, H, W = x.shape
-        # FIXME
+        # FIXME
         assert (
             H == self.img_size[0] and W == self.img_size[1]
         ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
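One more aside before the final patches: the 93.88% figure quoted in the README comes out of evaluate_model in train.py, which already builds the confusion matrix, and the same matrix also yields the per-class rates that matter for a diagnosis task. A small sketch of recovering those numbers, with made-up counts standing in for the real test run:

```
import numpy as np
from sklearn.metrics import confusion_matrix

# Hypothetical labels/predictions standing in for the real test set.
y_true = np.array([0, 0, 0, 1, 1, 1, 1, 0])   # 0 = Normal, 1 = AD
y_pred = np.array([0, 0, 1, 1, 1, 0, 1, 0])

cm = confusion_matrix(y_true, y_pred)   # rows: true class, cols: predicted class
tn, fp, fn, tp = cm.ravel()             # binary case flattens in this order

accuracy = (tp + tn) / cm.sum()         # what evaluate_model reports
sensitivity = tp / (tp + fn)            # AD recall: how many AD scans are caught
specificity = tn / (tn + fp)            # Normal recall

print(f"accuracy={accuracy:.4f} sensitivity={sensitivity:.4f} specificity={specificity:.4f}")
```

The off-diagonal asymmetry is the point: a false negative (missed AD) is usually costlier than a false positive, so sensitivity is worth reporting next to the headline accuracy.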
From ad9747ad1a4e600c449c9f9dba2d854f1322123b Mon Sep 17 00:00:00 2001
From: Richard Yu
Date: Mon, 28 Oct 2024 07:03:56 +1000
Subject: [PATCH 7/8] module changed and final comments added

---
 module.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/module.py b/module.py
index a128d6375..e09f6b006 100644
--- a/module.py
+++ b/module.py
@@ -26,7 +26,7 @@ import torch.optim as optim
 
 
-# The model was mainly inspired by https://github.com/raoyongming/GFNet/blob/master/main_gfnet.py
+# The model was mainly inspired by raoyongming's GFNet implementation
 
 _logger = logging.getLogger(__name__)

From 604c608b5c220bf2c90d2cfd99e9a1965bb15e8b Mon Sep 17 00:00:00 2001
From: Richard Yu
Date: Mon, 28 Oct 2024 07:16:52 +1000
Subject: [PATCH 8/8] loss graph added

---
 README.md | 3 +++
 image.png | Bin 0 -> 22159 bytes
 2 files changed, 3 insertions(+)
 create mode 100644 image.png

diff --git a/README.md b/README.md
index 312af519b..449da65f9 100644
--- a/README.md
+++ b/README.md
@@ -67,5 +67,8 @@ The image below shows the accuracy as a confusion matrix.
 
 ![Alt text](images/confusion_matrix.png)
 
+The training loss graph is shown below.
+![alt text](images/LOSS.png)
+
 # 4 Training process
 The train.py module trains the model. Data is first loaded and preprocessed. Then the model is trained using the training data. The model is then evaluated using the test data. The model weights are saved in the model directory.
diff --git a/image.png b/image.png
new file mode 100644
index 0000000000000000000000000000000000000000..108cff1f4dc97d6feea9f89cef1d3f34bd4e98ed
GIT binary patch
[base85 binary data omitted]
zCwkk*!@|N&vCCkGzbdn=uAZdf`-a#ugphlp7!)x;G(5|=-9FCFflPw+}y+^6kHX(B{UXt3EN-4vjlZW zd$IeDTi^rWtdX_nB`a;L_T^!MtoKcJI(OGV@5D=WqxMYvI3VCATnqa-T$^+Iq;(+_ z3SlQ8`e;i}50PWjAS?N_b5Xh*|K^RD96pptgI~^DI-;J7Cuw5kv_~NG`G7{s(LTBz zPD#DQz$fW1l#mQ(Z55e3PHj1Gg?nJ#7|*Q_a(@q&0ncu(FS!+yM26&dDwFB`eo`M0 zFl4UsVV$wTe;qU)XBR{CbDo<8_|Zbul?)-8Q(`bBlu4=Twnbyr5^1MU*NXL2vd|J! z#v5GI9!UO)=p{Na&5fNQ;Ag*_i~!u}#SGFB%sakR%=XR-H~X-N>nN{kfryg8W=z-1 zrqxoAJ8awmyOO9C(zOA=;Vj^g+c{pQhwI^s*-#a!3%B_W59_eG8iy4wGHF{^0^qEb z6Rz(E+}!kdABvb4#zH)*S=;PT75B&CB^o;ZDR@qJu)!`-uMeT?^!eH zLcNX4Va}2Yy(yo&L=I5LUZ|*}G$1eJO?USgyxqA{dZD>fr1#bdoC6Tl9y$91HJ9bq zi%eFT9TJ)NbrrewtF188FyHMeKCZ^c%gp?>zH1#=o5DD2(c0WBFgCjeg*UxSJD5Lu@h(sS4KoVy4hYQc9GI{eYtZTM_ycqe_D~6j+$>+9 zDrubI?JMC0$MIjvW+CPz#n@AH{+yq3KN7f( z=4bwqzB0Cg2o^S3tzbYS=p#{)%Fvprlte#WvSbQ0b22+e$4MJoTRISceM&GRp-KVM z%Fm>(qV)Q&R{e7RqX;BQXuYh79fYzGE^X-wh$%X4+Dp5O^Y!%njQDj60M;gGbBiXj zsNEjf;A`m3QdQ}q?C6t9>A;6bNNqU@5m$qRCCtKB*#3Vmjw*VbM7eEFEvk+a>UKI& z71u*&=>i#-aIXW!g)oM78S^9e|09fqm!D6bKYM%=vvj6G-HrgC)!-WF@Lq&-+ER5W zVQK2#=?{|)D;s>^@Bbcj!4_*(pCd<(B!o;&$WWhg*akww@mDf3)+6pEfn%4ptv~C* zlw;;DJ2V=GvWQeDWWJJ(lPcg&H$_O9fi0%N-3~EGk^!}L$b+SSn0*kuC@&O2@C#m9 zzq2-5E&#v<1<~67+OLX}<)%sC+J}MA1KW*GFGoQnSaU4wxh4uBSAgEGFEhC0x?Ay) zTj5HjlATAi{*ZGZ*Qu0Uc9uR2fn@nn7E``P zQ!{{opE|lS1x^iEWE;K5t(g@uq9z`}ie*P3T?b?TP^wG;eoA{9?GbV_@LrXZ#Xj&2 zPeOxu^+vcE=dZaCiJKvuuDP4(*x{@f*!J2KZYCCS_9%rMu(teDJl0%$v~C0W28s{& zw=l6Fae0U&il_FnB4I=>xulnU4ML-y-5f7HBsmGpj;PC0#k`}z*a~A+XS*rdpW(9f zZOh(q2YR<@S^ogHCbX`n8uXY(A);L!wfU4w2`IhL%!?3_@&)PcKBT%4i?4%a{Z;Lj zW^fcYmM0s&IU~!vnK`-y^F;n^5RL>i*{e$UAnx!qJ`oHDj3xi|Cs9Vw2pF^OZr-