From 0e1bf50129aaae3f5447bb8dd801c7aaafc35e39 Mon Sep 17 00:00:00 2001 From: Tejas Kamtam Date: Thu, 27 Jan 2022 20:37:23 -0600 Subject: [PATCH 01/14] sauce --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index db4f44e..e7c7759 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ __pycache__ .vscode - +/sauce/ training_summaries From b6d058767124fdc51aa918f424f5c04097a7885d Mon Sep 17 00:00:00 2001 From: Anand Gowda Date: Thu, 27 Jan 2022 20:08:42 -0800 Subject: [PATCH 02/14] Updated StartingDataset.py --- .gitignore | 2 +- constants.py | 1 + data/StartingDataset.py | 21 +++++++++++++++------ 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/.gitignore b/.gitignore index db4f44e..a2169ec 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ __pycache__ .vscode - +sauce/ training_summaries diff --git a/constants.py b/constants.py index 987c6c9..6bc6fee 100644 --- a/constants.py +++ b/constants.py @@ -1,3 +1,4 @@ EPOCHS = 100 BATCH_SIZE = 32 N_EVAL = 100 +DATA = "../50whales/sauce" \ No newline at end of file diff --git a/data/StartingDataset.py b/data/StartingDataset.py index 232485f..a4d27bf 100644 --- a/data/StartingDataset.py +++ b/data/StartingDataset.py @@ -1,4 +1,8 @@ import torch +from PIL import Image +from PIL import ImageOps +import pandas as pd +import constants class StartingDataset(torch.utils.data.Dataset): @@ -6,14 +10,19 @@ class StartingDataset(torch.utils.data.Dataset): Dataset that contains 100000 3x224x224 black images (all zeros). """ - def __init__(self): - pass + def __init__(self, path): + self.path = path + self.images, self.labels = pd.read_csv(constants.DATA + "/train.csv") def __getitem__(self, index): - inputs = torch.zeros([3, 224, 224]) - label = 0 + image = Image.open(constants.DATA + self.path + self.images[index]) + label = self.labels[index] + + image = image.resize(224, 448) + image = ImageOps.grayscale(image) + + return image, label - return inputs, label def __len__(self): - return 10000 + return len(self.labels) From 1020b8d95fb29994fc3271d939beb2135f6d5ae8 Mon Sep 17 00:00:00 2001 From: Anand Gowda Date: Mon, 31 Jan 2022 19:48:13 -0800 Subject: [PATCH 03/14] Fixed Dataset class and implemented basic CNN --- constants.py | 2 +- data/StartingDataset.py | 15 ++++++++--- main.py | 13 +++++++--- networks/StartingNetwork.py | 42 +++++++++++++++++++++++++------ train_functions/starting_train.py | 32 +++++++++++++++++++++-- 5 files changed, 86 insertions(+), 18 deletions(-) diff --git a/constants.py b/constants.py index 6bc6fee..2b910bb 100644 --- a/constants.py +++ b/constants.py @@ -1,4 +1,4 @@ EPOCHS = 100 -BATCH_SIZE = 32 +BATCH_SIZE = 128 N_EVAL = 100 DATA = "../50whales/sauce" \ No newline at end of file diff --git a/data/StartingDataset.py b/data/StartingDataset.py index a4d27bf..f599237 100644 --- a/data/StartingDataset.py +++ b/data/StartingDataset.py @@ -3,6 +3,7 @@ from PIL import ImageOps import pandas as pd import constants +import torchvision class StartingDataset(torch.utils.data.Dataset): @@ -12,16 +13,22 @@ class StartingDataset(torch.utils.data.Dataset): def __init__(self, path): self.path = path - self.images, self.labels = pd.read_csv(constants.DATA + "/train.csv") + self.data = pd.read_csv(constants.DATA + "/train.csv") + self.data.rename(columns=self.data.iloc[0]).drop(self.data.index[0]) + self.images = self.data.iloc[:, 0] + self.labels = self.data.iloc[:, 1] + self.transition = list(set(self.labels)) + self.whales = self.labels.replace(self.transition, 
list(range(1,5006)))
+
 
     def __getitem__(self, index):
         image = Image.open(constants.DATA + self.path + self.images[index])
-        label = self.labels[index]
+        label = self.whales[index]
 
-        image = image.resize(224, 448)
+        image = image.resize((448, 224))
         image = ImageOps.grayscale(image)
 
-        return image, label
+        return torchvision.transforms.functional.pil_to_tensor(image), label
 
 
     def __len__(self):
diff --git a/main.py b/main.py
index 820f0b1..c3ef5d3 100644
--- a/main.py
+++ b/main.py
@@ -1,24 +1,28 @@
 import os
 
 import constants
+import torch
 from data.StartingDataset import StartingDataset
 from networks.StartingNetwork import StartingNetwork
 from train_functions.starting_train import starting_train
+from PIL import Image
 
 
 def main():
     # Get command line arguments
     hyperparameters = {"epochs": constants.EPOCHS, "batch_size": constants.BATCH_SIZE}
 
-    # TODO: Add GPU support. This line of code might be helpful.
-    # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    # Add GPU support. This line of code might be helpful.
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
     print("Epochs:", constants.EPOCHS)
     print("Batch size:", constants.BATCH_SIZE)
 
     # Initialize dataset and model. Then train the model!
-    train_dataset = StartingDataset()
-    val_dataset = StartingDataset()
+    data = StartingDataset("/train/")
+    train_size = int(0.7 * len(data))
+    test_size = len(data) - train_size
+    train_dataset, val_dataset = torch.utils.data.random_split(data, [train_size, test_size])
     model = StartingNetwork()
     starting_train(
         train_dataset=train_dataset,
@@ -26,6 +30,7 @@ def main():
         model=model,
         hyperparameters=hyperparameters,
         n_eval=constants.N_EVAL,
+        device = device
     )
diff --git a/networks/StartingNetwork.py b/networks/StartingNetwork.py
index 9924108..5032931 100644
--- a/networks/StartingNetwork.py
+++ b/networks/StartingNetwork.py
@@ -1,20 +1,48 @@
 import torch
 import torch.nn as nn
+import torch.nn.functional as F
 
 
-class StartingNetwork(torch.nn.Module):
+class StartingNetwork(nn.Module):
     """
     Basic logistic regression on 224x224x3 images.
     """
 
     def __init__(self):
         super().__init__()
-        self.flatten = nn.Flatten()
-        self.fc = nn.Linear(224 * 224 * 3, 1)
-        self.sigmoid = nn.Sigmoid()
+
+        self.conv1 = nn.Conv2d(1, 4, kernel_size = 5, padding = 2)
+        self.conv2 = nn.Conv2d(4, 8, kernel_size = 3, padding = 1)
+
+        self.pool = nn.MaxPool2d(2, 2)
+
+        self.fc1 = nn.Linear(8 * 112 * 56, 20020)
+        self.fc2 = nn.Linear(20020, 10010)
+        self.fc3 = nn.Linear(10010 ,5005)
 
     def forward(self, x):
-        x = self.flatten(x)
-        x = self.fc(x)
-        x = self.sigmoid(x)
+        x = x.float()
+
+        # Forward prop
+        # (n, 1, 448, 224)
+        x = self.conv1(x)
+        x = F.relu(x)
+        # (n, 4, 448, 224)
+        x = self.pool(x)
+        # (n, 4, 224, 112)
+        x = self.conv2(x)
+        x = F.relu(x)
+        # (n, 8, 224, 112)
+        x = self.pool(x)
+        # (n, 8, 112, 56)
+        x = torch.reshape(x, (-1, 8 * 112 * 56))
+        # (n, 8 * 112 * 56)
+        x = self.fc1(x)
+        x = F.relu(x)
+        # (n, 20020)
+        x = self.fc2(x)
+        x = F.relu(x)
+        # (n, 10010)
+        x = self.fc3(x)
+        # (n, 5005)
         return x
diff --git a/train_functions/starting_train.py b/train_functions/starting_train.py
index 9bab2a9..743c22b 100644
--- a/train_functions/starting_train.py
+++ b/train_functions/starting_train.py
@@ -4,7 +4,7 @@
 from tqdm import tqdm
 
 
-def starting_train(train_dataset, val_dataset, model, hyperparameters, n_eval):
+def starting_train(train_dataset, val_dataset, model, hyperparameters, n_eval, device):
     """
     Trains and evaluates a model. 
@@ -31,6 +31,9 @@ def starting_train(train_dataset, val_dataset, model, hyperparameters, n_eval): optimizer = optim.Adam(model.parameters()) loss_fn = nn.CrossEntropyLoss() + # Move the model to the GPU + model = model.to(device) + step = 0 for epoch in range(epochs): print(f"Epoch {epoch + 1} of {epochs}") @@ -38,6 +41,18 @@ def starting_train(train_dataset, val_dataset, model, hyperparameters, n_eval): # Loop over each batch in the dataset for batch in tqdm(train_loader): # TODO: Backpropagation and gradient descent + images, labels = batch + labels = torch.stack(list(labels), dim=0) + + images = images.to(device) + labels = labels.to(device) + + outputs = model(images) + + loss = loss_fn(outputs, labels) + loss.backward() # Compute gradients + optimizer.step() # Update all the weights with the gradients you just calculated + optimizer.zero_grad() # Periodically evaluate our model + log to Tensorboard if step % n_eval == 0: @@ -45,15 +60,25 @@ def starting_train(train_dataset, val_dataset, model, hyperparameters, n_eval): # Compute training loss and accuracy. # Log the results to Tensorboard. + with torch.no_grad(): + images = images.to(device) + labels = labels.to(device) + + predictions = torch.argmax(outputs, dim=1) + + accuracy = compute_accuracy(predictions, labels) + print('Accuracy: ', accuracy) + # TODO: # Compute validation loss and accuracy. # Log the results to Tensorboard. # Don't forget to turn off gradient calculations! evaluate(val_loader, model, loss_fn) + pass step += 1 - print() + print('Epoch:', epoch, 'Loss:', loss.item()) def compute_accuracy(outputs, labels): @@ -79,4 +104,7 @@ def evaluate(val_loader, model, loss_fn): TODO! """ + + model.eval() + model.train() pass From 055b4453d81fc9f54f54dc2893eccd8bbb277c75 Mon Sep 17 00:00:00 2001 From: Anand Gowda Date: Mon, 31 Jan 2022 19:59:36 -0800 Subject: [PATCH 04/14] Updated constants.py --- constants.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/constants.py b/constants.py index 2b910bb..950ee93 100644 --- a/constants.py +++ b/constants.py @@ -1,4 +1,4 @@ -EPOCHS = 100 -BATCH_SIZE = 128 +EPOCHS = 10 +BATCH_SIZE = 32 N_EVAL = 100 DATA = "../50whales/sauce" \ No newline at end of file From ae5bb5c8fa11747a476467ee93f5ac7506ab4284 Mon Sep 17 00:00:00 2001 From: Anand Gowda Date: Thu, 3 Feb 2022 20:00:26 -0800 Subject: [PATCH 05/14] Fixed training function --- constants.py | 6 ++-- data/StartingDataset.py | 3 +- main.py | 1 + networks/StartingNetwork.py | 23 +++++++----- train_functions/starting_train.py | 58 +++++++++++++++++++++++-------- 5 files changed, 63 insertions(+), 28 deletions(-) diff --git a/constants.py b/constants.py index 950ee93..6c5c7a7 100644 --- a/constants.py +++ b/constants.py @@ -1,4 +1,4 @@ -EPOCHS = 10 -BATCH_SIZE = 32 -N_EVAL = 100 +EPOCHS = 1 +BATCH_SIZE = 16 +N_EVAL = 1 DATA = "../50whales/sauce" \ No newline at end of file diff --git a/data/StartingDataset.py b/data/StartingDataset.py index f599237..1a25fb0 100644 --- a/data/StartingDataset.py +++ b/data/StartingDataset.py @@ -18,8 +18,7 @@ def __init__(self, path): self.images = self.data.iloc[:, 0] self.labels = self.data.iloc[:, 1] self.transition = list(set(self.labels)) - self.whales = self.labels.replace(self.transition, list(range(1,5006))) - + self.whales = self.labels.replace(self.transition, list(range(5005))) def __getitem__(self, index): image = Image.open(constants.DATA + self.path + self.images[index]) diff --git a/main.py b/main.py index c3ef5d3..de740f7 100644 --- a/main.py +++ b/main.py @@ -14,6 +14,7 
@@ def main(): # Add GPU support. This line of code might be helpful. device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + # device = torch.device("cpu") print("Epochs:", constants.EPOCHS) print("Batch size:", constants.BATCH_SIZE) diff --git a/networks/StartingNetwork.py b/networks/StartingNetwork.py index 5032931..5e10af4 100644 --- a/networks/StartingNetwork.py +++ b/networks/StartingNetwork.py @@ -13,12 +13,13 @@ def __init__(self): self.conv1 = nn.Conv2d(1, 4, kernel_size = 5, padding = 2) self.conv2 = nn.Conv2d(4, 8, kernel_size = 3, padding = 1) + self.conv3 = nn.Conv2d(8, 16, kernel_size = 3, padding = 1) self.pool = nn.MaxPool2d(2, 2) - self.fc1 = nn.Linear(8 * 112 * 56, 20020) - self.fc2 = nn.Linear(20020, 10010) - self.fc3 = nn.Linear(10010 ,5005) + self.fc1 = nn.Linear(16 * 56 * 28, 5005) + # self.fc2 = nn.Linear(20020, 10010) + # self.fc3 = nn.Linear(10010 ,5005) def forward(self, x): x = x.float() @@ -35,14 +36,20 @@ def forward(self, x): # (n, 8, 224, 112) x = self.pool(x) # (n, 8, 112, 56) - x = torch.reshape(x, (-1, 8 * 112 * 56)) + x = self.conv3(x) + x = F.relu(x) + # (n, 16, 112, 56) + x = self.pool(x) + # (n, 16, 56, 28) + + x = torch.reshape(x, (-1, 16 * 56 * 28)) # (n, 8 * 112 * 56) x = self.fc1(x) - x = F.relu(x) + # x = F.relu(x) # (n, 20020) - x = self.fc2(x) - x = F.relu(x) + # x = self.fc2(x) + # x = F.relu(x) # (n, 10010) - x = self.fc3(x) + # x = self.fc3(x) # (n, 5005) return x diff --git a/train_functions/starting_train.py b/train_functions/starting_train.py index 743c22b..917a2a2 100644 --- a/train_functions/starting_train.py +++ b/train_functions/starting_train.py @@ -2,6 +2,7 @@ import torch.nn as nn import torch.optim as optim from tqdm import tqdm +# from torch.utils.tensorboard import SummaryWriter def starting_train(train_dataset, val_dataset, model, hyperparameters, n_eval, device): @@ -34,7 +35,9 @@ def starting_train(train_dataset, val_dataset, model, hyperparameters, n_eval, d # Move the model to the GPU model = model.to(device) - step = 0 + step = 1 + + # tb = SummaryWriter() for epoch in range(epochs): print(f"Epoch {epoch + 1} of {epochs}") @@ -55,30 +58,30 @@ def starting_train(train_dataset, val_dataset, model, hyperparameters, n_eval, d optimizer.zero_grad() # Periodically evaluate our model + log to Tensorboard - if step % n_eval == 0: + # if step % n_eval == 0: # TODO: # Compute training loss and accuracy. # Log the results to Tensorboard. - with torch.no_grad(): - images = images.to(device) - labels = labels.to(device) + # with torch.no_grad(): + # images = images.to(device) + # labels = labels.to(device) - predictions = torch.argmax(outputs, dim=1) + # predictions = torch.argmax(outputs, dim=1) - accuracy = compute_accuracy(predictions, labels) - print('Accuracy: ', accuracy) + # accuracy = compute_accuracy(predictions, labels) + # print('Accuracy: ', accuracy) # TODO: # Compute validation loss and accuracy. # Log the results to Tensorboard. # Don't forget to turn off gradient calculations! 
- evaluate(val_loader, model, loss_fn) - pass step += 1 print('Epoch:', epoch, 'Loss:', loss.item()) + evaluate(val_loader, model, loss_fn, device) + # tb.close() def compute_accuracy(outputs, labels): @@ -93,18 +96,43 @@ def compute_accuracy(outputs, labels): 0.75 """ - n_correct = (torch.round(outputs) == labels).sum().item() + n_correct = (outputs == labels).int().sum() n_total = len(outputs) return n_correct / n_total -def evaluate(val_loader, model, loss_fn): +def evaluate(val_loader, model, loss_fn, device): """ Computes the loss and accuracy of a model on the validation dataset. - - TODO! """ model.eval() + + correct = 0 + total = 0 + loss = 0 + with torch.no_grad(): # IMPORTANT: turn off gradient computations + for batch in val_loader: + images, labels = batch + images = images.to(device) + labels = labels.to(device) + + outputs = model(images) + predictions = torch.argmax(outputs, dim=1) + + # labels == predictions does an elementwise comparison + # e.g. labels = [1, 2, 3, 4] + # predictions = [1, 4, 3, 3] + # labels == predictions = [1, 0, 1, 0] (where 1 is true, 0 is false) + # So the number of correct predictions is the sum of (labels == predictions) + correct += (labels == predictions).int().sum() + total += len(predictions) + loss += loss_fn(outputs, labels) + + + print(correct / total) model.train() - pass + + # tb.add_scalar("Loss", loss, epoch) + # tb.add_scalar("Correct", correct, epoch) + # tb.add_scalar("Accuracy", correct / total, epoch) From bf8681130fb4e44dc38b2cb2469dc4e96713a82a Mon Sep 17 00:00:00 2001 From: Tejas Kamtam Date: Fri, 4 Feb 2022 01:20:56 -0800 Subject: [PATCH 06/14] md edit --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9cd7cf4..75fe12a 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# ACM AI Projects Skeleton Code +# 50Whales Skeleton Code ## Setup From 0debb55125fa2d710f8fbaa3828c854c37ae36be Mon Sep 17 00:00:00 2001 From: tejaskamtam Date: Fri, 4 Feb 2022 01:34:41 -0800 Subject: [PATCH 07/14] gitignore changes for mac --- .gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index a2169ec..eadee56 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ __pycache__ -.vscode sauce/ training_summaries +.* +*.zip From d25a27527187aa3bf27126e1f8f2c24e6e573995 Mon Sep 17 00:00:00 2001 From: Tejas Kamtam Date: Mon, 7 Feb 2022 18:12:38 -0800 Subject: [PATCH 08/14] jupyter setting --- main.ipynb | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 main.ipynb diff --git a/main.ipynb b/main.ipynb new file mode 100644 index 0000000..da2f132 --- /dev/null +++ b/main.ipynb @@ -0,0 +1,76 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "# Tejas Kamtam\n", + "# Anand Gowda\n", + "# Austin Yang\n", + "# Anish" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "import torch\n", + "import torchvision\n", + "import torch.nn as nn\n", + "import torch.nn.functional as F\n", + "import pandas as pd\n", + "import numpy as np" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# constants\n", + "EPOCHS = 1\n", + "BATCH_SIZE = 16\n", + "N_EVAL = 1\n", + "DATA = \"../50whales/sauce\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + 
"source": [] + } + ], + "metadata": { + "interpreter": { + "hash": "e9b712ea729e402836c5595724b4e6d35efa45a29af25a2578c0523890674d22" + }, + "kernelspec": { + "display_name": "Python 3.8.12 ('acm')", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.12" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 5c95822aed95fdc0e94e3b1f001ab279e4e2bdaf Mon Sep 17 00:00:00 2001 From: Tejas Kamtam Date: Mon, 7 Feb 2022 18:32:11 -0800 Subject: [PATCH 09/14] Co-authored-by: austinqyang Co-authored-by: Anand Gowda --- data/ImageAugment.py | 37 +++++ main.ipynb | 321 +++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 344 insertions(+), 14 deletions(-) create mode 100644 data/ImageAugment.py diff --git a/data/ImageAugment.py b/data/ImageAugment.py new file mode 100644 index 0000000..b648b67 --- /dev/null +++ b/data/ImageAugment.py @@ -0,0 +1,37 @@ +import torch +from PIL import Image +from PIL import ImageOps +import pandas as pd +import constants +import torchvision + + +class ImageAugment(torch.utils.data.Dataset): + + def __init__(self, path): + self.path = path + self.data = pd.read_csv(constants.DATA + "/train.csv") + self.data.rename(columns=self.data.iloc[0]).drop(self.data.index[0]) + self.images = self.data.iloc[:, 0] + self.labels = self.data.iloc[:, 1] + self.transition = list(set(self.labels)) + self.whales = self.labels.replace(self.transition, list(range(5005))) + + + + def __getitem__(self, index): + image = Image.open(constants.DATA + self.path + self.images[index]) + + label = self.whales[index] + + image = image.resize((448, 224)) + image = ImageOps.grayscale(image) + + return torchvision.transforms.functional.pil_to_tensor(image), label + + + def __len__(self): + return len(self.labels) + + def augment(self, index): + \ No newline at end of file diff --git a/main.ipynb b/main.ipynb index da2f132..ad1963b 100644 --- a/main.ipynb +++ b/main.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -14,38 +14,331 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ - "# imports\n", + "# constants\n", + "EPOCHS = 1\n", + "BATCH_SIZE = 16\n", + "N_EVAL = 1\n", + "DATA = \"../50whales/sauce\"" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "# data processing\n", "import torch\n", + "from PIL import Image\n", + "from PIL import ImageOps\n", + "import pandas as pd\n", "import torchvision\n", + "\n", + "\n", + "class StartingDataset(torch.utils.data.Dataset):\n", + " \"\"\"\n", + " Dataset that contains 100000 3x224x224 black images (all zeros).\n", + " \"\"\"\n", + "\n", + " def __init__(self, path):\n", + " self.path = path\n", + " self.data = pd.read_csv(DATA + \"/train.csv\")\n", + " self.data.rename(columns=self.data.iloc[0]).drop(self.data.index[0])\n", + " self.images = self.data.iloc[:, 0]\n", + " self.labels = self.data.iloc[:, 1]\n", + " self.transition = list(set(self.labels))\n", + " self.whales = self.labels.replace(self.transition, list(range(5005)))\n", + "\n", + " def __getitem__(self, index):\n", + " image = Image.open(DATA + self.path + self.images[index])\n", + " label = 
self.whales[index]\n",
    "\n",
    "        image = image.resize((448, 224))\n",
    "        image = ImageOps.grayscale(image)\n",
    "\n",
    "        return torchvision.transforms.functional.pil_to_tensor(image), label\n",
    "\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.labels)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# neural network\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "class StartingNetwork(nn.Module):\n",
    "    \"\"\"\n",
    "    Basic logistic regression on 224x224x3 images.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        \n",
    "        self.conv1 = nn.Conv2d(1, 4, kernel_size = 5, padding = 2)\n",
    "        self.conv2 = nn.Conv2d(4, 8, kernel_size = 3, padding = 1)\n",
    "        self.conv3 = nn.Conv2d(8, 16, kernel_size = 3, padding = 1)\n",
    "        \n",
    "        self.pool = nn.MaxPool2d(2, 2)\n",
    "        \n",
    "        self.fc1 = nn.Linear(16 * 56 * 28, 5005)\n",
    "        # self.fc2 = nn.Linear(20020, 10010)\n",
    "        # self.fc3 = nn.Linear(10010 ,5005)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = x.float()\n",
    "\n",
    "        # Forward prop\n",
    "        # (n, 1, 448, 224)\n",
    "        x = self.conv1(x)\n",
    "        x = F.relu(x)\n",
    "        # (n, 4, 448, 224)\n",
    "        x = self.pool(x)\n",
    "        # (n, 4, 224, 112)\n",
    "        x = self.conv2(x)\n",
    "        x = F.relu(x)\n",
    "        # (n, 8, 224, 112)\n",
    "        x = self.pool(x)\n",
    "        # (n, 8, 112, 56)\n",
    "        x = self.conv3(x)\n",
    "        x = F.relu(x)\n",
    "        # (n, 16, 112, 56)\n",
    "        x = self.pool(x)\n",
    "        # (n, 16, 56, 28)\n",
    "\n",
    "        x = torch.reshape(x, (-1, 16 * 56 * 28))\n",
    "        # (n, 8 * 112 * 56)\n",
    "        x = self.fc1(x)\n",
    "        # x = F.relu(x)\n",
    "        # (n, 20020)\n",
    "        # x = self.fc2(x)\n",
    "        # x = F.relu(x)\n",
    "        # (n, 10010)\n",
    "        # x = self.fc3(x)\n",
    "        # (n, 5005)\n",
    "        return x\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# train function\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from tqdm import tqdm\n",
    "# from torch.utils.tensorboard import SummaryWriter\n",
    "\n",
    "\n",
    "def starting_train(train_dataset, val_dataset, model, hyperparameters, n_eval, device):\n",
    "    \"\"\"\n",
    "    Trains and evaluates a model.\n",
    "\n",
    "    Args:\n",
    "        train_dataset: PyTorch dataset containing training data.\n",
    "        val_dataset: PyTorch dataset containing validation data.\n",
    "        model: PyTorch model to be trained.\n",
    "        hyperparameters: Dictionary containing hyperparameters.\n",
    "        n_eval: Interval at which we evaluate our model.\n",
    "    \"\"\"\n",
    "\n",
    "    # Get keyword arguments\n",
    "    batch_size, epochs = hyperparameters[\"batch_size\"], hyperparameters[\"epochs\"]\n",
    "\n",
    "    # Initialize dataloaders\n",
    "    train_loader = torch.utils.data.DataLoader(\n",
    "        train_dataset, batch_size=batch_size, shuffle=True\n",
    "    )\n",
    "    val_loader = torch.utils.data.DataLoader(\n",
    "        val_dataset, batch_size=batch_size, shuffle=True\n",
    "    )\n",
    "\n",
    "    # Initialize optimizer (for gradient descent) and loss function\n",
    "    optimizer = optim.Adam(model.parameters())\n",
    "    loss_fn = nn.CrossEntropyLoss()\n",
    "\n",
    "    # Move the model to the GPU\n",
    "    model = model.to(device)\n",
    "\n",
    "    step = 1\n",
    "\n",
    "    # tb = 
SummaryWriter()\n", + " for epoch in range(epochs):\n", + " print(f\"Epoch {epoch + 1} of {epochs}\")\n", + "\n", + " # Loop over each batch in the dataset\n", + " for batch in tqdm(train_loader):\n", + " # TODO: Backpropagation and gradient descent\n", + " images, labels = batch\n", + " labels = torch.stack(list(labels), dim=0)\n", + "\n", + " images = images.to(device)\n", + " labels = labels.to(device)\n", + "\n", + " outputs = model(images)\n", + "\n", + " loss = loss_fn(outputs, labels)\n", + " loss.backward() # Compute gradients\n", + " optimizer.step() # Update all the weights with the gradients you just calculated\n", + " optimizer.zero_grad()\n", + "\n", + " # Periodically evaluate our model + log to Tensorboard\n", + " # if step % n_eval == 0:\n", + " # TODO:\n", + " # Compute training loss and accuracy.\n", + " # Log the results to Tensorboard.\n", + "\n", + " # with torch.no_grad():\n", + " # images = images.to(device)\n", + " # labels = labels.to(device)\n", + "\n", + " # predictions = torch.argmax(outputs, dim=1)\n", + "\n", + " # accuracy = compute_accuracy(predictions, labels)\n", + " # print('Accuracy: ', accuracy)\n", + "\n", + " # TODO:\n", + " # Compute validation loss and accuracy.\n", + " # Log the results to Tensorboard.\n", + " # Don't forget to turn off gradient calculations!\n", + "\n", + " step += 1\n", + "\n", + " print('Epoch:', epoch, 'Loss:', loss.item())\n", + " evaluate(val_loader, model, loss_fn, device)\n", + " # tb.close()\n", + "\n", + "\n", + "def compute_accuracy(outputs, labels):\n", + " \"\"\"\n", + " Computes the accuracy of a model's predictions.\n", + "\n", + " Example input:\n", + " outputs: [0.7, 0.9, 0.3, 0.2]\n", + " labels: [1, 1, 0, 1]\n", + "\n", + " Example output:\n", + " 0.75\n", + " \"\"\"\n", + "\n", + " n_correct = (outputs == labels).int().sum()\n", + " n_total = len(outputs)\n", + " return n_correct / n_total\n", + "\n", + "\n", + "def evaluate(val_loader, model, loss_fn, device):\n", + " \"\"\"\n", + " Computes the loss and accuracy of a model on the validation dataset.\n", + " \"\"\"\n", + "\n", + " model.eval()\n", + "\n", + " correct = 0\n", + " total = 0\n", + " loss = 0\n", + " with torch.no_grad(): # IMPORTANT: turn off gradient computations\n", + " for batch in val_loader:\n", + " images, labels = batch\n", + " images = images.to(device)\n", + " labels = labels.to(device)\n", + "\n", + " outputs = model(images)\n", + " predictions = torch.argmax(outputs, dim=1)\n", + "\n", + " # labels == predictions does an elementwise comparison\n", + " # e.g. 
labels = [1, 2, 3, 4]\n", + " # predictions = [1, 4, 3, 3]\n", + " # labels == predictions = [1, 0, 1, 0] (where 1 is true, 0 is false)\n", + " # So the number of correct predictions is the sum of (labels == predictions)\n", + " correct += (labels == predictions).int().sum()\n", + " total += len(predictions)\n", + " loss += loss_fn(outputs, labels)\n", + "\n", + " \n", + " print(correct / total)\n", + " model.train()\n", + "\n", + " # tb.add_scalar(\"Loss\", loss, epoch)\n", + " # tb.add_scalar(\"Correct\", correct, epoch)\n", + " # tb.add_scalar(\"Accuracy\", correct / total, epoch)\n" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "metadata": {}, - "outputs": [], - "source": [] + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epochs: 1\n", + "Batch size: 16\n", + "Epoch 1 of 1\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + " 0%| | 0/1110 [00:00 Date: Mon, 7 Feb 2022 18:32:41 -0800 Subject: [PATCH 10/14] image augment --- data/ImageAugment.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/data/ImageAugment.py b/data/ImageAugment.py index b648b67..24aa914 100644 --- a/data/ImageAugment.py +++ b/data/ImageAugment.py @@ -17,6 +17,7 @@ def __init__(self, path): self.transition = list(set(self.labels)) self.whales = self.labels.replace(self.transition, list(range(5005))) + for index in range(5005): def __getitem__(self, index): @@ -33,5 +34,3 @@ def __getitem__(self, index): def __len__(self): return len(self.labels) - def augment(self, index): - \ No newline at end of file From b2df135859d031d8be3d8be995a11ab1d7b659b8 Mon Sep 17 00:00:00 2001 From: Anand Gowda Date: Mon, 7 Feb 2022 19:13:12 -0800 Subject: [PATCH 11/14] Implemented Tensorboard --- constants.py | 2 +- data/StartingDataset.py | 1 + main.ipynb | 75 ++++++++++++++++++++++++------- train_functions/starting_train.py | 42 ++++++++--------- 4 files changed, 79 insertions(+), 41 deletions(-) diff --git a/constants.py b/constants.py index 6c5c7a7..c55ce45 100644 --- a/constants.py +++ b/constants.py @@ -1,4 +1,4 @@ EPOCHS = 1 BATCH_SIZE = 16 -N_EVAL = 1 +N_EVAL = 2 DATA = "../50whales/sauce" \ No newline at end of file diff --git a/data/StartingDataset.py b/data/StartingDataset.py index 1a25fb0..095e37e 100644 --- a/data/StartingDataset.py +++ b/data/StartingDataset.py @@ -1,3 +1,4 @@ +import os import torch from PIL import Image from PIL import ImageOps diff --git a/main.ipynb b/main.ipynb index ad1963b..34c8752 100644 --- a/main.ipynb +++ b/main.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 2, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -14,7 +14,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -27,9 +27,20 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 3, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/anand/opt/anaconda3/envs/acm/lib/python3.8/site-packages/torchvision/io/image.py:11: UserWarning: Failed to load image Python extension: dlopen(/Users/anand/opt/anaconda3/envs/acm/lib/python3.8/site-packages/torchvision/image.so, 6): Library not loaded: @rpath/libjpeg.9.dylib\n", + " Referenced from: /Users/anand/opt/anaconda3/envs/acm/lib/python3.8/site-packages/torchvision/image.so\n", + " Reason: Incompatible library version: image.so requires version 14.0.0 or later, but libjpeg.9.dylib 
provides version 12.0.0\n", + " warn(f\"Failed to load image Python extension: {e}\")\n" + ] + } + ], "source": [ "# data processing\n", "import torch\n", @@ -69,7 +80,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -132,7 +143,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -279,7 +290,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -287,7 +298,20 @@ "output_type": "stream", "text": [ "Epochs: 1\n", - "Batch size: 16\n", + "Batch size: 16\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + " 0%| | 0/1110 [00:00\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 33\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 34\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0m__name__\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m\"__main__\"\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 35\u001b[0;31m \u001b[0mmain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;32m\u001b[0m in \u001b[0;36mmain\u001b[0;34m()\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0mtrain_dataset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mval_dataset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrandom_split\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mtrain_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtest_size\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[0mmodel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mStartingNetwork\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 24\u001b[0;31m starting_train(\n\u001b[0m\u001b[1;32m 25\u001b[0m \u001b[0mtrain_dataset\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtrain_dataset\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 26\u001b[0m \u001b[0mval_dataset\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mval_dataset\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/Repos/50whales/train_functions/starting_train.py\u001b[0m in \u001b[0;36mstarting_train\u001b[0;34m(train_dataset, val_dataset, model, hyperparameters, n_eval, device)\u001b[0m\n\u001b[1;32m 56\u001b[0m \u001b[0mloss\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# Compute gradients\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0moptimizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# Update all the weights with the gradients you just calculated\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 58\u001b[0;31m \u001b[0moptimizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mzero_grad\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 59\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 60\u001b[0m \u001b[0;31m# Periodically evaluate our model + log to 
Tensorboard\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/opt/anaconda3/envs/acm/lib/python3.8/site-packages/torch/optim/optimizer.py\u001b[0m in \u001b[0;36mzero_grad\u001b[0;34m(self, set_to_none)\u001b[0m\n\u001b[1;32m 215\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 216\u001b[0m \u001b[0mp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgrad\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrequires_grad_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 217\u001b[0;31m \u001b[0mp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgrad\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mzero_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 218\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 219\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mstep\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mclosure\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m: " ] } ], @@ -339,6 +374,13 @@ "if __name__ == \"__main__\":\n", " main()\n" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -346,7 +388,7 @@ "hash": "e9b712ea729e402836c5595724b4e6d35efa45a29af25a2578c0523890674d22" }, "kernelspec": { - "display_name": "Python 3.8.12 ('acm')", + "display_name": "Python 3", "language": "python", "name": "python3" }, @@ -360,10 +402,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.12" - }, - "orig_nbformat": 4 + "version": "3.8.5" + } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/train_functions/starting_train.py b/train_functions/starting_train.py index 917a2a2..20a18f9 100644 --- a/train_functions/starting_train.py +++ b/train_functions/starting_train.py @@ -2,8 +2,9 @@ import torch.nn as nn import torch.optim as optim from tqdm import tqdm -# from torch.utils.tensorboard import SummaryWriter +from torch.utils.tensorboard import SummaryWriter +writer = SummaryWriter() def starting_train(train_dataset, val_dataset, model, hyperparameters, n_eval, device): """ @@ -58,33 +59,34 @@ def starting_train(train_dataset, val_dataset, model, hyperparameters, n_eval, d optimizer.zero_grad() # Periodically evaluate our model + log to Tensorboard - # if step % n_eval == 0: + if step % n_eval == 0: + model.eval() # TODO: # Compute training loss and accuracy. # Log the results to Tensorboard. - # with torch.no_grad(): - # images = images.to(device) - # labels = labels.to(device) - - # predictions = torch.argmax(outputs, dim=1) - - # accuracy = compute_accuracy(predictions, labels) - # print('Accuracy: ', accuracy) + tloss, taccuracy = evaluate(train_loader, model, loss_fn, device) + writer.add_scalar("Loss/train", tloss, epoch + 1) + writer.add_scalar("Accuracy/train", taccuracy, epoch + 1) # TODO: # Compute validation loss and accuracy. # Log the results to Tensorboard. # Don't forget to turn off gradient calculations! 
+ + vloss, vaccuracy= evaluate(val_loader, model, loss_fn, device) + writer.add_scalar("Loss/val", vloss, epoch + 1) + writer.add_scalar("Accuracy/val", vaccuracy, epoch + 1) + model.train() step += 1 print('Epoch:', epoch, 'Loss:', loss.item()) - evaluate(val_loader, model, loss_fn, device) - # tb.close() + + writer.flush() -def compute_accuracy(outputs, labels): +async def compute_accuracy(outputs, labels): """ Computes the accuracy of a model's predictions. @@ -101,18 +103,15 @@ def compute_accuracy(outputs, labels): return n_correct / n_total -def evaluate(val_loader, model, loss_fn, device): +async def evaluate(loader, model, loss_fn, device): """ Computes the loss and accuracy of a model on the validation dataset. """ - - model.eval() - correct = 0 total = 0 loss = 0 with torch.no_grad(): # IMPORTANT: turn off gradient computations - for batch in val_loader: + for batch in loader: images, labels = batch images = images.to(device) labels = labels.to(device) @@ -129,10 +128,7 @@ def evaluate(val_loader, model, loss_fn, device): total += len(predictions) loss += loss_fn(outputs, labels) + accuracy = correct / total - print(correct / total) - model.train() + return loss, accuracy - # tb.add_scalar("Loss", loss, epoch) - # tb.add_scalar("Correct", correct, epoch) - # tb.add_scalar("Accuracy", correct / total, epoch) From e4a6d2792f18aab33f7abb58651803ac7cc73229 Mon Sep 17 00:00:00 2001 From: austinqyang Date: Mon, 7 Feb 2022 19:20:49 -0800 Subject: [PATCH 12/14] add image augment --- data/ImageAugment.py | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/data/ImageAugment.py b/data/ImageAugment.py index 24aa914..12115af 100644 --- a/data/ImageAugment.py +++ b/data/ImageAugment.py @@ -17,7 +17,14 @@ def __init__(self, path): self.transition = list(set(self.labels)) self.whales = self.labels.replace(self.transition, list(range(5005))) - for index in range(5005): + self.transform1 = torchvision.transforms.Compose([ + torchvision.transforms.CenterCrop(200), + torchvision.transforms.function.resize((448,224)) + ]) + + self.transform2 = torchvision.transforms.ColorJitter() + + self.transform3 = torchvision.transforms.RandomRotation(180) def __getitem__(self, index): @@ -34,3 +41,21 @@ def __getitem__(self, index): def __len__(self): return len(self.labels) + def augment(self, index); + image, label = ImageAugment.__getitem__(index) + + self.images.append(self.transform1(image)) + self.labels.append(index) + + self.images.append(self.transform2(image)) + self.labels.append(index) + + self.images.append(self.transform2(image)) + self.labels.append(index) + + self.images.append(self.transform3(image)) + self.labels.append(index) + + self.images.append(self.transform3(image)) + self.labels.append(index) + From 586b1018c8446c07f8c4d224908f66da2f07abba Mon Sep 17 00:00:00 2001 From: austinqyang Date: Thu, 10 Feb 2022 18:11:42 -0800 Subject: [PATCH 13/14] implement image augmentation --- data/ImageAugment.py | 37 +++++++++--------- main.py | 3 +- ...44290941.Austins-MacBook-Air.local.44461.0 | Bin 0 -> 40 bytes ...44291635.Austins-MacBook-Air.local.44676.0 | Bin 0 -> 40 bytes ...44291739.Austins-MacBook-Air.local.44770.0 | Bin 0 -> 40 bytes ...44291762.Austins-MacBook-Air.local.44795.0 | Bin 0 -> 40 bytes ...44292005.Austins-MacBook-Air.local.44871.0 | Bin 0 -> 40 bytes ...44292139.Austins-MacBook-Air.local.44905.0 | Bin 0 -> 40 bytes ...44292163.Austins-MacBook-Air.local.44927.0 | Bin 0 -> 40 bytes ...44292426.Austins-MacBook-Air.local.45032.0 | Bin 0 -> 40 
bytes ...44292510.Austins-MacBook-Air.local.45063.0 | Bin 0 -> 40 bytes ...44292663.Austins-MacBook-Air.local.45120.0 | Bin 0 -> 40 bytes ...44293927.Austins-MacBook-Air.local.45471.0 | Bin 0 -> 40 bytes 13 files changed, 21 insertions(+), 19 deletions(-) create mode 100644 runs/Feb07_19-29-01_Austins-MacBook-Air.local/events.out.tfevents.1644290941.Austins-MacBook-Air.local.44461.0 create mode 100644 runs/Feb07_19-40-35_Austins-MacBook-Air.local/events.out.tfevents.1644291635.Austins-MacBook-Air.local.44676.0 create mode 100644 runs/Feb07_19-42-19_Austins-MacBook-Air.local/events.out.tfevents.1644291739.Austins-MacBook-Air.local.44770.0 create mode 100644 runs/Feb07_19-42-42_Austins-MacBook-Air.local/events.out.tfevents.1644291762.Austins-MacBook-Air.local.44795.0 create mode 100644 runs/Feb07_19-46-45_Austins-MacBook-Air.local/events.out.tfevents.1644292005.Austins-MacBook-Air.local.44871.0 create mode 100644 runs/Feb07_19-48-59_Austins-MacBook-Air.local/events.out.tfevents.1644292139.Austins-MacBook-Air.local.44905.0 create mode 100644 runs/Feb07_19-49-23_Austins-MacBook-Air.local/events.out.tfevents.1644292163.Austins-MacBook-Air.local.44927.0 create mode 100644 runs/Feb07_19-53-46_Austins-MacBook-Air.local/events.out.tfevents.1644292426.Austins-MacBook-Air.local.45032.0 create mode 100644 runs/Feb07_19-55-10_Austins-MacBook-Air.local/events.out.tfevents.1644292510.Austins-MacBook-Air.local.45063.0 create mode 100644 runs/Feb07_19-57-43_Austins-MacBook-Air.local/events.out.tfevents.1644292663.Austins-MacBook-Air.local.45120.0 create mode 100644 runs/Feb07_20-18-47_Austins-MacBook-Air.local/events.out.tfevents.1644293927.Austins-MacBook-Air.local.45471.0 diff --git a/data/ImageAugment.py b/data/ImageAugment.py index 12115af..8fe65cb 100644 --- a/data/ImageAugment.py +++ b/data/ImageAugment.py @@ -4,6 +4,8 @@ import pandas as pd import constants import torchvision +import torchvision.transforms.functional +import numpy as np class ImageAugment(torch.utils.data.Dataset): @@ -17,10 +19,7 @@ def __init__(self, path): self.transition = list(set(self.labels)) self.whales = self.labels.replace(self.transition, list(range(5005))) - self.transform1 = torchvision.transforms.Compose([ - torchvision.transforms.CenterCrop(200), - torchvision.transforms.function.resize((448,224)) - ]) + self.transform1 = torchvision.transforms.RandomResizedCrop(size = (448,224), scale = (0.5, 0.75)) self.transform2 = torchvision.transforms.ColorJitter() @@ -35,27 +34,29 @@ def __getitem__(self, index): image = image.resize((448, 224)) image = ImageOps.grayscale(image) - return torchvision.transforms.functional.pil_to_tensor(image), label + image = torchvision.transforms.ToTensor()(np.array(image)) + + return image, label def __len__(self): return len(self.labels) - def augment(self, index); - image, label = ImageAugment.__getitem__(index) + # def augment(self, index): + # image, label = ImageAugment.__getitem__(index) + + # self.images.append(self.transform1(image)) + # self.labels.append(index) - self.images.append(self.transform1(image)) - self.labels.append(index) - - self.images.append(self.transform2(image)) - self.labels.append(index) + # self.images.append(self.transform2(image)) + # self.labels.append(index) - self.images.append(self.transform2(image)) - self.labels.append(index) + # self.images.append(self.transform2(image)) + # self.labels.append(index) - self.images.append(self.transform3(image)) - self.labels.append(index) + # self.images.append(self.transform3(image)) + # self.labels.append(index) - 
self.images.append(self.transform3(image))
-        self.labels.append(index)
+    # self.images.append(self.transform3(image))
+    # self.labels.append(index)
 
 
diff --git a/main.py b/main.py
index de740f7..c391eb6 100644
--- a/main.py
+++ b/main.py
@@ -1,6 +1,7 @@
 import os
 
 import constants
+from data.ImageAugment import ImageAugment
 import torch
 from data.StartingDataset import StartingDataset
 from networks.StartingNetwork import StartingNetwork
@@ -20,7 +21,7 @@ def main():
     print("Batch size:", constants.BATCH_SIZE)
 
     # Initialize dataset and model. Then train the model!
-    data = StartingDataset("/train/")
+    data = ImageAugment("/train/")
     train_size = int(0.7 * len(data))
     test_size = len(data) - train_size
     train_dataset, val_dataset = torch.utils.data.random_split(data, [train_size, test_size])
From 1224fe3ea258cf736112370e4f5f991299c47458 Mon Sep 17 00:00:00 2001
From: austinqyang
Date: Thu, 10 Feb 2022 19:34:14 -0800
Subject: [PATCH 14/14] fix imageaugment

---
 data/ImageAugment.py                          | 50 ++++++++---
 data/tempCodeRunnerFile.py                    | 17 ++++
 main.ipynb                                    | 78 ++++++++++++++++++
 main.py                                       |  3 +
 ...44545659.Austins-MacBook-Air.local.57953.0 | Bin 0 -> 40 bytes
 ...44546634.Austins-MacBook-Air.local.58453.0 | Bin 0 -> 40 bytes
 ...44546678.Austins-MacBook-Air.local.58482.0 | Bin 0 -> 40 bytes
 ...44546813.Austins-MacBook-Air.local.58547.0 | Bin 0 -> 40 bytes
 ...44547700.Austins-MacBook-Air.local.58736.0 | Bin 0 -> 40 bytes
 ...44547847.Austins-MacBook-Air.local.58802.0 | Bin 0 -> 40 bytes
 ...44547865.Austins-MacBook-Air.local.58831.0 | Bin 0 -> 40 bytes
 ...44548542.Austins-MacBook-Air.local.59001.0 | Bin 0 -> 40 bytes
 ...44548612.Austins-MacBook-Air.local.59043.0 | Bin 0 -> 40 bytes
 ...44548803.Austins-MacBook-Air.local.59121.0 | Bin 0 -> 40 bytes
 ...44548866.Austins-MacBook-Air.local.59151.0 | Bin 0 -> 40 bytes
 ...44548892.Austins-MacBook-Air.local.59180.0 | Bin 0 -> 40 bytes
 ...44548972.Austins-MacBook-Air.local.59206.0 | Bin 0 -> 40 bytes
 17 files changed, 135 insertions(+), 13 deletions(-)
 create mode 100644 data/tempCodeRunnerFile.py
 create mode 100644 runs/Feb10_18-14-19_Austins-MacBook-Air.local/events.out.tfevents.1644545659.Austins-MacBook-Air.local.57953.0
 create mode 100644 runs/Feb10_18-30-34_Austins-MacBook-Air.local/events.out.tfevents.1644546634.Austins-MacBook-Air.local.58453.0
 create mode 100644 runs/Feb10_18-31-18_Austins-MacBook-Air.local/events.out.tfevents.1644546678.Austins-MacBook-Air.local.58482.0
 create mode 100644 
runs/Feb10_18-33-33_Austins-MacBook-Air.local/events.out.tfevents.1644546813.Austins-MacBook-Air.local.58547.0 create mode 100644 runs/Feb10_18-48-20_Austins-MacBook-Air.local/events.out.tfevents.1644547700.Austins-MacBook-Air.local.58736.0 create mode 100644 runs/Feb10_18-50-47_Austins-MacBook-Air.local/events.out.tfevents.1644547847.Austins-MacBook-Air.local.58802.0 create mode 100644 runs/Feb10_18-51-05_Austins-MacBook-Air.local/events.out.tfevents.1644547865.Austins-MacBook-Air.local.58831.0 create mode 100644 runs/Feb10_19-02-22_Austins-MacBook-Air.local/events.out.tfevents.1644548542.Austins-MacBook-Air.local.59001.0 create mode 100644 runs/Feb10_19-03-32_Austins-MacBook-Air.local/events.out.tfevents.1644548612.Austins-MacBook-Air.local.59043.0 create mode 100644 runs/Feb10_19-06-43_Austins-MacBook-Air.local/events.out.tfevents.1644548803.Austins-MacBook-Air.local.59121.0 create mode 100644 runs/Feb10_19-07-46_Austins-MacBook-Air.local/events.out.tfevents.1644548866.Austins-MacBook-Air.local.59151.0 create mode 100644 runs/Feb10_19-08-12_Austins-MacBook-Air.local/events.out.tfevents.1644548892.Austins-MacBook-Air.local.59180.0 create mode 100644 runs/Feb10_19-09-32_Austins-MacBook-Air.local/events.out.tfevents.1644548972.Austins-MacBook-Air.local.59206.0 diff --git a/data/ImageAugment.py b/data/ImageAugment.py index 8fe65cb..3d2874c 100644 --- a/data/ImageAugment.py +++ b/data/ImageAugment.py @@ -23,7 +23,10 @@ def __init__(self, path): self.transform2 = torchvision.transforms.ColorJitter() - self.transform3 = torchvision.transforms.RandomRotation(180) + self.transform3 = torchvision.transforms.RandomAffine(180) + + self.augmentedimages = [] + self.augmentedlabels = [] def __getitem__(self, index): @@ -42,21 +45,42 @@ def __getitem__(self, index): def __len__(self): return len(self.labels) - # def augment(self, index): - # image, label = ImageAugment.__getitem__(index) + def cutout(self, image): + size = 30 + x = np.random.randint(448) + y = np.random.randint(224) + y1 = np.clip(y - size // 2, 0, 224) + y2 = np.clip(y + size // 2, 0, 224) + x1 = np.clip(x - size // 2, 0, 448) + x2 = np.clip(x + size // 2, 0, 448) + image[y1:y2, x1:x2] = 0 + + + + def augment(self, index): + image = Image.open(constants.DATA + self.path + self.images[index]) + + image = image.resize((448,224)) + + self.augmentedimages.append(torchvision.transforms.ToTensor()(np.array(self.transform1(image)))) + self.augmentedlabels.append(index) + + self.augmentedimages.append(torchvision.transforms.ToTensor()(np.array(self.transform2(image)))) + self.augmentedlabels.append(index) + + self.augmentedimages.append(torchvision.transforms.ToTensor()(np.array(self.transform2(image)))) + self.augmentedlabels.append(index) + + self.augmentedimages.append(torchvision.transforms.ToTensor()(np.array(self.transform3(image)))) + self.augmentedlabels.append(index) - # self.images.append(self.transform1(image)) - # self.labels.append(index) + self.augmentedimages.append(torchvision.transforms.ToTensor()(np.array(self.transform3(image)))) + self.augmentedlabels.append(index) - # self.images.append(self.transform2(image)) - # self.labels.append(index) + self.augmentedimages.append(torchvision.transforms.ToTensor()(np.array(image + torch.std(image)*torch.randn(image.size)))) + self.augmentedlabels.append(index) - # self.images.append(self.transform2(image)) - # self.labels.append(index) + self.augmentedimages.append(torchvision.transforms.ToTensor()(np.array(self.cutout(self, image)))) - # self.images.append(self.transform3(image)) - 
# self.labels.append(index) - # self.images.append(self.transform3(image)) - # self.labels.append(index) diff --git a/data/tempCodeRunnerFile.py b/data/tempCodeRunnerFile.py new file mode 100644 index 0000000..e5acf1f --- /dev/null +++ b/data/tempCodeRunnerFile.py @@ -0,0 +1,17 @@ + def augment(self, index): + image, label = ImageAugment.__getitem__(index) + + self.images.append(self.transform1(image)) + self.labels.append(index) + + self.images.append(self.transform2(image)) + self.labels.append(index) + + self.images.append(self.transform2(image)) + self.labels.append(index) + + self.images.append(self.transform3(image)) + self.labels.append(index) + + self.images.append(self.transform3(image)) + self.labels.append(index) \ No newline at end of file diff --git a/main.ipynb b/main.ipynb index 34c8752..4760b74 100644 --- a/main.ipynb +++ b/main.ipynb @@ -78,6 +78,84 @@ " return len(self.labels)\n" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "from PIL import Image\n", + "from PIL import ImageOps\n", + "import pandas as pd\n", + "import constants\n", + "import torchvision\n", + "import torchvision.transforms.functional\n", + "import numpy as np\n", + "\n", + "\n", + "class ImageAugment(torch.utils.data.Dataset):\n", + "\n", + " def __init__(self, path):\n", + " self.path = path\n", + " self.data = pd.read_csv(constants.DATA + \"/train.csv\")\n", + " self.data.rename(columns=self.data.iloc[0]).drop(self.data.index[0])\n", + " self.images = self.data.iloc[:, 0]\n", + " self.labels = self.data.iloc[:, 1]\n", + " self.transition = list(set(self.labels))\n", + " self.whales = self.labels.replace(self.transition, list(range(5005)))\n", + "\n", + " self.transform1 = torchvision.transforms.RandomResizedCrop(size = (448,224), scale = (0.5, 0.75))\n", + "\n", + " self.transform2 = torchvision.transforms.ColorJitter()\n", + "\n", + " self.transform3 = torchvision.transforms.RandomAffine(180)\n", + "\n", + " self.augmentedimages = []\n", + " self.augmentedlabels = []\n", + " \n", + "\n", + " def __getitem__(self, index):\n", + " image = Image.open(constants.DATA + self.path + self.images[index])\n", + "\n", + " label = self.whales[index]\n", + "\n", + " image = image.resize((448, 224))\n", + " image = ImageOps.grayscale(image)\n", + "\n", + " image = torchvision.transforms.ToTensor()(np.array(image))\n", + "\n", + " return image, label\n", + "\n", + "\n", + " def __len__(self):\n", + " return len(self.labels)\n", + "\n", + " def augment(self, index):\n", + " image = Image.open(constants.DATA + self.path + self.images[index])\n", + "\n", + " image = image.resize((448,224))\n", + "\n", + " self.augmentedimages.append(torchvision.transforms.ToTensor()(np.array(self.transform1(image))))\n", + " self.augmentedlabels.append(index)\n", + "\n", + " self.augmentedimages.append(torchvision.transforms.ToTensor()(np.array(self.transform2(image))))\n", + " self.augmentedlabels.append(index)\n", + "\n", + " self.augmentedimages.append(torchvision.transforms.ToTensor()(np.array(self.transform2(image))))\n", + " self.augmentedlabels.append(index)\n", + "\n", + " self.augmentedimages.append(torchvision.transforms.ToTensor()(np.array(self.transform3(image))))\n", + " self.augmentedlabels.append(index)\n", + "\n", + " self.augmentedimages.append(torchvision.transforms.ToTensor()(np.array(self.transform3(image))))\n", + " self.augmentedlabels.append(index)\n", + "\n", + " 
self.augmentedimages.append(torchvision.transforms.ToTensor()(np.array(image + torch.std(image)*torch.randn(image.size()))))\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
diff --git a/main.py b/main.py
index c391eb6..033586f 100644
--- a/main.py
+++ b/main.py
@@ -22,6 +22,9 @@ def main():
 
     # Initialize dataset and model. Then train the model!
     data = ImageAugment("/train/")
+    currentlen = len(data)
+    for i in range(currentlen):
+        data.augment(i)
     train_size = int(0.7 * len(data))
     test_size = len(data) - train_size
     train_dataset, val_dataset = torch.utils.data.random_split(data, [train_size, test_size])