Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
62 commits
Select commit Hold shift + click to select a range
2e6ac7b
Merge branch 'last-working' of github.com:hw16471/ActivityTrackingWit…
harrywaugh Mar 19, 2019
24af057
Added feature to use line acc
harrywaugh Mar 20, 2019
887fdb9
Checkpoint
harrywaugh Apr 21, 2019
739d04a
Checkpoint
harrywaugh Apr 21, 2019
84643a7
Checkpoint
harrywaugh Apr 21, 2019
27823fd
Checkpoint
harrywaugh Apr 21, 2019
96cb00c
:q
harrywaugh Apr 21, 2019
acbd5e1
Ported code to python file
harrywaugh Apr 21, 2019
a4f9d33
Pretty output
harrywaugh Apr 21, 2019
0b432b1
Pretty output
harrywaugh Apr 21, 2019
5506858
Pretty output
harrywaugh Apr 21, 2019
3df28af
Pretty output
harrywaugh Apr 21, 2019
6b2b795
Pretty output
harrywaugh Apr 21, 2019
0b264ba
Pretty output
harrywaugh Apr 21, 2019
d1413c5
Added ground truth for new activities
harrywaugh Apr 21, 2019
62267c8
Added ground truth for new activities
harrywaugh Apr 21, 2019
43def73
Added ground truth for new activities
harrywaugh Apr 21, 2019
6244e65
Added ground truth for new activities
harrywaugh Apr 21, 2019
f9698fe
Added ground truth for new activities
harrywaugh Apr 21, 2019
1dee38d
Added ground truth for new activities
harrywaugh Apr 21, 2019
91ceaa4
Added ground truth for new activities
harrywaugh Apr 21, 2019
ebab0f4
Added ground truth for new activities
harrywaugh Apr 21, 2019
174d41f
Added ground truth for new activities
harrywaugh Apr 21, 2019
745ac55
Added ground truth for new activities
harrywaugh Apr 21, 2019
5d33ecd
Added ground truth for new activities
harrywaugh Apr 21, 2019
61590c8
Added ground truth for new activities
harrywaugh Apr 21, 2019
7f31234
Added ground truth for new activities
harrywaugh Apr 21, 2019
d9d6476
Added ground truth for new activities
harrywaugh Apr 21, 2019
7e99c3a
Added ground truth for new activities
harrywaugh Apr 21, 2019
7188c0e
seqlen 170 offset 10
harrywaugh Apr 21, 2019
2558be3
seqlen 170 offset 10
harrywaugh Apr 21, 2019
9333dcb
Try much longer sequences
harrywaugh Apr 21, 2019
4672b94
Try much longer sequences
harrywaugh Apr 21, 2019
65273b6
Try much longer sequences
harrywaugh Apr 21, 2019
1a22b47
Try much longer sequences
harrywaugh Apr 21, 2019
3a15513
Another result stored
harrywaugh Apr 22, 2019
d835a6b
Next , unscaled data
harrywaugh Apr 22, 2019
afc21d4
Merge branch 'last-working' of github.com:hw16471/ActivityTrackingWit…
harrywaugh Apr 22, 2019
e6a6965
Next 5 secs of data
harrywaugh Apr 22, 2019
4d0ba42
Almost all sequence length data collected
harrywaugh Apr 24, 2019
be79669
Added some cycling data
harrywaugh Apr 24, 2019
54f9daa
Added all of ground truth data for new activities
harrywaugh Apr 24, 2019
31f0860
Added some more routes
harrywaugh Apr 25, 2019
c696e0d
Colected more seq len results
harrywaugh Apr 25, 2019
782050d
Merged
harrywaugh Apr 25, 2019
fa4c735
Added more cycling data
harrywaugh Apr 25, 2019
438d1f0
Merge branch 'last-working' of github.com:hw16471/ActivityTrackingWit…
harrywaugh Apr 25, 2019
e945ce5
Added more data
harrywaugh Apr 26, 2019
fc60bf9
merge
harrywaugh Apr 26, 2019
ae3e885
Merge branch 'last-working' of github.com:hw16471/ActivityTrackingWit…
harrywaugh Apr 26, 2019
a06e7f3
Many results
harrywaugh Apr 30, 2019
802aa24
Added more data recordings
harrywaugh Apr 30, 2019
473942f
Segmented classes
harrywaugh May 1, 2019
bed0ca0
Training going well
harrywaugh May 2, 2019
7749387
Got report screenshots
harrywaugh May 2, 2019
9abbcfa
Added new waslking files
harrywaugh May 3, 2019
5ccf4fc
Fixed merge
harrywaugh May 3, 2019
f188cd7
Changed deep learning.py
harrywaugh May 3, 2019
bc70213
Merge branch 'last-working' of github.com:hw16471/ActivityTrackingWit…
harrywaugh May 3, 2019
9050a8c
Final version
harrywaugh May 9, 2019
c65da82
Added theseis
harrywaugh May 18, 2019
2a24a49
Added thesis
harrywaugh May 18, 2019
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
2 changes: 2 additions & 0 deletions .gitattributes
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
*.pynb filter=lfs diff=lfs merge=lfs -text
*.ipynb filter=lfs diff=lfs merge=lfs -text
Git LFS file not shown
386 changes: 3 additions & 383 deletions .ipynb_checkpoints/GRUSupportFns-checkpoint.ipynb
Original file line number Diff line number Diff line change
@@ -1,383 +1,3 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"%%capture\n",
"import numpy as np\n",
"import matplotlib\n",
"from matplotlib import pyplot as plt\n",
"from scipy import integrate\n",
"from scipy import interpolate\n",
"import math\n",
"import sys\n",
"from scipy import signal\n",
"import pandas as pd\n",
"import tensorflow as tf\n",
"from numba import cuda\n",
"import numba\n",
"import os\n",
"from sklearn.preprocessing import MinMaxScaler\n",
"from sklearn.utils import shuffle\n",
"from IPython.display import display, HTML\n",
"%run KalmanFilter.ipynb\n",
"import mpld3\n",
"mpld3.enable_notebook()\n"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"# from tf.keras.models import Sequential # This does not work!\n",
"from tensorflow.python.keras.models import Sequential\n",
"from tensorflow.python.keras.layers import Input, Dense, GRU, Embedding\n",
"from tensorflow.python.keras.optimizers import RMSprop\n",
"from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, ReduceLROnPlateau\n",
"from tensorflow.python.keras import backend as K \n",
"from IPython.display import clear_output\n",
"from tensorflow.python.keras.initializers import RandomUniform\n",
"# tf.enable_eager_execution()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"x_dim = 11\n",
"y_dim = 2\n",
"def create_input_and_output(data, just_acc=False):\n",
" ## Needed Data\n",
" gps = data.gps[:,1:3] \n",
" acc = data.acc_with_grav_ERC[:, 1:4]\n",
" mag = data.mag[:, 1:3]\n",
" gyro = data.gyro[:, 1:4]\n",
"\n",
" time_series = data.acc_with_grav_ERC[:, 0]\n",
" ground_truth = data.ground_truth.dis[:, 1:3]\n",
" delta_time = np.diff(time_series, axis=0)\n",
" delta_time = np.concatenate(([[0]], delta_time))\n",
" \n",
" \n",
" # Choose which data to include in input\n",
" if (just_acc):\n",
" input_data = np.concatenate((gps, acc, delta_time), axis=1)\n",
" else:\n",
" input_data = np.concatenate((gps, acc, gyro, mag, delta_time), axis=1) ## Feature Vector Length = 11\n",
" return input_data, ground_truth\n"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"def load_datasets(files):\n",
" \"\"\"Load each recording file and return a list of [input, ground_truth] pairs.\"\"\"\n",
" def _prepare(path):\n",
" # One recording -> [model input matrix, ground-truth positions].\n",
" stream = Data_Stream(path, load_truth=True, higher_freq=True)\n",
" model_input, truth = create_input_and_output(stream)\n",
" return [model_input, truth]\n",
"\n",
" return [_prepare(path) for path in files]\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[[ 3000. 3000. 3000. 3000. 3000. 3000. 3000. 3000. 3000. 3000.\n",
" 3000.]\n",
" [-3000. -3000. -3000. -3000. -3000. -3000. -3000. -3000. -3000. -3000.\n",
" -3000.]]\n"
]
}
],
"source": [
"# Per-channel scaling bounds; all deliberately loose at 3000 (true ranges noted).\n",
"gps_bound = 3000.0\n",
"acc_bound = 3000.0 #Actual bound (30)\n",
"gyro_bound = 3000.0 #Actual bound (2)\n",
"mag_bound = 3000.0 #Actual bound (30)\n",
"dt_bound = 3000.0 #Actual bound (0.1)\n",
"\n",
"# One row of per-feature upper bounds, stacked with its negation as the lower bounds.\n",
"_upper_bounds = np.asmatrix([gps_bound, gps_bound,\n",
" acc_bound, acc_bound, acc_bound,\n",
" gyro_bound, gyro_bound, gyro_bound,\n",
" mag_bound, mag_bound, dt_bound])\n",
"custom_scale_matrix = np.concatenate((_upper_bounds, -_upper_bounds))\n",
"\n",
"print(custom_scale_matrix)\n",
"# Fit the min-max scalers on the fixed bound matrix (not on the data) so the\n",
"# scaling is identical across datasets. y uses only the first two (GPS) columns.\n",
"x_scaler = MinMaxScaler().fit(custom_scale_matrix)\n",
"y_scaler = MinMaxScaler().fit(custom_scale_matrix[:, 0:2])\n",
"\n",
"def scale_dataset(training_dataset, test_dataset):\n",
" \"\"\"Min-max scale every [input, ground_truth] pair in both datasets.\"\"\"\n",
" def _scale(dataset):\n",
" return [[x_scaler.transform(pair[0]),\n",
" y_scaler.transform(pair[1])] for pair in dataset]\n",
"\n",
" return _scale(training_dataset), _scale(test_dataset)\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"def get_seqs(sequence_length, dataset, offset):\n",
" \"\"\"Cut every activity into windows of sequence_length, stepping by offset.\n",
"\n",
" Returns (x_seqs, y_seqs) as arrays of shape (n_windows, sequence_length, dim).\n",
" Trailing samples that do not fill a whole window are dropped.\n",
" \"\"\"\n",
" x_seqs = []\n",
" y_seqs = []\n",
" for activity in dataset:\n",
" ##Create all sequences\n",
" for i in range(0, len(activity[0]) - sequence_length, offset):\n",
" x_seqs.append(activity[0][i:i+sequence_length])\n",
" y_seqs.append(activity[1][i:i+sequence_length])\n",
" \n",
" x_seqs = np.asarray(x_seqs)\n",
" y_seqs = np.asarray(y_seqs)\n",
" return x_seqs, y_seqs\n",
"\n",
"def batch_generator(batch_size, x_seqs, y_seqs): \n",
" \"\"\"Endlessly yield shuffled (x, y) batches, reshuffling between epochs.\"\"\"\n",
" x_seqs, y_seqs = shuffle(x_seqs, y_seqs)\n",
" # BUGFIX: use ceiling division. The previous int(len/batch_size + 1) yielded\n",
" # an extra EMPTY batch whenever len(x_seqs) was an exact multiple of\n",
" # batch_size; for all other lengths the two formulas agree.\n",
" num_batches = (len(x_seqs) + batch_size - 1) // batch_size\n",
" #Print number of batches required to pass all sequences in an epoch\n",
" print(\"Batches per Epoch: \", num_batches)\n",
" while True:\n",
" \n",
" #For each batch in an epoch, given n sequences that are in shuffled order\n",
" for i in range(num_batches):\n",
" i1 = i+1\n",
" yield (x_seqs[i*batch_size:i1*batch_size], y_seqs[i*batch_size:i1*batch_size])\n",
" #Shuffle sequences between epochs\n",
" x_seqs, y_seqs = shuffle(x_seqs, y_seqs)\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"def custom_loss(y_true, y_pred):\n",
" \"\"\"Euclidean error between predicted and true tracks, skipping warm-up steps.\n",
"\n",
" Relies on the module-level `warmup_steps`: the first warmup_steps timesteps\n",
" of every sequence are excluded so the RNN is not penalised while its hidden\n",
" state settles.\n",
" \"\"\"\n",
" # (Removed a large block of commented-out per-point nearest-neighbour loss\n",
" # experiments that was dead code.)\n",
" y_true_slice = y_true[:, warmup_steps:, :]\n",
" y_pred_slice = y_pred[:, warmup_steps:, :]\n",
" \n",
" # NOTE(review): tf.norm with ord='euclidean' and no axis collapses the whole\n",
" # difference tensor to a single Frobenius norm, so reduce_mean operates on a\n",
" # scalar — confirm a per-timestep norm (axis=-1) was not intended.\n",
" eu_dists = tf.norm(y_true_slice-y_pred_slice, ord='euclidean')\n",
" loss_mean = tf.reduce_mean(eu_dists)\n",
" \n",
" return loss_mean"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"class PlotLosses(tf.keras.callbacks.Callback):\n",
" def on_train_begin(self, logs={}):\n",
" self.i = 0\n",
" self.x = []\n",
" self.losses = []\n",
" self.val_losses = []\n",
" \n",
" \n",
" self.logs = []\n",
"\n",
" def on_epoch_end(self, epoch, logs={}):\n",
" \n",
" self.logs.append(logs)\n",
" self.x.append(self.i)\n",
" self.losses.append(logs.get('loss'))\n",
" self.val_losses.append(logs.get('val_loss'))\n",
" self.i += 1\n",
" \n",
" clear_output(wait=True)\n",
" plt.figure(figsize=(9, 8))\n",
" plt.plot(self.x, self.losses, label=\"Training Loss\", c='r')\n",
" plt.plot(self.x, self.val_losses, label=\"Validation Loss\", c='b')\n",
" plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.00), ncol=2, frameon=False)\n",
" plt.xlabel('Epochs')\n",
" plt.ylabel('Loss')\n",
" plt.ioff()\n",
" plt.show();"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def plot_dataset(dataset, seq_len=100):\n",
" \"\"\"Plot predicted track vs ground truth and raw GPS for every activity.\n",
"\n",
" Uses the module-level `model`, `x_scaler` and `y_scaler`. The input is\n",
" zero-padded with seq_len rows so the RNN has warm-up context, and the\n",
" first seq_len outputs are discarded before plotting.\n",
" \"\"\"\n",
" for activity in dataset:\n",
" input_data = activity[0]\n",
" ground_truth = activity[1]\n",
" # First two input columns are the raw GPS x/y positions.\n",
" orig_gps = activity[0][:, 0:2]\n",
" \n",
" \n",
" #Pad input\n",
" padding = np.zeros((seq_len, input_data.shape[1]))\n",
" input_data = np.concatenate((padding, input_data))\n",
"\n",
" ##Scale down the trial input, get predicted output\n",
" input_data = x_scaler.transform(input_data)\n",
" input_data = np.expand_dims(input_data, axis=0)\n",
" predicted_output = model.predict(input_data)\n",
"\n",
" \n",
" ## Remove data upto the start point\n",
" ground_truth = ground_truth[seq_len:]\n",
" orig_gps = orig_gps[seq_len:]\n",
" predicted_output = y_scaler.inverse_transform(predicted_output[0])[seq_len:]\n",
"\n",
" ##Print Graphs of X against Y\n",
" plt.figure(figsize=(8,5))\n",
" plt.plot(ground_truth[:, 0], ground_truth[:, 1], label='ground truth', color = 'g')\n",
" plt.plot(predicted_output[:, 0], predicted_output[:, 1], label='seen training data', color = 'b')\n",
" # BUGFIX: orig_gps has only two columns (sliced [:, 0:2] above), so the\n",
" # previous orig_gps[:, 1], orig_gps[:, 2] raised IndexError; plot x vs y.\n",
" plt.plot(orig_gps[:, 0], orig_gps[:, 1], label='original gps', color = 'r')\n",
" plt.legend()\n",
" plt.xlabel('Position X (Metres)')\n",
" plt.ylabel('Position Y (Metres)')\n",
" plt.show()\n",
"\n",
"# print(\"Accuracy of GPS: \", measure_accuracy(ground_truth, orig_gps[:, 1:3]))\n",
" print(\"Accuracy of RNN: \", measure_accuracy(ground_truth, predicted_output))\n",
" "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Print Data with Pandas"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"# if(print_outputs):\n",
"\n",
"# if(False):\n",
"# training_frame = pd.DataFrame(np.concatenate((training_data, training_labels), axis=1), \n",
"# columns=['GPS X', 'GPS Y', 'Acc X', 'Acc Y', \n",
"# 'Delta Time', 'Ground Truth X', 'Ground_Truth Y'])\n",
"# else:\n",
"# training_frame = pd.DataFrame(np.concatenate((training_data, training_labels), axis=1), \n",
"# columns=['GPS X', 'GPS Y', 'Acc X', 'Acc Y',\n",
"# # 'Vel X', 'Vel Y',\n",
"# # 'Mag X', 'Mag Y',\n",
"# # 'Gyro X', 'Gyro Y', 'Gyro Z',\n",
"# 'Delta Time', 'Ground Truth X', 'Ground_Truth Y'])\n",
"# print(\"Training Data\")\n",
"# display(training_frame.head(5))\n",
"# training_frame.plot(figsize=(20,10))"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"# if(print_outputs):\n",
"\n",
"# if(False):\n",
"# test_frame = pd.DataFrame(np.concatenate((test_data, test_labels), axis=1), \n",
"# columns=['GPS X', 'GPS Y', 'Acc X', 'Acc Y', \n",
"# 'Delta Time', 'Ground Truth X', 'Ground_Truth Y'])\n",
"# else:\n",
"# test_frame = pd.DataFrame(np.concatenate((test_data, test_labels), axis=1), \n",
"# columns=['GPS X', 'GPS Y', 'Acc X', 'Acc Y',\n",
"# # 'Vel X', 'Vel Y',\n",
"# # 'Mag X', 'Mag Y',\n",
"# # 'Gyro X', 'Gyro Y', 'Gyro Z',\n",
"# 'Delta Time', 'Ground Truth X', 'Ground_Truth Y'])\n",
"# print(\"Testing Data\")\n",
"# display(test_frame.head(5))\n",
"# # test_frame.plot(figsize=(20,10))\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
version https://git-lfs.github.com/spec/v1
oid sha256:ab8e59c48836329997ea38210ad6e0a9b6423eb2ac1449a349b8552706bf52d2
size 14569
Loading