-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrun_experiments_2layer_models.sh
More file actions
72 lines (59 loc) · 4.13 KB
/
run_experiments_2layer_models.sh
File metadata and controls
72 lines (59 loc) · 4.13 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
#!/bin/bash
# Hyperparameter sweep for 2-layer / 200-hidden La-MAML models on
# Fashion-MNIST and Rotated MNIST (see the per-dataset loops below).
# Requires: main.py in the working directory, datasets under data/,
# and a writable logs/ directory.

# Fail on use of unset variables. -e is deliberately NOT set: a single
# failed run should not abort the remainder of the sweep.
set -u

# Common flag strings shared by every run of each dataset. These are kept
# as plain strings and expanded UNQUOTED at the call sites on purpose, so
# word splitting turns each space-separated token into its own argument.
ROT="--n_layers 2 --n_hiddens 200 --data_path data/ --log_every 100 --samples_per_task 200 --dataset mnist_rotations --log_dir logs/"
FASHION="--n_layers 2 --n_hiddens 200 --data_path data/ --log_every 100 --samples_per_task 200 --dataset fashion_mnist --log_dir logs/"
# FASHION MNIST runs: sweep the inner-loop LR (alpha_init) over four model
# variants x 10 seeds each.
# Variants: "" = baseline; s-only = learned inhibition multiplier;
# lr-only = learned (per-layer) LRs; meta-s+lr = both.
# The single command template below replaces four copy-pasted loops that
# differed only in the trailing variant flags.
for lr in 0.0001 0.00025 0.0005 0.001 0.0025 0.005 0.01 0.025 0.05 0.1 0.25 0.5
do
  for variant in \
    "" \
    "--learn_inhibition_multiplier" \
    "--learn_lr --learn_layer_lr" \
    "--learn_inhibition_multiplier --learn_lr --learn_layer_lr"
  do
    for i in {0..9}
    do
      # $FASHION and $variant are intentionally unquoted flag strings
      # (word-split into separate arguments).
      # shellcheck disable=SC2086
      nohup python3 main.py $FASHION --model lamaml --seed ${i} --memories 200 --batch_size 5 --replay_batch_size 5 --n_epochs 1 --glances 1 --alpha_init $lr --use_old_task_memory \
      --add_item_labels 0.0 --num_of_item_labels 0 --item_option per_item --multiplier_lr 0.1 --opt_lr 0.0005 --remove_bias --calc_test_accuracy --normalize_hidden --small_test $variant
    done
  done
done
# ROTATED MNIST runs: same sweep as the Fashion-MNIST section but with
# mnist_rotations flags ($ROT) and dataset-specific multiplier_lr / opt_lr.
# Variants: "" = baseline; s-only = learned inhibition multiplier;
# lr-only = learned (per-layer) LRs; meta-s+lr = both.
# FIX: the original applied nohup to only two of the four variants
# (baseline and meta-s+lr lacked it), so those runs were killed on SIGHUP
# while the others survived; nohup is now applied uniformly.
for lr in 0.0001 0.00025 0.0005 0.001 0.0025 0.005 0.01 0.025 0.05 0.1 0.25 0.5
do
  for variant in \
    "" \
    "--learn_inhibition_multiplier" \
    "--learn_lr --learn_layer_lr" \
    "--learn_inhibition_multiplier --learn_lr --learn_layer_lr"
  do
    for i in {0..9}
    do
      # $ROT and $variant are intentionally unquoted flag strings
      # (word-split into separate arguments).
      # shellcheck disable=SC2086
      nohup python3 main.py $ROT --model lamaml --seed ${i} --memories 200 --batch_size 5 --replay_batch_size 5 --n_epochs 1 --glances 1 --alpha_init $lr --use_old_task_memory \
      --add_item_labels 0.0 --num_of_item_labels 0 --item_option per_item --multiplier_lr 0.02 --opt_lr 0.0002 --remove_bias --calc_test_accuracy --normalize_hidden --small_test $variant
    done
  done
done