-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest_performance.py
More file actions
102 lines (86 loc) · 3.49 KB
/
test_performance.py
File metadata and controls
102 lines (86 loc) · 3.49 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
"""Micro-benchmark for TailDropout: times forward/backward passes across configs."""
import argparse
import time
import math
import torch
from taildropout import TailDropout
# Profiling entry point, e.g.:
# python -m cProfile -s cumtime test_performance.py --repeats 10
parser = argparse.ArgumentParser(description='')
# Number of forward passes timed per configuration.
parser.add_argument('--repeats', type=int, default=20000, metavar='N')
# Shape of the benchmark input tensor: (batch_size, n_features).
parser.add_argument('--n-features', type=int, default=512, metavar='N')
parser.add_argument('--batch-size', type=int, default=1024, metavar='N')
parser.add_argument('--no-cuda', action='store_true',
default=False, help='disables CUDA training')
# When set, the driver loop raises TimeoutError once total runtime exceeds this.
parser.add_argument('--time-limit', type=int, default=None,
help='Maximum allowed total time in seconds')
args = parser.parse_args()
print(f'args.no_cuda: {args.no_cuda}')
# Use the GPU only when available AND not explicitly disabled.
args.cuda = not args.no_cuda and torch.cuda.is_available()
print(f'GPU: { args.cuda}')
print(f'torch.__version__: {torch.__version__}')
print(args)
def time_since(since):
    """Return the wall-clock time elapsed since *since* as a 'Xm Ys' string.

    *since* is a timestamp previously obtained from ``time.time()``.
    Seconds are truncated (not rounded) by the ``%d`` format.
    """
    elapsed = time.time() - since
    minutes, seconds = divmod(elapsed, 60)
    return '%dm %ds' % (minutes, seconds)
def dropout_runner(dropout,
                   requires_grad = False,
                   eval_mode = False,
                   backward = False):
    """Time ``args.repeats`` forward passes of *dropout* on a ones tensor.

    When both *requires_grad* and *backward* are set, each iteration also
    runs a sum-loss backward pass plus one SGD step on the input tensor.
    Returns a ``(formatted_string, seconds_float)`` pair.
    """
    target_device = 'cuda' if args.cuda else 'cpu'
    data = torch.ones(args.batch_size,
                      args.n_features,
                      requires_grad=requires_grad,
                      device=target_device)
    if eval_mode:
        dropout.eval()
    # Backward only makes sense when the input tracks gradients.
    run_backward = requires_grad and backward
    if run_backward:
        optimizer = torch.optim.SGD((data,), lr=0.1)
    # Timed section.
    start = time.time()
    for _ in range(args.repeats):
        out = dropout(data)
        if run_backward:
            out.sum().backward()
            optimizer.step()
            optimizer.zero_grad()
    return time_since(start), time.time() - start
total_start = time.time()
# Table header: one row per (eval_mode, requires_grad, backward, compile) combo.
print(f"{'Eval Mode':<12} "
      f"{'Requires Grad':<15} "
      f"{'Backward':<10} "
      f"{'Compile':<10} "
      f"{'Timing':<20} "
      f"{'Total (s)':<10} "
      f"{'Layer Type'}")
for eval_mode in [False, True]:
    for requires_grad in [True, False]:
        for backward in [True, False]:
            # A backward pass without gradients is meaningless; skip the combo.
            if backward and not requires_grad:
                continue
            # NOTE: renamed from `compile` to avoid shadowing the builtin.
            for use_compile in [False, True]:
                # Each configuration runs twice: the first pass warms up
                # (compilation, allocator caches), the second is steadier.
                for _ in range(2):
                    for dropout in [TailDropout()]:  # [TailDropout(), nn.Dropout()]
                        if use_compile:
                            dropout.compile()
                        timing, secs = dropout_runner(
                            dropout,
                            requires_grad=requires_grad,
                            eval_mode=eval_mode,
                            backward=backward
                        )
                        # BUG FIX: previously printed `dropout.__ne__` (the repr
                        # of a bound method) in the "Layer Type" column; print
                        # the layer's class name instead.
                        print(f"{str(eval_mode):<12} "
                              f"{str(requires_grad):<15} "
                              f"{str(backward):<10} "
                              f"{str(use_compile):<10} "
                              f"{timing:<20} {f'({secs:.2f})':<10} "
                              f"{type(dropout).__name__}")
                # Optional global time budget: abort loudly when exceeded.
                if args.time_limit is not None:
                    secs_elapsed = round(time.time() - total_start)
                    if secs_elapsed >= args.time_limit:
                        raise TimeoutError(
                            f"Time limit exceeded: {secs_elapsed}s > {args.time_limit}s"
                        )
print("-" * 80)
print(f"Total time: {time_since(total_start)} ({round(time.time() - total_start)}s)")