-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: inference_autograd.py
More file actions
45 lines (34 loc) · 1.37 KB
/
inference_autograd.py
File metadata and controls
45 lines (34 loc) · 1.37 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
import os
import sys
from model import *
#from torch.profiler import profile, record_function, ProfilerActivity
# Directory
# NOTE(review): chdir into the current working directory is a no-op --
# presumably a leftover from a version that changed into a script/data dir;
# confirm before removing.
os.chdir(os.getcwd())
# Select the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def infer_eval_autograd(model_name, path, inputData):
    """Profile 1000 forward passes of a model with torch.autograd.profiler.

    Args:
        model_name: Name of a model class exported by the ``model`` module
            (resolved via ``eval``); must accept ``num_classes`` and be
            callable on ``inputData``.
        path: Weights filename, looked up under ``./weights/``.
        inputData: Input tensor, expected to already live on ``device``.

    Raises:
        FileNotFoundError: If the weights file does not exist.
    """
    print("Start 1000 Inference:", model_name + "@" + path)
    # NOTE(review): eval() on a command-line-supplied name executes arbitrary
    # code -- tolerable for a local benchmark script, never for untrusted input.
    model = eval(model_name)(num_classes=5).to(device)
    # load model weights
    weights_path = "./weights/" + path
    if not os.path.exists(weights_path):
        # raise instead of assert: asserts are stripped under `python -O`
        raise FileNotFoundError("file: '{}' does not exist.".format(weights_path))
    # map_location lets a checkpoint saved on CUDA load on a CPU-only machine,
    # matching the CPU fallback already built into `device`
    model.load_state_dict(torch.load(weights_path, map_location=device))
    model.eval()  # inference mode: freeze dropout / batch-norm statistics
    print("using {} device.".format(device))
    with torch.autograd.profiler.profile(use_cuda=True) as prof:
        for i in range(1000):
            model(inputData)
    print(prof)
if __name__ == '__main__':
    # CLI: either a list of "<model>@<weights-file>" specs, or
    # "-f <file>" naming a file with one spec per line.
    args = sys.argv[1:]
    if args and args[0] == "-f" and len(args) > 1:
        with open(args[1], "r") as spec_file:  # context manager closes the file
            cmd_list = spec_file.read().splitlines()
    else:
        cmd_list = args
    print('Inference Scheduled: ', str(cmd_list))
    # .to(device) instead of .cuda(): works on CPU-only hosts as well
    inputData = torch.rand([1, 3, 227, 227]).to(device)
    for spec in cmd_list:
        if "@" in spec:
            modelName, path = spec.split("@", 1)
            infer_eval_autograd(modelName, path, inputData)
        else:
            # The original code referenced an undefined `path` here (NameError
            # on the first bare spec, stale value afterwards) -- report & skip.
            print("Skipping '{}': expected '<model>@<weights-file>' format.".format(spec))