-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathalgorithm_utils.py
More file actions
56 lines (45 loc) · 1.36 KB
/
algorithm_utils.py
File metadata and controls
56 lines (45 loc) · 1.36 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
import abc
import logging
import random
import numpy as np
import torch
from torch.autograd import Variable
class Algorithm(metaclass=abc.ABCMeta):
    """Abstract base class for anomaly-detection algorithms.

    Stores common configuration (logger, display name, RNG seed, detail flag)
    and, when a seed is given, seeds both the stdlib and NumPy RNGs so that
    subclasses produce reproducible results.
    """

    def __init__(self, module_name, name, seed, details=False):
        # Per-module logger so log records carry the subclass's module name.
        self.logger = logging.getLogger(module_name)
        self.name = name
        self.seed = seed
        self.details = details
        # Subclasses may populate this with per-prediction diagnostics.
        self.prediction_details = {}
        # Seed both RNG sources up front for reproducibility; skipped when
        # no seed was requested.
        if seed is not None:
            random.seed(seed)
            np.random.seed(seed)

    def __str__(self):
        """Display name of the algorithm."""
        return self.name

    @abc.abstractmethod
    def fit(self, X):
        """Train the algorithm on the given dataset"""

    @abc.abstractmethod
    def predict(self, X):
        """
        :return anomaly score
        """
class PyTorchUtils(metaclass=abc.ABCMeta):
    """Mixin with PyTorch conveniences: RNG seeding, device selection, and
    moving tensors/models onto the selected device.
    """

    def __init__(self, seed, gpu):
        # `gpu` is a CUDA device index, or None to force CPU execution.
        self.gpu = gpu
        self.seed = seed
        if self.seed is not None:
            # Seed both CPU and CUDA generators for reproducibility.
            torch.manual_seed(self.seed)
            torch.cuda.manual_seed(self.seed)
        self.framework = 0

    @property
    def device(self):
        """Computed per access: the CUDA device when one was requested and is
        available, otherwise the CPU."""
        use_cuda = torch.cuda.is_available() and self.gpu is not None
        if use_cuda:
            return torch.device(f'cuda:{self.gpu}')
        return torch.device('cpu')

    def to_var(self, t, **kwargs):
        # ToDo: check whether cuda Variable.
        # NOTE(review): torch.autograd.Variable is a no-op wrapper on modern
        # PyTorch; kept for compatibility with the original behavior.
        moved = t.to(self.device)
        return Variable(moved, **kwargs)

    def to_device(self, model):
        # Moves the model's parameters in place; intentionally returns None.
        model.to(self.device)