"""collocation.py — generators of collocation points (uniform grid,
Latin hypercube, uniform random) on a 1-D space-time domain
[xmin, xmax] x [tmin, tmax], returning interior, initial-condition and
boundary-condition point sets as torch column tensors."""
import torch
import lhsmdu
def uniform(n, n_i, n_b, _u_i, _u_b, xmin, xmax, tmin=0, tmax=1):
    """Generate collocation points on regular (equally spaced) grids.

    Parameters
    ----------
    n : int
        Interior grid resolution per axis (n*n interior points total).
    n_i : int
        Number of x samples for the initial condition (the two endpoint
        samples are sliced off, so n_i - 2 points are returned).
    n_b : int
        Number of time samples per boundary wall.
    _u_i, _u_b : callable
        Initial-condition u(x) and boundary-condition u(t) evaluators.
    xmin, xmax, tmin, tmax : float
        Extent of the space-time domain.

    Returns
    -------
    (x, t, x_i, t_i, u_i, x_b, t_b, u_b) — column tensors (shape (k, 1))
    with requires_grad enabled where differentiation is needed.
    """
    n_x = n  # Number of data points in interior domain
    n_t = n
    # Interior of domain: drop both spatial endpoints and the first time
    # level so the interior set does not overlap the boundary/initial sets.
    X = torch.linspace(xmin, xmax, n_x+2, requires_grad=True)[1:-1]
    T = torch.linspace(tmin, tmax, n_t+1, requires_grad=True)[1:]
    x, t = torch.meshgrid((X, T))
    x = x.reshape(-1, 1)
    t = t.reshape(-1, 1)
    # Initial condition: interior x samples at t = tmin.
    x_i = torch.linspace(xmin, xmax, n_i,requires_grad=True).reshape(-1, 1)[1:-1]
    # FIX: previously torch.zeros(...), which silently ignored tmin; the
    # initial condition lives at t = tmin (matches hypercube()). Default
    # tmin=0 reproduces the old behaviour exactly.
    t_i = tmin*torch.ones(x_i.shape, requires_grad=True)
    u_i = _u_i(x_i)
    # Boundary: both walls x = xmin and x = xmax, each with n_b equally
    # spaced times in [tmin, tmax].
    x_b = torch.cat((xmin*torch.ones(n_b, 1, requires_grad=True),
                     xmax*torch.ones(n_b, 1, requires_grad=True)))
    t_b = torch.cat((torch.linspace(tmin, tmax, n_b, requires_grad=True),
                     torch.linspace(tmin, tmax, n_b, requires_grad=True)), dim=0).reshape(-1, 1)
    u_b = _u_b(t_b)
    return x, t, x_i, t_i, u_i, x_b, t_b, u_b
def hypercube(n, n_i, n_b, _u_i, _u_b, xmin, xmax, tmin=0, tmax=1):
    """Draw collocation points via Latin-hypercube sampling (lhsmdu).

    Interior: n*n (x, t) pairs from a 2-D LHS design scaled into
    [xmin, xmax] x [tmin, tmax]. Initial: n_i LHS x samples at t = tmin.
    Boundary: n_b LHS time samples reused for the two walls x = xmin and
    x = xmax.

    Returns (x, t, x_i, t_i, u_i, x_b, t_b, u_b) as float column tensors
    with gradients enabled.
    """
    x_span = xmax - xmin
    t_span = tmax - tmin

    # Interior: one 2-row LHS design — row 0 maps to x, row 1 to t.
    design = lhsmdu.sample(2, n*n)
    x = (torch.from_numpy(design[0, :]).reshape(-1, 1)*x_span + xmin).float()
    t = (torch.from_numpy(design[1, :]).reshape(-1, 1)*t_span + tmin).float()
    x.requires_grad = True
    t.requires_grad = True

    # Initial condition at t = tmin. requires_grad is set *before* the
    # .float() cast, preserving the original tensors' autograd structure.
    x_i = torch.from_numpy(lhsmdu.sample(1, n_i)[0, :]).reshape(-1, 1)*x_span + xmin
    x_i.requires_grad = True
    x_i = x_i.float()
    t_i = torch.ones(x_i.shape, requires_grad=True)*tmin
    u_i = _u_i(x_i)

    # Boundary: identical LHS times stacked for both walls.
    x_b = torch.cat((xmin*torch.ones(n_b, 1, requires_grad=True),
                     xmax*torch.ones(n_b, 1, requires_grad=True))).float()
    wall_times = torch.from_numpy(lhsmdu.sample(1, n_b)[0, :]).reshape(-1, 1)*t_span + tmin
    t_b = torch.cat((wall_times, wall_times)).float()
    t_b.requires_grad = True
    u_b = _u_b(t_b)
    return x, t, x_i, t_i, u_i, x_b, t_b, u_b
def random(n, n_i, n_b, _u_i, _u_b, xmin, xmax, tmin=0, tmax=1):
    """Draw collocation points uniformly at random (lhsmdu random matrix).

    Same layout as hypercube(): n*n interior (x, t) pairs scaled into
    [xmin, xmax] x [tmin, tmax], n_i initial-condition x samples at
    t = tmin, and n_b time samples reused for both walls x = xmin and
    x = xmax.

    Returns (x, t, x_i, t_i, u_i, x_b, t_b, u_b) as float column tensors
    with gradients enabled.

    NOTE: this function shadows the stdlib ``random`` module name under a
    star import; kept as-is so existing callers keep working.
    """
    cube = lhsmdu.createRandomStandardUniformMatrix(2, n*n)
    x = torch.from_numpy(cube[0, :]).reshape(-1, 1)*(xmax-xmin) + xmin
    t = torch.from_numpy(cube[1, :]).reshape(-1, 1)*(tmax-tmin) + tmin
    x = x.float()
    t = t.float()
    x.requires_grad = True
    t.requires_grad = True
    # Initial condition at t = tmin.
    x_i = torch.from_numpy(lhsmdu.createRandomStandardUniformMatrix(1, n_i)[0, :]).reshape(-1, 1)*(xmax - xmin) + xmin
    x_i.requires_grad=True
    x_i = x_i.float()
    # FIX: previously torch.zeros(...), which silently ignored tmin; the
    # initial condition lives at t = tmin (consistent with hypercube()).
    # Default tmin=0 reproduces the old behaviour exactly.
    t_i = tmin*torch.ones(x_i.shape, requires_grad=True)
    u_i = _u_i(x_i)
    # Boundary: same random times reused for both walls.
    x_b = torch.cat((xmin*torch.ones(n_b, 1, requires_grad=True),
                     xmax*torch.ones(n_b, 1, requires_grad=True))).float()
    t_b = torch.from_numpy(lhsmdu.createRandomStandardUniformMatrix(1, n_b)[0, :]).reshape(-1, 1)*(tmax-tmin) + tmin
    t_b = torch.cat((t_b, t_b)).float()
    t_b.requires_grad=True
    u_b = _u_b(t_b)
    return x, t, x_i, t_i, u_i, x_b, t_b, u_b