-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathAORA.py
More file actions
executable file
·182 lines (166 loc) · 8.33 KB
/
AORA.py
File metadata and controls
executable file
·182 lines (166 loc) · 8.33 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
"""
###
# python adaptation of the MATLAB code by M. Bollhöfer, TU Braunschweig
###
This file implements the AORA procedure for moment matching reduced order modeling.
The method relies on a modified Gram-Schmidt procedure.
The AORA function takes as arguments the system matrices for the FEM system.
Optionally, the LU decomposition can be reused to improve performance.
The AORA function returns, among others, the projection matrix V which is used
to reduce the order of the FE system of equations.
"""
import numpy as np
import scipy as sp
import scipy.linalg
from scipy.sparse import csc_matrix, linalg as sla
from scipy.sparse.linalg import spsolve
from assemble_matrix import assemble_matrix
from assemble_matrix_derivative import assemble_matrix_derivative
def _sparse_lu_factors(A):
    """
    Factor A with SuperLU as Ps^T @ Ls @ Us @ Qs^T and return (Ps, Ls, Us, Qs).

    Ps/Qs are sparse row/column permutation matrices built from SuperLU's
    perm_r/perm_c; Ls/Us are the dense triangular factors (dense here:
    change for future work on larger systems).
    """
    na = np.shape(A)[0]
    lu = sla.splu(csc_matrix(A))
    Ls = lu.L.toarray()   # .toarray(): the .A property is deprecated in SciPy
    Us = lu.U.toarray()
    Ps = csc_matrix((np.ones(na), (lu.perm_r, np.arange(na))))
    Qs = csc_matrix((np.ones(na), (np.arange(na), lu.perm_c)))
    return (Ps, Ls, Us, Qs)
def AORA(M, D, K, B, C, s, Nr, LinSysFac=None):
    """
    The complete AORA (Adaptive-Order Rational Arnoldi) procedure for
    moment-matching model order reduction of the second-order FE system
    (s^2 M + s D + K) x = B, y = C x.

    Parameters
    ----------
    M, D, K : array_like, shape (n, n)
        FE system matrices (passed through to ``assemble_matrix`` /
        ``assemble_matrix_derivative``).
    B : array_like, shape (n,)
        Input (load) vector.
    C : array_like
        Output vector; weights the moment error used to adaptively pick
        the next expansion point.
    s : scalar or array_like
        Expansion frequency or frequencies.
    Nr : int
        Desired size of the reduced basis (number of columns of V).
    LinSysFac : list of [Ps, Ls, Us, Qs], optional
        Previously computed LU factorizations, one per expansion point.
        Reusing them gives a performance benefit; missing entries are
        computed and appended.

    Returns
    -------
    (V, info, LinSysFac, R, T)
        V (n x Nr) is the projection matrix and the most important output;
        LinSysFac holds the LU factors, which can be saved for further use;
        R are the final residuals and T the SOAR recurrence matrix.
    """
    SOAR = 1  # all given examples are of second order
    FIRST_ORDER = 0 if SOAR else 1  # linearized first-order variant (currently unreachable)
    info = []
    # Accept a scalar expansion frequency as well as a vector of them
    # (the old try/except length probe crashed later on s[i] for scalars).
    s = np.atleast_1d(np.asarray(s))
    # Number of given expansion points
    NumExpPts = s.shape[0]
    n = np.shape(M)[0]
    # Normalization coefficient hPi(s_i) = \prod_{j} || r^{(j-1)}(s_i) ||
    hPi = np.ones((NumExpPts, 1))
    # Residual vector for every expansion point in each iteration step
    # (np.complex128: the np.complex_ alias was removed in NumPy 2.0)
    R = np.zeros((n, NumExpPts), dtype=np.complex128)
    # Projection matrix for the reduced order model
    V = np.zeros((n, Nr), dtype=np.complex128)
    # Initialization of Krylov subspaces for each expansion point. If the
    # factors are already given, a performance benefit can be expected.
    if LinSysFac is None:
        LinSysFac = []
    for i in range(NumExpPts):
        # Compute and cache any factorization not supplied by the caller.
        # (Fixes the original off-by-one `i > len(...)` test, which raised an
        # IndexError at i == len, and the reuse branch that appended only
        # three factors and left Qs undefined.)
        if i >= len(LinSysFac):
            A = assemble_matrix(s[i], K, D, M)
            LinSysFac.append(list(_sparse_lu_factors(A)))
        (Ps, Ls, Us, Qs) = LinSysFac[i][:4]
        # r^(0)(s_i) = A(s_i)^{-1} B via the two permuted triangular solves
        R[:, i] = spsolve((Us @ np.transpose(Qs)), spsolve(np.transpose(Ps) @ Ls, B))
    T = np.zeros((Nr, Nr), dtype=np.complex128)
    if SOAR:  # all given examples are of second order
        f = np.zeros((n, 1))
    elif FIRST_ORDER:
        R2 = np.zeros((n, NumExpPts))
    # Main iteration loop of AORA method
    for j in range(Nr):
        # Choice of expansion frequency with maximum output moment error.
        # hPi[i, 0] is a scalar weight, so plain multiplication is used for
        # every point (the original mixed `*` for i==0 with a shape-fragile
        # `hPi[i] @ C @ R` for i>0).
        idxMomErr = 0
        maxMomErr = np.abs(hPi[0, 0] * (C @ R[:, 0]))
        for i in range(1, NumExpPts):
            momErr = np.abs(hPi[i, 0] * (C @ R[:, i]))
            if momErr > maxMomErr:
                maxMomErr = momErr
                idxMomErr = i
        # Orthonormal vector for s[idxMomErr]
        if FIRST_ORDER:
            # Frobenius norm of the stacked residual [R; R2]
            normRes = np.linalg.norm(np.array([R[:, idxMomErr], R2[:, idxMomErr]]).T)
        else:
            # 2-norm of the residual column (equals the Frobenius norm of the
            # (n, 1) matrix the original built)
            normRes = np.linalg.norm(R[:, idxMomErr])
        V[:, j] = R[:, idxMomErr] / normRes
        hPi[idxMomErr] = hPi[idxMomErr] * normRes
        if SOAR and j > 0:
            T[j, j - 1] = normRes
            b = np.zeros((j, 1), dtype=np.complex128)
            b[0, 0] = 1
            # Small dense j x j system: solve directly instead of spsolve,
            # which would only warn and convert the dense slice to CSC.
            vec = np.linalg.solve(T[1:j + 1, 0:j], b)
            f = V[:, 0:j] @ vec
        elif FIRST_ORDER:
            # NOTE(review): V2 is never allocated anywhere in this function;
            # this branch is unreachable while SOAR is hard-coded to 1 above.
            V2[:, j] = R2[:, idxMomErr] / normRes
        # Update residual for next iteration
        for i in range(NumExpPts):
            if i == idxMomErr:
                (Ps, Ls, Us, Qs) = LinSysFac[i][:4]
                Ap = assemble_matrix_derivative(s[i], K, D, M)
                if SOAR:
                    rhs = np.atleast_2d(Ap @ V[:, j]).T + 1j * (M @ f)
                    R[:, i] = spsolve(-1 * (Us @ np.transpose(Qs)),
                                      spsolve(np.transpose(Ps) @ Ls, rhs)).flatten()
                elif FIRST_ORDER:
                    rhs = Ap @ V[:, j] + 1j * (M @ V2[:, j])
                    R[:, i] = spsolve(-1 * (Us @ np.transpose(Qs)),
                                      spsolve(np.transpose(Ps) @ Ls, rhs))
                    R2[:, i] = V[:, j]
                else:
                    R[:, i] = spsolve(-1 * (Us @ np.transpose(Qs)),
                                      spsolve(np.transpose(Ps) @ Ls, Ap @ V[:, j]))
                # Complete modified Gram-Schmidt procedure for the new moment.
                # Modified Gram-Schmidt yields higher numerical stability
                # compared to classical Gram-Schmidt for finite arithmetic.
                for t in range(j + 1):
                    if SOAR:  # all given examples are of second order
                        # Orthogonal projection of R[:, i] onto V[:, t]
                        T[t, j] = np.transpose(V[:, t]).conj() @ R[:, i]
                        R[:, i] = R[:, i] - T[t, j] * V[:, t]
                        # Optional reorthogonalization (can improve numerical
                        # stability; mostly a single pass suffices, twice is
                        # always enough):
                        #   beta = np.transpose(V[:, t]).conj() @ R[:, i]
                        #   R[:, i] = R[:, i] - beta * V[:, t]
                        #   T[t, j] = T[t, j] + beta
                    elif FIRST_ORDER:
                        # Orthogonal projection of [R; R2] onto [V; V2].
                        # alpha is a scalar, so scale with '*' (the original
                        # used '@', which raises for scalars).
                        alpha = (np.transpose(V[:, t]).conj() @ R[:, i]
                                 + np.transpose(V2[:, t]).conj() @ R2[:, i])
                        R[:, i] = R[:, i] - alpha * V[:, t]
                        R2[:, i] = R2[:, i] - alpha * V2[:, t]
                    else:
                        # Orthogonal projection of R[:, i] onto V[:, t]
                        alpha = np.transpose(V[:, t]).conj() @ R[:, i]
                        R[:, i] = R[:, i] - alpha * V[:, t]
            else:
                # One further modified Gram-Schmidt step for the old moment
                t = j
                # NOTE(review): this path omits .conj() on V, matching the
                # original; confirm whether the conjugate is intended for
                # complex bases.
                if FIRST_ORDER:
                    # Orthogonal projection of [R; R2] onto [V; V2]
                    alpha = (np.transpose(V[:, t]) @ R[:, i]
                             + np.transpose(V2[:, t]) @ R2[:, i])
                    R[:, i] = R[:, i] - alpha * V[:, t]
                    R2[:, i] = R2[:, i] - alpha * V2[:, t]
                else:
                    # Orthogonal projection of R[:, i] onto V[:, t].
                    # alpha is a scalar: the original's `alpha @ V[:, t]`
                    # raised ValueError whenever NumExpPts > 1.
                    alpha = np.transpose(V[:, t]) @ R[:, i]
                    R[:, i] = R[:, i] - alpha * V[:, t]
    if FIRST_ORDER:
        # Rank-revealing compression of the first-order basis (unreachable
        # while SOAR is hard-coded; kept for parity with the MATLAB original).
        (V, RR) = sp.linalg.qr(V)
        (URR, SRR, VRR) = sp.linalg.svd(RR)
        rnk = np.linalg.matrix_rank(SRR)
        V = V @ URR[:, 0:rnk]
    # V is the projection matrix and the most important output. LinSysFac are
    # the LU coefficients, which can be saved for further use.
    return (V, info, LinSysFac, R, T)