Skip to content

Commit d4c2768

Browse files
authored
Update tests and types (#28)
1 parent 7e44700 commit d4c2768

10 files changed

Lines changed: 269 additions & 25 deletions

src/models.jl

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -294,10 +294,10 @@ function ((;)::CNO)(x, params, state)
294294
masks_down = state.masks_down
295295
masks_up = state.masks_up
296296
masks_bottlenecks = state.masks_bottlenecks
297-
up_ch_ranges = Array(state.up_ch_ranges)
298-
down_ch_ranges = Array(state.down_ch_ranges)
299-
bottleneck_ranges = Array(state.bottleneck_ranges)
300-
reversed_bottleneck_ranges = Array(state.reversed_bottleneck_ranges)
297+
up_ch_ranges = Array(state.up_ch_ranges) |> Lux.cpu_device()
298+
down_ch_ranges = Array(state.down_ch_ranges) |> Lux.cpu_device()
299+
bottleneck_ranges = Array(state.bottleneck_ranges) |> Lux.cpu_device()
300+
reversed_bottleneck_ranges = Array(state.reversed_bottleneck_ranges) |> Lux.cpu_device()
301301

302302
# First thing to do is to crop the center of x along every dimension
303303
s0 = size(x)

test/test-activation.jl

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,6 @@ using CairoMakie: Figure, Axis, heatmap, save, heatmap!, GridLayout
1212
using Images: load
1313
using CUDA
1414

15-
CUDA.allowscalar(false)
1615

1716
# Setup initial image and parameters
1817
N0 = 512
@@ -94,6 +93,8 @@ if !CUDA.functional()
9493
@test "CUDA not functional, skipping GPU tests"
9594
return
9695
end
96+
CUDA.allowscalar(false)
97+
9798
# Prepare for GPU tests
9899
u = CuArray(u)
99100
actlayer_identity = create_CNOactivation(T, D, N, cutoff, activation_function = identity)

test/test-convolution.jl

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -15,25 +15,19 @@ using FFTW: fft, ifft
1515
using ChainRulesCore
1616

1717
rng = Random.Xoshiro(123)
18-
CUDA.allowscalar(false)
1918

2019
function reference_convolve(x, k)
21-
if k isa SubArray && parent(k) isa CuArray
22-
k = CuArray(collect(k))
23-
end
24-
if x isa SubArray && parent(x) isa CuArray
25-
x = CuArray(collect(x))
26-
end
2720
fft_x = fft(x, (1, 2))
2821
fft_k = fft(k, (2, 3))
22+
# Cannot use for loops if you want it to be differentiable
2923
# ffty = zeros(ComplexF32, size(x, 1), size(x, 2), size(k, 1), size(x, 4))
3024
# for c = 1:size(k, 1)
3125
# for ci = 1:size(x, 3)
3226
# ffty[:,:,c,:] .+= fft_x[:, :, ci, :] .* fft_k[c, :, :]
3327
# end
3428
# end
3529

36-
# Can not use for loops if you want it to be differentiable
30+
# No-loops alternative
3731
ffty = [
3832
reduce(+, [fft_x[:, :, ci, :] .* fft_k[c, :, :] for ci = 1:size(x, 3)]) for
3933
c = 1:size(k, 1)
@@ -112,6 +106,8 @@ if !CUDA.functional()
112106
@test "CUDA not functional, skipping GPU tests"
113107
return
114108
end
109+
CUDA.allowscalar(false)
110+
115111
@testset "Convolution (GPU)" begin
116112
@testset "Forward" begin
117113
x = CUDA.ones(Float32, 16, 16, 2, 1)
Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -168,6 +168,8 @@ if !CUDA.functional()
168168
@test "CUDA not functional, skipping GPU tests"
169169
return
170170
end
171+
CUDA.allowscalar(false)
172+
171173
@testset "CoupledNODE integration (GPU)" begin
172174
# Create the model
173175
closure, θ_start, st = cno(

test/test-couplednode_prior.jl

Lines changed: 248 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,248 @@
1+
using Test
2+
using Adapt
3+
using Lux
4+
using LuxCUDA
5+
using JLD2
6+
using ConvolutionalNeuralOperators: create_CNOdownsampler, create_CNO
7+
using ComponentArrays: ComponentArray
8+
using Optimisers: Adam, ClipGrad, OptimiserChain
9+
using Optimization
10+
using Random
11+
using Zygote: Zygote
12+
using CUDA
13+
using CoupledNODE
14+
using IncompressibleNavierStokes
15+
using NeuralClosure
16+
using OrdinaryDiffEqTsit5
17+
18+
rng = Random.Xoshiro(123)
19+
T = Float32
20+
N = 16
21+
nles = 16
22+
D = 2
23+
ch_ = [2, 2]
24+
act = [tanh_fast, identity]
25+
df = [2, 2]
26+
k_rad = [3, 3]
27+
bd = [2, 2, 2]
28+
cutoff = 10
29+
batch = 4
30+
31+
@testset "CoupledNODE integration (CPU)" begin
32+
# Create the model
33+
closure, θ_start, st = cno(
34+
T = T,
35+
N = N,
36+
D = D,
37+
cutoff = cutoff,
38+
ch_sizes = ch_,
39+
activations = act,
40+
down_factors = df,
41+
k_radii = k_rad,
42+
bottleneck_depths = bd,
43+
rng = rng,
44+
use_cuda = false,
45+
)
46+
47+
# Define input tensor and pass through model
48+
input_tensor = rand(T, N, N, D, batch)
49+
output = Lux.apply(closure, input_tensor, θ_start, st)[1]
50+
@test size(output) == size(input_tensor)
51+
52+
# Read conf
53+
NS = Base.get_extension(CoupledNODE, :NavierStokes)
54+
conf = NS.read_config("./config.yaml")
55+
conf["params"]["backend"] = CPU()
56+
57+
# get params
58+
params = NS.load_params(conf)
59+
device(x) = x
60+
61+
# Get the setup in the format expected by the CoupledNODE
62+
function getsetup(; params, nles)
63+
Setup(;
64+
x = ntuple(α -> range(params.lims..., nles + 1), params.D),
65+
params.Re,
66+
params.backend,
67+
params.bodyforce,
68+
params.issteadybodyforce,
69+
)
70+
end
71+
setup = getsetup(; params, nles)
72+
psolver = default_psolver(setup)
73+
setup = []
74+
for nl in nles
75+
x = ntuple(α -> LinRange(T(0.0), T(1.0), nl + 1), params.D)
76+
push!(setup, Setup(; x = x, Re = params.Re, params.backend))
77+
end
78+
79+
# Load data
80+
function namedtupleload(file)
81+
dict = load(file)
82+
k, v = keys(dict), values(dict)
83+
pairs = @. Symbol(k) => v
84+
(; pairs...)
85+
end
86+
data_train = []
87+
data_i = namedtupleload("data_train.jld2")
88+
push!(data_train, hcat(data_i))
89+
90+
# Create the io array
91+
NS = Base.get_extension(CoupledNODE, :NavierStokes)
92+
io_train = NS.create_io_arrays_priori(data_train, setup)
93+
94+
# Create the dataloader
95+
θ = device(copy(θ_start))
96+
dataloader_prior = NS.create_dataloader_prior(
97+
io_train[1];
98+
batchsize = 4,
99+
rng = Random.Xoshiro(24),
100+
device = device,
101+
)
102+
train_data_priori = dataloader_prior()
103+
104+
l0 = CoupledNODE.loss_priori_lux(closure, θ, st, train_data_priori)[1]
105+
@test isnan(l0) == false
106+
loss = CoupledNODE.loss_priori_lux
107+
108+
# Final integration test of the entire train interface
109+
l, trainstate = CoupledNODE.train(
110+
closure,
111+
θ,
112+
st,
113+
dataloader_prior,
114+
loss;
115+
nepochs = 20,
116+
alg = OptimiserChain(Adam(T(1.0e-3)), ClipGrad(0.1)),
117+
cpu = true,
118+
)
119+
@test isnan(l) == false
120+
@test l < l0
121+
@test trainstate.step == 20
122+
@test any(isnan, trainstate.parameters) == false
123+
124+
end
125+
126+
if !CUDA.functional()
127+
@test "CUDA not functional, skipping GPU tests"
128+
return
129+
end
130+
CUDA.allowscalar(false)
131+
132+
@testset "CoupledNODE integration (GPU)" begin
133+
# Create the model
134+
closure, θ_start, st = cno(
135+
T = T,
136+
N = N,
137+
D = D,
138+
cutoff = cutoff,
139+
ch_sizes = ch_,
140+
activations = act,
141+
down_factors = df,
142+
k_radii = k_rad,
143+
bottleneck_depths = bd,
144+
rng = rng,
145+
use_cuda = true,
146+
)
147+
148+
# Define input tensor and pass through model
149+
input_tensor = CUDA.rand(T, N, N, D, batch)
150+
output = Lux.apply(closure, input_tensor, θ_start, st)[1]
151+
@test size(output) == size(input_tensor)
152+
@test isa(output, CuArray)
153+
154+
# Read conf
155+
NS = Base.get_extension(CoupledNODE, :NavierStokes)
156+
conf = NS.read_config("./config.yaml")
157+
conf["params"]["backend"] = CUDABackend()
158+
159+
# get params
160+
params = NS.load_params(conf)
161+
device(x) = adapt(params.backend, x)
162+
163+
# Get the setup in the format expected by the CoupledNODE
164+
function getsetup(; params, nles)
165+
Setup(;
166+
x = ntuple(α -> range(params.lims..., nles + 1), params.D),
167+
params.Re,
168+
params.backend,
169+
params.bodyforce,
170+
params.issteadybodyforce,
171+
)
172+
end
173+
setup = getsetup(; params, nles)
174+
psolver = default_psolver(setup)
175+
setup = []
176+
for nl in nles
177+
x = ntuple(α -> LinRange(T(0.0), T(1.0), nl + 1), params.D)
178+
push!(setup, Setup(; x = x, Re = params.Re, params.backend))
179+
end
180+
181+
# Load data
182+
function namedtupleload(file)
183+
dict = load(file)
184+
k, v = keys(dict), values(dict)
185+
pairs = @. Symbol(k) => v
186+
(; pairs...)
187+
end
188+
data_train = []
189+
data_i = namedtupleload("data_train.jld2")
190+
push!(data_train, hcat(data_i))
191+
192+
# Create the io array
193+
NS = Base.get_extension(CoupledNODE, :NavierStokes)
194+
io_train = NS.create_io_arrays_priori(data_train, setup)
195+
196+
# Create the dataloader
197+
θ = device(copy(θ_start))
198+
dataloader_prior = NS.create_dataloader_prior(
199+
io_train[1];
200+
batchsize = 4,
201+
rng = Random.Xoshiro(24),
202+
device = device,
203+
)
204+
train_data_priori = dataloader_prior()
205+
@test isa(train_data_priori[1], CuArray)
206+
@test isa(train_data_priori[2], CuArray)
207+
208+
l0 = CoupledNODE.loss_priori_lux(closure, θ, st, train_data_priori)[1]
209+
@test isnan(l0) == false
210+
loss = CoupledNODE.loss_priori_lux
211+
212+
function loss_pb(model, ps, st, (x, y), device = identity)
213+
y_pred, st_ = Lux.apply(model, x, ps, st)[1:2]
214+
return sum(abs2, y_pred - y) / sum(abs2, y)
215+
end
216+
y, back = Zygote.pullback(loss_pb, closure, θ, st, train_data_priori)
217+
@test y == l0
218+
y_bar = 1
219+
_, θ_bar, _, _ = back(y_bar)
220+
@test size(θ_bar) == size(θ)
221+
@test sum(θ_bar) !== 0.0
222+
223+
224+
tstate = Lux.Training.TrainState(closure, θ, st, Adam(T(1.0e-3))) |> Lux.gpu_device()
225+
data = dataloader_prior()
226+
_, l, _, tstate =
227+
Lux.Training.single_train_step!(Optimization.AutoZygote(), loss, data, tstate) |>
228+
Lux.gpu_device()
229+
@test isnan(l) == false
230+
@test l < 2 * l0
231+
@test tstate.step == 1
232+
233+
# Final integration test of the entire train interface
234+
l, trainstate = CoupledNODE.train(
235+
closure,
236+
θ,
237+
st,
238+
dataloader_prior,
239+
loss;
240+
nepochs = 20,
241+
alg = Adam(T(1.0e-3)),
242+
)
243+
@test isnan(l) == false
244+
@test l < 2 * l0
245+
@test trainstate.step == 20
246+
@test any(isnan, trainstate.parameters) == false
247+
248+
end

test/test-fullmodel.jl

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,6 @@ cutoff = 10
4040

4141

4242
@testset "Full model (CPU)" begin
43-
return
4443

4544
@testset "Full CNO model" begin
4645
model, θ, st = cno(
@@ -89,6 +88,7 @@ if !CUDA.functional()
8988
@test "CUDA not functional, skipping GPU tests"
9089
return
9190
end
91+
CUDA.allowscalar(false)
9292
@testset "Full model (GPU)" begin
9393

9494
@testset "Full CNO model" begin
@@ -105,15 +105,11 @@ end
105105
rng = rng,
106106
use_cuda = true,
107107
)
108-
@info typeof(model)
109-
@info typeof(θ)
110-
@info typeof(st)
111108

112109
u_gpu = CuArray(u)
113-
y, zz = model(u_gpu, θ, st)
114-
@info typeof(y)
115-
@info size(y)
116-
# @test size(model(u_gpu, θ, st)[1:1]) == size(u)
110+
y, _ = model(u_gpu, θ, st)
111+
@test size(y) == size(u)
112+
@test isa(y, CuArray)
117113

118114

119115
return

test/test-maskedconvolution.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,6 @@ using FFTW: fft, ifft
1616
using ChainRulesCore
1717

1818
rng = Random.Xoshiro(123)
19-
CUDA.allowscalar(false)
2019

2120

2221
@testset "Masked-Convolution (CPU)" begin
@@ -99,6 +98,7 @@ if !CUDA.functional()
9998
@test "CUDA not functional, skipping GPU tests"
10099
return
101100
end
101+
CUDA.allowscalar(false)
102102

103103
@testset "Masked-Convolution (GPU)" begin
104104
@testset "Forward" begin

test/test-residualblock.jl

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@ using Zygote: Zygote
77
using ComponentArrays: ComponentArray
88
using CUDA
99

10-
CUDA.allowscalar(false)
1110

1211
x = rand(Float32, 16, 16, 2, 1)
1312
k_bottlenecks = rand(Float32, 100, 16, 16)
@@ -52,6 +51,8 @@ if !CUDA.functional()
5251
@test "CUDA not functional, skipping GPU tests"
5352
return
5453
end
54+
CUDA.allowscalar(false)
55+
5556
# Prepare for GPU tests
5657
x = CUDA.rand(Float32, 16, 16, 2, 1)
5758
k_bottlenecks = CUDA.rand(Float32, 100, 16, 16)

0 commit comments

Comments
 (0)