-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathgpu_neuralnetwork.jl
More file actions
2056 lines (1713 loc) · 75.5 KB
/
gpu_neuralnetwork.jl
File metadata and controls
2056 lines (1713 loc) · 75.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
### A Pluto.jl notebook ###
# v0.19.8
using Markdown
using InteractiveUtils
# This Pluto notebook uses @bind for interactivity. When running this notebook outside of Pluto, the following 'mock version' of @bind gives bound variables a default value (instead of an error).
# Mock version of Pluto's @bind macro so the notebook can run outside Pluto:
# bound variables get the widget's default value instead of raising an error.
macro bind(def, element)
quote
# Use AbstractPlutoDingetjes' initial_value when that package is loaded;
# otherwise fall back to a function that always returns `missing`.
local iv = try Base.loaded_modules[Base.PkgId(Base.UUID("6e696c72-6542-2067-7265-42206c756150"), "AbstractPlutoDingetjes")].Bonds.initial_value catch; b -> missing; end
local el = $(esc(element))
# Assign the bound global: the element's current value if `get` applies, else the initial value.
global $(esc(def)) = Core.applicable(Base.get, el) ? Base.get(el) : iv(el)
el
end
end
# ╔═╡ 423cb435-ab09-426e-a29a-0b894fc767ba
begin
using Downloads
using CSV, DataFrames
using MLBase, MLDataUtils
#using StatsBase , GLM
using CUDA
using Flux
using Flux: sigmoid, binarycrossentropy, logitbinarycrossentropy
using Plots#, LaTeXStrings
using ValueHistories
using PlutoUI, HypertextLiteral, ProgressLogging
using BenchmarkTools
end
# ╔═╡ 1d8fe699-810b-44ba-a9cd-80816338e08c
md"""
# Lab 18: Accelerating Neural Networks with GPU
#### [Penn State Astroinformatics Summer School 2022](https://sites.psu.edu/astrostatistics/astroinfo-su22-program/)
#### [Eric Ford](https://www.personal.psu.edu/ebf11)
"""
# ╔═╡ 2671099c-667e-4c1b-9e5d-f41bd5752938
md"""
## Overview
In this lab, you'll revisit the use of a neural network as a classifier to identify high-${z}$ quasars based on a dataset of SDSS & Spitzer photometry. This time the goal is to appreciate the potential benefits of using a GPU for training a neural network. As in the previous lab, you'll explore the effects of adding hidden layers and changing the number of nodes in the hidden layer(s) on the time required to train the network (both in wall clock time and the ratio of time required using a CPU and GPU).
You might find that you train a better performing neural network, thanks to the improved performance with a GPU, but that's not the main point.
Most of the code is reused from the [Introduction to Neural Networks Lab](https://github.com/Astroinformatics/NeuralNetworks), but with just a few small changes to allow us to perform computations using a GPU. Therefore, most of that code has been moved to the end of the notebook, so this lab can quickly jump to training neural networks, without repeating the explanations in the introductory lab. As such, we suggest that you read through that lab's notebook before starting this one.
"""
# ╔═╡ 481e1cae-e832-47ed-b4ff-85a6fb856236
md"""
# Updating code to use a GPU
First, we'll verify that your system has a CUDA-enabled GPU and that you have access to it (e.g., the necessary libraries are installed and there's not another process using the GPU)
"""
# ╔═╡ 4323196f-618d-4d1d-aa44-892cbe098261
CUDA.functional()
# ╔═╡ b4a59b05-b5ea-4f46-9e93-c0e1f523106f
md"""
The GPU will need code that it can run. Writing performant GPU kernel code would be a whole separate set of lessons. In the last lab, we used linear algebra libraries to get the performance benefits of the GPU without writing any GPU code ourselves. Similarly, there are many machine learning and neural network libraries that have already done the hard work for us. Here, we'll be using [Flux.jl](https://fluxml.ai/), the most actively developed package for working with neural networks in Julia. We can instruct Julia to upload the neural network model we built with Flux to the GPU with Flux's `gpu` function. That allocates memory on the GPU to store the parameters for our neural network (mostly the weights and biases) and copies the values of those parameters currently stored on the CPU onto the GPU.
When we train our neural network on the GPU, it will only update the weights and biases stored in the GPU memory to avoid costly memory transfers. If we want to inspect those, then we'll need to transfer them back to the CPU host's memory system.
"""
# ╔═╡ a5ddca67-7c57-45b9-a7e8-0cc2b342e6da
md"""
Next, we'll transfer the training and validation dataset from the CPU host to the GPU's memory. We could use either the `cu` function provided by the CUDA package or Flux's convenience function (`gpu`) and syntax for transferring data.
"""
# ╔═╡ 67995b86-9706-424f-ba21-2698e7e83417
md"""
Those are the only changes necessary to train our neural network on a GPU!
"""
# ╔═╡ f5f1ff7c-c674-4147-9ec1-b1b0f1a8d18a
md"Number of hidden layers: $(@bind num_hidden_layers NumberField(1:3,default=1))"
# ╔═╡ ffd43619-b774-4bff-9a44-c7daf0ec7bcf
md"""
## Benchmark the training cost
Before we train the neural network, let's compare the time required to perform one iteration of updating the neural network weights on both the CPU and the GPU. (For large network sizes, you may want to disable CPU benchmarking to avoid delays.)
"""
# ╔═╡ d54fa17a-9bab-4034-97b9-902880901a3a
md"""
Enable CPU benchmarking. $(@bind benchmark_cpu CheckBox(default=true))
"""
# ╔═╡ 8eed2ad1-f58c-490e-b612-17cf44795337
md"""
**Question:** Try varying the number of hidden nodes in the hidden layer. How does the time required change? For what size hidden layer is using the GPU at least 100x as fast as using the CPU?
**Question:** What are the implications for what types of neural network architectures are well suited for getting a big speed-up by using a GPU?
"""
# ╔═╡ 2a69344b-dcb4-4a82-beaf-a787d63f632e
md"""
# Train the Neural Network
"""
# ╔═╡ 1009975c-9a8b-40f6-b9c5-b69820adc6b1
md"""
Check the box below, once you've selected a neural network architecture above that you'd like to try training.
Ready to train neural network. $(@bind ready_for_hidden CheckBox(default=false))
"""
# ╔═╡ 8480c2db-3e21-46d8-9725-554904cdb754
md"""
**Question:** How does the number of iterations required to achieve a given value of the loss function change when you increase the number of nodes in the hidden layer(s)? (If you want to prevent the neural network from being retrained while you tinker with the number of layers, you can uncheck the box above while you make multiple changes, and recheck it once you're ready to start the training.)
**Question:** How does the number of iterations required to achieve a given value of the loss function change when you increase the number of hidden layers (keeping the number of nodes per hidden layer constant)? (If you'd like to continue training the neural network for more iterations, you can click `Submit` above to have it resume training from where it left off.)
**Question:** What are the implications for choosing the number of hidden layers and nodes per layer, if you have a set amount of GPU computing time available?
"""
# ╔═╡ 89809f9d-e024-4676-bd04-e3af88a39215
md"""# Thank you!
Thanks for participating in the Astroinformatics Summer School.
We hope you'll join us for the final Q&A session, where you can ask big picture questions about the future of Astroinformatics and machine learning for astronomy & astrophysics.
"""
# ╔═╡ f829ad0c-9e7a-4e92-9f6b-86786ed317e6
md"""
# Code reused from previous lab
You don't need to read any of the code below to complete the lab. Of course, all the code is available, if you'd like to explore and tinker.
"""
# ╔═╡ 783788a0-6f9a-4225-98e6-5030d9f21712
md"""
## Prepare the data
### Read data from file
"""
# ╔═╡ 86744470-2b37-45c1-ab76-af838c122378
# Return the local path to `data_filename`, downloading it from `url` on first use.
#
# The destination directory depends on the host: AWS EC2 instances use
# ~/data, PSU ACI nodes use the user's GPFS scratch space, and any other
# machine uses ~/Astroinformatics/data. The file is (re)downloaded when it
# is missing or empty (filesize == 0, e.g. after an interrupted download).
function find_or_download_data(data_filename::String, url::String)
    # Pick a host-appropriate data directory.
    if contains(gethostname(), "ec2.internal")
        data_dir = joinpath(homedir(), "data")
    elseif contains(gethostname(), "aci.ics.psu.edu")
        data_dir = joinpath("/gpfs/scratch", ENV["USER"], "Astroinformatics", "data")
    else
        data_dir = joinpath(homedir(), "Astroinformatics", "data")
    end
    # mkpath creates all missing intermediate directories and is a no-op
    # when the directory already exists (replaces the isdir/mkdir ladder).
    mkpath(data_dir)
    data_path = joinpath(data_dir, data_filename)
    # Download only if the file is absent or a previous attempt left it empty.
    if !(filesize(data_path) > 0)
        Downloads.download(url, data_path)
    end
    return data_path
end
# ╔═╡ 49f371db-1ad1-4f1c-b5e2-c00a52035c6a
begin
filename = "quasar2.csv"
url = "https://scholarsphere.psu.edu/resources/edc61b33-550d-471d-8e86-1ff5cc8d8f4d/downloads/19732"
data_path = find_or_download_data(filename, url);
end
# ╔═╡ 85066779-be0f-43a3-bde8-c4ab5a3e5ca3
# Load the photometry catalog: six color indices plus a binary class label.
begin
df = CSV.read(data_path, DataFrame, limit=1_000_000, select=[:ug, :gr, :ri, :iz, :zs1, :s1s2, :label], ntasks=1)
df[:,:label] .= 1 .- df[:,:label] # Make label=1 for high-z quasars (flips the file's original 0/1 encoding)
col_names = names(df)
df # last expression: Pluto displays the DataFrame
end
# ╔═╡ 1c792c1d-f1e8-4e5f-8a76-5c7ca5fb8587
md"""
### Create subsets of data for training & testing
We will divide the dataset into two distinct subsets: `df_cv` (a DataFrame of observations to be used with a cross-validation procedure) and `df_test` (a DataFrame of observations to be used to testing our final model), one for model building and the second for testing our final model.
"""
# ╔═╡ 26635f45-1e34-4025-8151-2185d8d84e06
md"""
Undersample non-high-${z}$ quasars to make for balanced datasets?
$(@bind make_balanced CheckBox(default=true))
"""
# ╔═╡ 0985bcc3-e686-4aa9-b832-0141cb27c4a4
# Split the data into a cross-validation set and a held-out test set,
# stratified on the label so both sets keep the same class proportions.
begin
frac_data_used_for_cv = 0.66
df_cv, df_test = stratifiedobs(x->x.label==1, shuffleobs(df), p=frac_data_used_for_cv);
if make_balanced
# Undersample the majority (non-high-z) class to balance each subset.
df_cv = undersample(x->Bool(x.label),df_cv)
df_test = undersample(x->Bool(x.label),df_test)
end
end;
# ╔═╡ ffe4cdc7-4863-4f0e-b790-4b86afcc56b8
md"""
### Constructing subset for K-fold cross-validation
We split `df_cv` into subsets of the observations for training and validating as part of a **k-fold cross-validation** process.
Below, you can choose how many "folds" to use and which fold will be used for training our neural network classifier; the remaining observations in that split are used for validation.
"""
# ╔═╡ c0da3a0b-6aa2-4397-97d3-5076ff1054f7
# Partition `data` into `num_folds` stratified train/test pairs for k-fold
# cross-validation. Returns a NamedTuple (data_train, data_test, folds_idx).
# NOTE(review): assumes MLBase's StratifiedKfold iterates index sets that
# preserve the class proportions in `label`; each fold's test subset is the
# complement of its index set — confirm against MLBase docs.
function stratified_kfolds(label::AbstractVector, data, num_folds::Integer)
@assert length(label) == size(data,1)
list_of_folds_idx = StratifiedKfold(label,num_folds)
data_train = map(idx->datasubset(data, idx),list_of_folds_idx)
# Test subset for each fold = all observations NOT in that fold's index set.
data_test = map(idx->datasubset(data, setdiff(1:length(label),idx)),
list_of_folds_idx)
(;data_train, data_test, folds_idx = list_of_folds_idx)
end
# ╔═╡ 7e0341a3-e52e-4a19-b9ac-969ebdd2161f
md"""
For convenience sake, we'll define several dataframes containing data from your chosen fold.
"""
# ╔═╡ ffc98faf-d076-40df-ad6e-94d89f7446f8
md"""
## Code to setup neural network
"""
# ╔═╡ 387a11dd-e46d-4785-aec8-9674e6beaa62
md"## Simplified functions for training neural network"
# ╔═╡ 7463eefa-b0dd-47c4-8144-0b46262eedf0
# Perform a single optimizer step: compute the gradient of `loss` on
# `train_data = (x, y)` with respect to `param` and update the parameters
# in place. Works unchanged on CPU or GPU arrays.
function train_one_iteration!(model_nn::Union{Dense,Chain}, loss::Function, param::Flux.Zygote.Params{PT},
train_data::DT, optimizer::Flux.Optimise.AbstractOptimiser
) where { PT<:Any, MT1<:AbstractMatrix, MT2<:AbstractMatrix, DT<:Tuple{MT1,MT2} }
x, y = train_data
# Reverse-mode gradient of the loss w.r.t. all tracked parameters.
gs = gradient(param) do
loss(x,y)
end
# Apply the optimizer's update rule (mutates `param` in place).
Flux.Optimise.update!(optimizer, param, gs)
end
# ╔═╡ afccede8-4563-4a7e-bca4-4754349e73b3
md"# Setup & Helper functions"
# ╔═╡ 0102260a-2c3b-4545-a079-037b9c6b0b8d
md"### Evaluating model"
# ╔═╡ c41187d6-4306-4a92-b10e-c7825e79e79e
begin
# Threshold the model's (sigmoid) output to Boolean class predictions.
classify(model::Union{Chain,Dense}, data::AbstractMatrix; threshold::Real=0.5) = model(data).>=threshold
# DataFrame convenience method: transpose so features are rows (Flux's
# convention), then transpose predictions back to one row per observation.
classify(model::Union{Chain,Dense}, data::AbstractDataFrame; threshold::Real=0.5) = classify(model, Matrix(data)', threshold=threshold)'
end
# ╔═╡ 7931116b-3b3f-455c-80aa-17de872a8965
# Evaluate a binary classifier on (`data`, `label`) at the given decision
# `threshold` and return a NamedTuple of standard confusion-matrix counts
# and derived rates (accuracy, FDR, FOR, F1, TPR/TNR/FPR/FNR, prevalence).
function calc_classification_diagnostics(model, data, label; threshold = 0.5)
    pred = classify(model, data; threshold=threshold)
    actual_pos = label .== 1
    actual_neg = label .== 0
    # Confusion-matrix cells.
    tp = sum(actual_pos .&& pred)
    tn = sum(actual_neg .&& .!pred)
    fn = sum(actual_pos .&& .!pred)
    fp = sum(actual_neg .&& pred)
    # Marginal totals.
    cond_pos = tp + fn
    cond_neg = tn + fp
    total    = cond_pos + cond_neg
    pred_pos = tp + fp
    pred_neg = tn + fn
    return (; threshold,
            accuracy             = (tp + tn)/total,
            false_discovery_rate = fp/pred_pos,
            false_omission_rate  = fn/pred_neg,
            F1_score             = 2*tp/(2*tp + fp + fn),
            false_positive_rate  = fp/cond_neg,
            false_negative_rate  = fn/cond_pos,
            true_positive_rate   = tp/cond_pos,
            true_negative_rate   = tn/cond_neg,
            num_true_positives = tp, num_true_negatives = tn,
            num_false_positives = fp, num_false_negatives = fn,
            num_condition_positives = cond_pos, num_condition_negatives = cond_neg,
            num_predicted_positives = pred_pos, num_predicted_negatives = pred_neg,
            num_total = total,
            prevalence = cond_pos/total )
end
# ╔═╡ 7179f3e7-7b8a-468b-847d-5962ce0c1a93
# Train `model_nn` for `epochs` full-batch iterations, recording the loss,
# classification diagnostics, and parameters at every iteration in an
# MVHistory (also evaluated on `test_data` when provided). Diagnostics are
# computed BEFORE each parameter update, so entry i reflects the model as
# it was at the start of iteration i. Returns the history.
function my_train!(model_nn::Union{Dense,Chain}, loss::Function, param::Flux.Zygote.Params{PT},
train_data::DT, optimizer::Flux.Optimise.AbstractOptimiser;
#= begin optional parameters =#
epochs::Integer=1, test_data = nothing) where { PT<:Any, MT1<:AbstractMatrix, MT2<:AbstractMatrix, DT<:Tuple{MT1,MT2} }
@assert 1<=epochs<Inf
if !isnothing(test_data)
x_test, y_test = test_data
end
history = MVHistory() # For storing intermediate results
@progress for i in 1:epochs
x, y = train_data
results_train = calc_classification_diagnostics(model_nn, x, y)
if !isnothing(test_data) # if test/validation data is provided, evaluate model for it, too.
results_test = calc_classification_diagnostics(model_nn, x_test, y_test)
push!(history, :results_test, i, results_test)
loss_test = loss(x_test, y_test)
push!(history, :loss_test, i, loss_test )
end
# Gradient of the loss w.r.t. the tracked parameters for this batch.
gs = gradient(param) do
loss(x,y)
end
push!(history, :loss, i, loss(x,y) )
push!(history, :results_train, i, results_train)
push!(history, :param, i, param)
# Update parameters only after this iteration's metrics were recorded.
Flux.Optimise.update!(optimizer, param, gs)
end
return history
end
# ╔═╡ d1a0ab47-5c6a-4960-a126-4e65e86a8149
md"### Plotting"
# ╔═╡ 5cbd25fc-557b-4578-b765-72fcd384d6e0
# Plot training history `h` over iterations `idx_plt`: loss (top panel)
# plus false discovery rate and false omission rate (bottom panels).
# Validation curves are overlaid when the history contains :results_test.
function plot_classifier_training_history(h::MVHistory, idx_plt)
plt1 = plot(xlabel="Iteration", ylabel="False Discovery Rate", legend=:none)
plt2 = plot(xlabel="Iteration", ylabel="False Omission Rate", legend=:none)
plt3 = plot(xlabel="Iteration", ylabel="Loss", legend=:topright)
# get(h, key) returns (iterations, values); values are NamedTuples from
# calc_classification_diagnostics, so extract fields with get.(…, field, nothing).
scatter!(plt1,get(h,:results_train)[1][idx_plt],
get.(get(h,:results_train)[2][idx_plt],
:false_discovery_rate,nothing), ms=2, markerstrokewidth=0, alpha=0.5, label="Training")
scatter!(plt2,get(h,:results_train)[1][idx_plt],
get.(get(h,:results_train)[2][idx_plt],
:false_omission_rate,nothing), ms=2, markerstrokewidth=0, alpha=0.5, label="Training")
plot!(plt3,get(h,:loss)[1][idx_plt],
get(h,:loss)[2][idx_plt],
label="Training")
# Overlay validation metrics when they were recorded during training.
if haskey(h,:results_test)
scatter!(plt1,get(h,:results_test)[1][idx_plt],
get.(get(h,:results_test)[2][idx_plt],
:false_discovery_rate,nothing),
ms=2, markerstrokewidth=0, alpha=0.5, label="Validation")
scatter!(plt2,get(h,:results_test)[1][idx_plt],
get.(get(h,:results_test)[2][idx_plt],
:false_omission_rate,nothing),
ms=2, markerstrokewidth=0, alpha=0.5, label="Validation")
plot!(plt3,get(h,:loss_test)[1][idx_plt],
get(h,:loss_test)[2][idx_plt],
alpha=0.7, label="Validation")
end
# Layout: loss on top spanning full width, the two rate panels side by side.
l = @layout [ a; b c]
plot(plt3, plt1,plt2, layout=l)
end
# ╔═╡ 7df2b007-5b07-43a1-8407-e1310df57c54
md"### Appearances"
# ╔═╡ 56529037-956d-4980-875e-85b0eb5644e0
TableOfContents()
# ╔═╡ 3432a998-b8a9-4d81-a1a2-3ab4c2773a3f
# Render `x` in a floating sidebar to the right of the notebook column
# (only when the viewport is wide enough, per the CSS media query);
# `v_offset` shifts the aside vertically by the given number of pixels.
function aside(x; v_offset=0)
@htl("""
<style>
@media (min-width: calc(700px + 30px + 300px)) {
aside.plutoui-aside-wrapper {
position: absolute;
right: -11px;
width: 0px;
}
aside.plutoui-aside-wrapper > div {
width: 300px;
}
}
</style>
<aside class="plutoui-aside-wrapper" style="top: $(v_offset)px">
<div>
$(x)
</div>
</aside>
""")
end
# ╔═╡ 06c2301e-7fd1-4790-a16d-2a3cc3cc7b8e
aside(md"""
!!! tip
The `gpu` function is nice because it reverts to providing a view of the same data on the CPU if a GPU isn't avaliable. That way your code can still run (potentially much more slowly) if you're using a without a CUDA-enabled GPU.
""", v_offset=-160)
# ╔═╡ df0db914-191e-46e6-927d-1a713b50e68b
aside(md"""
!!! tip "Tip:"
The time required to train neural networks can increase rapidly with their size, so start with a small number of nodes in each layer and a small number of iterations (particularly if you're not using a GPU). Then you can increase the number of nodes and iterations.
""", v_offset=-50)
# ╔═╡ 83d2d3c5-f8ec-4bd6-9660-4dda9db75131
aside(md"""
!!! tip "Patience"
When you click `submit` or `click` above, the notebook will start training the neural network with specified architecture, learning rate and optimization algorithm. Depending on the network architecture and number of iterations it may take significant time to train the neural network and to update the plot below. If the progress bar below is less than 100%, then please be patient while the neural network is training.
""", v_offset=-170)
# ╔═╡ 2bb65491-d291-4d71-ac5d-247538a1871b
nbsp = html" "
# ╔═╡ 9eea2ceb-94ee-4aa8-8f5e-ffdd4debe174
@bind num_nodes confirm(PlutoUI.combine() do Child
md"""
Number of nodes in each layer:
Hidden Layer 1: $(Child("hidden1",NumberField(1:100,default=6)))
$nbsp $nbsp
Hidden Layer 2: $(Child("hidden2",NumberField(0:(num_hidden_layers>=2 ? 100 : 0),default=num_hidden_layers>=2 ? 6 : 0)))
$nbsp $nbsp
Hidden Layer 3: $(Child("hidden3",NumberField(0:(num_hidden_layers>=3 ? 100 : 0),default=num_hidden_layers>=3 ? 6 : 0)))
"""
end)
# ╔═╡ 2af3988d-6a46-4ae6-ab77-9de5270bf657
md"Reinitialize neural network weights with new set of random values: $nbsp $(@bind reinit_my_nn Button())"
# ╔═╡ c33ba176-e1bd-46a8-afca-a3d82eb4bc1a
# Build the classifier: 6 photometric color inputs, `num_hidden_layers`
# sigmoid hidden layers sized by the GUI's `num_nodes`, and one sigmoid
# output (predicted probability of being a high-z quasar).
begin
reinit_my_nn # trigger rerunning this cell when button is clicked
if num_hidden_layers == 1
model_my_nn = Chain( Dense(6,num_nodes.hidden1, Flux.sigmoid),
Dense(num_nodes.hidden1, 1, Flux.sigmoid) )
elseif num_hidden_layers == 2
model_my_nn = Chain( Dense(6,num_nodes.hidden1, Flux.sigmoid),
Dense(num_nodes.hidden1, num_nodes.hidden2, Flux.sigmoid),
Dense(num_nodes.hidden2, 1, Flux.sigmoid) )
elseif num_hidden_layers == 3
model_my_nn = Chain( Dense(6,num_nodes.hidden1, Flux.sigmoid),
Dense(num_nodes.hidden1, num_nodes.hidden2, Flux.sigmoid),
Dense(num_nodes.hidden2, num_nodes.hidden3, Flux.sigmoid),
Dense(num_nodes.hidden3, 1, Flux.sigmoid) )
else
# Fixed: the original admonition string ended with `layers"""`, so the
# title's closing quote was swallowed by the string terminator.
md"""!!! warn "Invalid number of layers" """
end
# Collect the trainable weights & biases for Flux's optimizers.
# NOTE(review): if the else branch ran, model_my_nn would be undefined here;
# the NumberField above restricts num_hidden_layers to 1:3 so it cannot.
my_nn_param = Flux.params(model_my_nn)
end
# ╔═╡ d0f111b8-8b18-4fd4-a119-97d1080834bf
if CUDA.functional()
model_gpu = gpu(model_my_nn)
param_gpu = Flux.params(model_gpu)
end;
# ╔═╡ 24740804-7333-4e93-aff9-badede5c440c
begin
num_param_in_my_nn = sum(length.(my_nn_param))
md"Your neural network architecture has $num_hidden_layers hidden layers and a total of $num_param_in_my_nn parameters."
end
# ╔═╡ 76b35985-414b-44cc-82e3-55c4bf05e371
# Binary cross-entropy losses bound to the CPU and GPU copies of the model.
begin
my_loss(x,y) = Flux.binarycrossentropy(model_my_nn(x), y)
loss_gpu(x,y) = Flux.binarycrossentropy(model_gpu(x), y)
end
# ╔═╡ c799d55a-2fb9-4b0a-8ebf-12f9cd4b95db
begin
@bind my_opt_param confirm(
PlutoUI.combine() do Child
md"""
Learning Rate: $( Child("learning_rate",NumberField(0.05:0.05:1, default=0.9)) )
$nbsp $nbsp $nbsp
Optimizer: $( Child("type",Select([Descent => "Gradient Descent", Nesterov => "Nesterov Momentum", ADAM => "ADAM" ], default=Nesterov)) )
$nbsp $nbsp $nbsp
Iterations: $( Child("iterations",NumberField(100:100:10_000, default=500)))
"""
end
)
end
# ╔═╡ 86499d0e-bad3-4954-a740-68cba383d790
if ready_for_hidden
md"""
First iteration to plot: $(@bind first_iter_to_plot_hidden Slider(1:my_opt_param.iterations))
Last iteration to plot: $(@bind last_iter_to_plot_hidden Slider(1:my_opt_param.iterations,default=my_opt_param.iterations))
"""
end
# ╔═╡ a4e39577-39ff-4295-a345-c580a062ad01
my_optimizer = my_opt_param.type(my_opt_param.learning_rate) # Set based on GUI inputs
# ╔═╡ 608d9156-f8a8-4886-ae87-b1adab904de5
@bind param_fold confirm(PlutoUI.combine() do Child
md"""
Number of folds: $(Child("num_folds", NumberField(1:10,default=5))) $nbsp $nbsp
Fold to use for training: $(Child("fold_id", NumberField(1:10)))
""" end )
# ╔═╡ ca25abcc-4f7e-4ace-861b-c8f0416584ed
if !(1<=param_fold.fold_id<=param_fold.num_folds)
md"""
!!! warn "fold_id must be between 1 and the number of folds"
"""
else
df_train_list, df_validation_list, list_of_folds_idx = stratified_kfolds(df_cv.label,df_cv,param_fold.num_folds)
num_in_training_set = size(first(df_train_list),1)
num_in_validation_set = size(first(df_validation_list),1)
nothing
end
# ╔═╡ 6f2c856c-3bd3-4d35-be93-1b78c68c6b29
# Pick the chosen fold's train/validation DataFrames and split off the
# feature columns from the label column.
begin
train_Xy, validation_Xy = df_train_list[param_fold.fold_id], df_validation_list[param_fold.fold_id]
# Make some convenient variable names for use later
# copycols=false keeps these as views-like selections rather than copies.
train_X = select(train_Xy, Not(:label), copycols=false)
train_y = select(train_Xy, (:label), copycols=false)
validation_X = select(validation_Xy, Not(:label), copycols=false)
validation_y = select(validation_Xy, (:label), copycols=false)
end;
# ╔═╡ d7726010-ca9b-4e4d-875f-a37f00826199
# Convert to transposed matrices (features in rows, observations in
# columns) as expected by the Flux models above.
begin
train_data = (Matrix(train_X)', Matrix(train_y)')
validation_data = (Matrix(validation_X)', Matrix(validation_y)')
end;
# ╔═╡ 41aa218e-06e3-4452-bd8a-a13100912131
# Copy the training and validation matrices to GPU memory (only when a
# CUDA-capable GPU is present).
if CUDA.functional()
train_data_gpu = train_data |> gpu
validation_data_gpu = validation_data |> gpu
end;
# ╔═╡ 27e29acb-2caa-42f3-a9ae-80e4836c3c53
if CUDA.functional() # Benchmark on GPU
#num_hidden_layers, num_nodes
# Run once to ensure all functions are compiled for given types
CUDA.@sync train_one_iteration!(model_gpu, loss_gpu, param_gpu, train_data_gpu, my_optimizer)
# Rerun to benchmark: CUDA.@sync blocks until the GPU work finishes so
# @elapsed measures the full iteration, not just the kernel launch.
time_per_iteration_gpu = @elapsed( CUDA.@sync train_one_iteration!(model_gpu, loss_gpu, param_gpu, train_data_gpu, my_optimizer) ) #samples = 1 evals=1
end;
# ╔═╡ 43ec3626-baf0-4b70-83cb-89f32ed8bf36
if benchmark_cpu # Benchmark on CPU
# Run once to ensure all functions are compiled for given types
train_one_iteration!(model_my_nn, my_loss, my_nn_param, train_data, my_optimizer)
# Rerun to benchmark
# NOTE(review): a single @elapsed sample is noisy; BenchmarkTools (already a
# notebook dependency) would give more stable timings if precision matters.
time_per_iteration_cpu = @elapsed train_one_iteration!(model_my_nn, my_loss, my_nn_param, train_data, my_optimizer)
end;
# ╔═╡ b630b78c-8e62-40ac-b6ca-ef8056b2a038
# Report benchmark timings. Each timing variable only exists if its benchmark
# cell actually ran, so every branch must guard on the matching condition.
# Fix: the original `else` read `time_per_iteration_gpu` unconditionally,
# which threw UndefVarError on CPU-only machines when benchmark_cpu was false.
if benchmark_cpu && CUDA.functional()
md"""For your neural network architecture, each iteration takes $(round(time_per_iteration_gpu,digits=3)) seconds on the GPU and $(round(time_per_iteration_cpu,digits=3)) seconds on the CPU.
That's a ratio of **$(round(time_per_iteration_cpu/time_per_iteration_gpu,digits=3))**!
"""
elseif benchmark_cpu && !CUDA.functional()
md"For your neural network architecture, each iteration takes $(round(time_per_iteration_cpu,digits=3)) seconds on the CPU."
elseif CUDA.functional()
md"For your neural network architecture, each iteration takes $(round(time_per_iteration_gpu,digits=3)) seconds on the GPU."
else
md"No benchmark timings are available: enable the CPU benchmark or run on a machine with a functional GPU."
end
# ╔═╡ 7369a73e-a04f-49c9-83f8-82633f8c3efb
# Train on GPU if available, otherwise on CPU. `my_train!` runs
# `my_opt_param.iterations` epochs and returns the history object that the
# plotting cell below consumes; the GPU and CPU paths use the model/loss/param
# triple that lives on the corresponding device.
if ready_for_hidden
if CUDA.functional()
history_my_nn = my_train!(model_gpu, loss_gpu, param_gpu, train_data_gpu, my_optimizer, test_data = validation_data_gpu, epochs=my_opt_param.iterations)
else
history_my_nn = my_train!(model_my_nn, my_loss, my_nn_param, train_data, my_optimizer, test_data = validation_data, epochs=my_opt_param.iterations)
end
end;
# ╔═╡ 9b55d85d-e5b0-46d6-bce8-6f1cbdd991ee
# Plot the training history over the iteration range chosen with the sliders.
if ready_for_hidden
plot_classifier_training_history(history_my_nn,first_iter_to_plot_hidden:last_iter_to_plot_hidden)
end
# ╔═╡ 9c5a7bb8-2017-45e5-b56e-3745fc775e7c
# Reusable HTML line break for interpolation into markdown cells (e.g. titles).
br = html"<br />"
# ╔═╡ 83cb30c2-db96-4d68-88f9-09e25b6bfa70
# Notebook title/intro cell.
# Fix: user-facing typos "sset" -> "set" and "architecure" -> "architecture".
md"""
# Benchmarking & Training $br Neural Networks on the GPU
## Specify the neural network architecture
Below you can set a number of hidden layers (up to 3) and number of hidden nodes in each layer.
"""
# ╔═╡ 00000000-0000-0000-0000-000000000001
# Machine-generated by Pluto's built-in package manager: the embedded
# Project.toml pinning this notebook's dependencies. Do not edit by hand —
# Pluto rewrites it when packages change.
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
Downloads = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
HypertextLiteral = "ac1192a8-f4b3-4bfe-ba22-af5b92cd3ab2"
MLBase = "f0e99cf1-93fa-52ec-9ecc-5026115318e0"
MLDataUtils = "cc2ba9b6-d476-5e6d-8eaf-a92d5412d41d"
Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
PlutoUI = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
ProgressLogging = "33c8b6b6-d38a-422a-b730-caa89a2f386c"
ValueHistories = "98cad3c8-aec3-5f06-8e41-884608649ab7"
[compat]
BenchmarkTools = "~1.3.1"
CSV = "~0.10.4"
CUDA = "~3.10.0"
DataFrames = "~1.3.4"
Flux = "~0.13.0"
HypertextLiteral = "~0.9.4"
MLBase = "~0.9.0"
MLDataUtils = "~0.5.4"
Plots = "~1.29.0"
PlutoUI = "~0.7.38"
ProgressLogging = "~0.1.4"
ValueHistories = "~0.5.4"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
julia_version = "1.7.2"
manifest_format = "2.0"
[[deps.AbstractFFTs]]
deps = ["ChainRulesCore", "LinearAlgebra"]
git-tree-sha1 = "6f1d9bc1c08f9f4a8fa92e3ea3cb50153a1b40d4"
uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c"
version = "1.1.0"
[[deps.AbstractPlutoDingetjes]]
deps = ["Pkg"]
git-tree-sha1 = "8eaf9f1b4921132a4cff3f36a1d9ba923b14a481"
uuid = "6e696c72-6542-2067-7265-42206c756150"
version = "1.1.4"
[[deps.Accessors]]
deps = ["Compat", "CompositionsBase", "ConstructionBase", "Future", "LinearAlgebra", "MacroTools", "Requires", "Test"]
git-tree-sha1 = "0264a938934447408c7f0be8985afec2a2237af4"
uuid = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
version = "0.1.11"
[[deps.Adapt]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "af92965fb30777147966f58acb05da51c5616b5f"
uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
version = "3.3.3"
[[deps.ArgCheck]]
git-tree-sha1 = "a3a402a35a2f7e0b87828ccabbd5ebfbebe356b4"
uuid = "dce04be8-c92d-5529-be00-80e4d2c0e197"
version = "2.3.0"
[[deps.ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
[[deps.ArrayInterface]]
deps = ["Compat", "IfElse", "LinearAlgebra", "Requires", "SparseArrays", "Static"]
git-tree-sha1 = "81f0cb60dc994ca17f68d9fb7c942a5ae70d9ee4"
uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9"
version = "5.0.8"
[[deps.Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[deps.BFloat16s]]
deps = ["LinearAlgebra", "Printf", "Random", "Test"]
git-tree-sha1 = "a598ecb0d717092b5539dbbe890c98bac842b072"
uuid = "ab4f0b2a-ad5b-11e8-123f-65d77653426b"
version = "0.2.0"
[[deps.BangBang]]
deps = ["Compat", "ConstructionBase", "Future", "InitialValues", "LinearAlgebra", "Requires", "Setfield", "Tables", "ZygoteRules"]
git-tree-sha1 = "b15a6bc52594f5e4a3b825858d1089618871bf9d"
uuid = "198e06fe-97b7-11e9-32a5-e1d131e6ad66"
version = "0.3.36"
[[deps.Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[deps.Baselet]]
git-tree-sha1 = "aebf55e6d7795e02ca500a689d326ac979aaf89e"
uuid = "9718e550-a3fa-408a-8086-8db961cd8217"
version = "0.1.1"
[[deps.BenchmarkTools]]
deps = ["JSON", "Logging", "Printf", "Profile", "Statistics", "UUIDs"]
git-tree-sha1 = "4c10eee4af024676200bc7752e536f858c6b8f93"
uuid = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
version = "1.3.1"
[[deps.Bzip2_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "19a35467a82e236ff51bc17a3a44b69ef35185a2"
uuid = "6e34b625-4abd-537c-b88f-471c36dfa7a0"
version = "1.0.8+0"
[[deps.CEnum]]
git-tree-sha1 = "eb4cb44a499229b3b8426dcfb5dd85333951ff90"
uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82"
version = "0.4.2"
[[deps.CSV]]
deps = ["CodecZlib", "Dates", "FilePathsBase", "InlineStrings", "Mmap", "Parsers", "PooledArrays", "SentinelArrays", "Tables", "Unicode", "WeakRefStrings"]
git-tree-sha1 = "873fb188a4b9d76549b81465b1f75c82aaf59238"
uuid = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
version = "0.10.4"
[[deps.CUDA]]
deps = ["AbstractFFTs", "Adapt", "BFloat16s", "CEnum", "CompilerSupportLibraries_jll", "ExprTools", "GPUArrays", "GPUCompiler", "LLVM", "LazyArtifacts", "Libdl", "LinearAlgebra", "Logging", "Printf", "Random", "Random123", "RandomNumbers", "Reexport", "Requires", "SparseArrays", "SpecialFunctions", "TimerOutputs"]
git-tree-sha1 = "19fb33957a5f85efb3cc10e70cf4dd4e30174ac9"
uuid = "052768ef-5323-5732-b1bb-66c8b64840ba"
version = "3.10.0"
[[deps.Cairo_jll]]
deps = ["Artifacts", "Bzip2_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "JLLWrappers", "LZO_jll", "Libdl", "Pixman_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll", "Zlib_jll", "libpng_jll"]
git-tree-sha1 = "4b859a208b2397a7a623a03449e4636bdb17bcf2"
uuid = "83423d85-b0ee-5818-9007-b63ccbeb887a"
version = "1.16.1+1"
[[deps.ChainRules]]
deps = ["ChainRulesCore", "Compat", "IrrationalConstants", "LinearAlgebra", "Random", "RealDot", "SparseArrays", "Statistics"]
git-tree-sha1 = "de68815ccf15c7d3e3e3338f0bd3a8a0528f9b9f"
uuid = "082447d4-558c-5d27-93f4-14fc19e9eca2"
version = "1.33.0"
[[deps.ChainRulesCore]]
deps = ["Compat", "LinearAlgebra", "SparseArrays"]
git-tree-sha1 = "9950387274246d08af38f6eef8cb5480862a435f"
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
version = "1.14.0"
[[deps.ChangesOfVariables]]
deps = ["ChainRulesCore", "LinearAlgebra", "Test"]
git-tree-sha1 = "1e315e3f4b0b7ce40feded39c73049692126cf53"
uuid = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0"
version = "0.1.3"
[[deps.CodecZlib]]
deps = ["TranscodingStreams", "Zlib_jll"]
git-tree-sha1 = "ded953804d019afa9a3f98981d99b33e3db7b6da"
uuid = "944b1d66-785c-5afd-91f1-9de20f533193"
version = "0.7.0"
[[deps.ColorSchemes]]
deps = ["ColorTypes", "ColorVectorSpace", "Colors", "FixedPointNumbers", "Random"]
git-tree-sha1 = "7297381ccb5df764549818d9a7d57e45f1057d30"
uuid = "35d6a980-a343-548e-a6ea-1d62b119f2f4"
version = "3.18.0"
[[deps.ColorTypes]]
deps = ["FixedPointNumbers", "Random"]
git-tree-sha1 = "a985dc37e357a3b22b260a5def99f3530fb415d3"
uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f"
version = "0.11.2"
[[deps.ColorVectorSpace]]
deps = ["ColorTypes", "FixedPointNumbers", "LinearAlgebra", "SpecialFunctions", "Statistics", "TensorCore"]
git-tree-sha1 = "3f1f500312161f1ae067abe07d13b40f78f32e07"
uuid = "c3611d14-8923-5661-9e6a-0046d554d3a4"
version = "0.9.8"
[[deps.Colors]]
deps = ["ColorTypes", "FixedPointNumbers", "Reexport"]
git-tree-sha1 = "417b0ed7b8b838aa6ca0a87aadf1bb9eb111ce40"
uuid = "5ae59095-9a9b-59fe-a467-6f913c188581"
version = "0.12.8"
[[deps.CommonSubexpressions]]
deps = ["MacroTools", "Test"]
git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7"
uuid = "bbf7d656-a473-5ed7-a52c-81e309532950"
version = "0.3.0"
[[deps.Compat]]
deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"]
git-tree-sha1 = "b153278a25dd42c65abbf4e62344f9d22e59191b"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "3.43.0"
[[deps.CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
[[deps.CompositionsBase]]
git-tree-sha1 = "455419f7e328a1a2493cabc6428d79e951349769"
uuid = "a33af91c-f02d-484b-be07-31d278c5ca2b"
version = "0.1.1"
[[deps.ConstructionBase]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "f74e9d5388b8620b4cee35d4c5a618dd4dc547f4"
uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9"
version = "1.3.0"
[[deps.ContextVariablesX]]
deps = ["Compat", "Logging", "UUIDs"]
git-tree-sha1 = "8ccaa8c655bc1b83d2da4d569c9b28254ababd6e"
uuid = "6add18c4-b38d-439d-96f6-d6bc489c04c5"
version = "0.1.2"
[[deps.Contour]]
deps = ["StaticArrays"]
git-tree-sha1 = "9f02045d934dc030edad45944ea80dbd1f0ebea7"
uuid = "d38c429a-6771-53c6-b99e-75d170b6e991"
version = "0.5.7"
[[deps.Crayons]]
git-tree-sha1 = "249fe38abf76d48563e2f4556bebd215aa317e15"
uuid = "a8cc5b0e-0ffa-5ad4-8c14-923d3ee1735f"
version = "4.1.1"
[[deps.DataAPI]]
git-tree-sha1 = "fb5f5316dd3fd4c5e7c30a24d50643b73e37cd40"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.10.0"
[[deps.DataFrames]]
deps = ["Compat", "DataAPI", "Future", "InvertedIndices", "IteratorInterfaceExtensions", "LinearAlgebra", "Markdown", "Missings", "PooledArrays", "PrettyTables", "Printf", "REPL", "Reexport", "SortingAlgorithms", "Statistics", "TableTraits", "Tables", "Unicode"]
git-tree-sha1 = "daa21eb85147f72e41f6352a57fccea377e310a9"
uuid = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
version = "1.3.4"
[[deps.DataStructures]]
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "cc1a8e22627f33c789ab60b36a9132ac050bbf75"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.18.12"
[[deps.DataValueInterfaces]]
git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6"
uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464"
version = "1.0.0"
[[deps.Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[deps.DefineSingletons]]
git-tree-sha1 = "0fba8b706d0178b4dc7fd44a96a92382c9065c2c"
uuid = "244e2a9f-e319-4986-a169-4d1fe445cd52"
version = "0.1.2"
[[deps.DelimitedFiles]]
deps = ["Mmap"]
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
[[deps.DiffResults]]
deps = ["StaticArrays"]
git-tree-sha1 = "c18e98cba888c6c25d1c3b048e4b3380ca956805"
uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5"
version = "1.0.3"
[[deps.DiffRules]]
deps = ["IrrationalConstants", "LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"]
git-tree-sha1 = "28d605d9a0ac17118fe2c5e9ce0fbb76c3ceb120"
uuid = "b552c78f-8df3-52c6-915a-8e097449b14b"
version = "1.11.0"
[[deps.Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[deps.DocStringExtensions]]
deps = ["LibGit2"]
git-tree-sha1 = "b19534d1895d702889b219c382a6e18010797f0b"
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.8.6"
[[deps.Downloads]]
deps = ["ArgTools", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
[[deps.EarCut_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "3f3a2501fa7236e9b911e0f7a588c657e822bb6d"
uuid = "5ae413db-bbd1-5e63-b57d-d24a61df00f5"
version = "2.2.3+0"
[[deps.Expat_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "bad72f730e9e91c08d9427d5e8db95478a3c323d"
uuid = "2e619515-83b5-522b-bb60-26c02a35a201"
version = "2.4.8+0"
[[deps.ExprTools]]
git-tree-sha1 = "56559bbef6ca5ea0c0818fa5c90320398a6fbf8d"
uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04"
version = "0.1.8"
[[deps.FFMPEG]]
deps = ["FFMPEG_jll"]
git-tree-sha1 = "b57e3acbe22f8484b4b5ff66a7499717fe1a9cc8"
uuid = "c87230d0-a227-11e9-1b43-d7ebe4e7570a"
version = "0.4.1"
[[deps.FFMPEG_jll]]
deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "LAME_jll", "Libdl", "Ogg_jll", "OpenSSL_jll", "Opus_jll", "Pkg", "Zlib_jll", "libass_jll", "libfdk_aac_jll", "libvorbis_jll", "x264_jll", "x265_jll"]
git-tree-sha1 = "d8a578692e3077ac998b50c0217dfd67f21d1e5f"
uuid = "b22a6f82-2f65-5046-a5b2-351ab43fb4e5"
version = "4.4.0+0"
[[deps.FLoops]]
deps = ["BangBang", "Compat", "FLoopsBase", "InitialValues", "JuliaVariables", "MLStyle", "Serialization", "Setfield", "Transducers"]
git-tree-sha1 = "4391d3ed58db9dc5a9883b23a0578316b4798b1f"
uuid = "cc61a311-1640-44b5-9fba-1b764f453329"
version = "0.2.0"
[[deps.FLoopsBase]]
deps = ["ContextVariablesX"]
git-tree-sha1 = "656f7a6859be8673bf1f35da5670246b923964f7"
uuid = "b9860ae5-e623-471e-878b-f6a53c775ea6"
version = "0.1.1"
[[deps.FilePathsBase]]
deps = ["Compat", "Dates", "Mmap", "Printf", "Test", "UUIDs"]
git-tree-sha1 = "129b104185df66e408edd6625d480b7f9e9823a0"
uuid = "48062228-2e41-5def-b9a4-89aafe57970f"
version = "0.9.18"
[[deps.FillArrays]]
deps = ["LinearAlgebra", "Random", "SparseArrays", "Statistics"]
git-tree-sha1 = "246621d23d1f43e3b9c368bf3b72b2331a27c286"
uuid = "1a297f60-69ca-5386-bcde-b61e274b549b"
version = "0.13.2"
[[deps.FixedPointNumbers]]
deps = ["Statistics"]
git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc"
uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93"
version = "0.8.4"
[[deps.Flux]]
deps = ["Adapt", "ArrayInterface", "CUDA", "ChainRulesCore", "Functors", "LinearAlgebra", "MLUtils", "MacroTools", "NNlib", "NNlibCUDA", "Optimisers", "ProgressLogging", "Random", "Reexport", "SparseArrays", "SpecialFunctions", "Statistics", "StatsBase", "Test", "Zygote"]
git-tree-sha1 = "f84e50845ab88702c721dc7c6129a85cbc1de332"
uuid = "587475ba-b771-5e3f-ad9e-33799f191a9c"
version = "0.13.1"
[[deps.FoldsThreads]]
deps = ["Accessors", "FunctionWrappers", "InitialValues", "SplittablesBase", "Transducers"]
git-tree-sha1 = "eb8e1989b9028f7e0985b4268dabe94682249025"
uuid = "9c68100b-dfe1-47cf-94c8-95104e173443"
version = "0.1.1"
[[deps.Fontconfig_jll]]
deps = ["Artifacts", "Bzip2_jll", "Expat_jll", "FreeType2_jll", "JLLWrappers", "Libdl", "Libuuid_jll", "Pkg", "Zlib_jll"]
git-tree-sha1 = "21efd19106a55620a188615da6d3d06cd7f6ee03"
uuid = "a3f928ae-7b40-5064-980b-68af3947d34b"
version = "2.13.93+0"
[[deps.Formatting]]
deps = ["Printf"]
git-tree-sha1 = "8339d61043228fdd3eb658d86c926cb282ae72a8"
uuid = "59287772-0a20-5a39-b81b-1366585eb4c0"
version = "0.4.2"
[[deps.ForwardDiff]]
deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions", "StaticArrays"]
git-tree-sha1 = "2f18915445b248731ec5db4e4a17e451020bf21e"
uuid = "f6369f11-7733-5829-9624-2563aa707210"
version = "0.10.30"
[[deps.FreeType2_jll]]
deps = ["Artifacts", "Bzip2_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"]
git-tree-sha1 = "87eb71354d8ec1a96d4a7636bd57a7347dde3ef9"
uuid = "d7e528f0-a631-5988-bf34-fe36492bcfd7"
version = "2.10.4+0"
[[deps.FriBidi_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "aa31987c2ba8704e23c6c8ba8a4f769d5d7e4f91"
uuid = "559328eb-81f9-559d-9380-de523a88c83c"
version = "1.0.10+0"
[[deps.FunctionWrappers]]
git-tree-sha1 = "241552bc2209f0fa068b6415b1942cc0aa486bcc"
uuid = "069b7b12-0de2-55c6-9aab-29f3d0a68a2e"
version = "1.1.2"
[[deps.Functors]]
git-tree-sha1 = "223fffa49ca0ff9ce4f875be001ffe173b2b7de4"
uuid = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
version = "0.2.8"
[[deps.Future]]
deps = ["Random"]
uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820"
[[deps.GLFW_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Libglvnd_jll", "Pkg", "Xorg_libXcursor_jll", "Xorg_libXi_jll", "Xorg_libXinerama_jll", "Xorg_libXrandr_jll"]
git-tree-sha1 = "51d2dfe8e590fbd74e7a842cf6d13d8a2f45dc01"
uuid = "0656b61e-2033-5cc2-a64a-77c0f6c09b89"
version = "3.3.6+0"
[[deps.GPUArrays]]
deps = ["Adapt", "LLVM", "LinearAlgebra", "Printf", "Random", "Serialization", "Statistics"]
git-tree-sha1 = "c783e8883028bf26fb05ed4022c450ef44edd875"
uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7"
version = "8.3.2"
[[deps.GPUCompiler]]
deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "TimerOutputs", "UUIDs"]
git-tree-sha1 = "d8c5999631e1dc18d767883f621639c838f8e632"
uuid = "61eb1bfa-7361-4325-ad38-22787b887f55"
version = "0.15.2"
[[deps.GR]]
deps = ["Base64", "DelimitedFiles", "GR_jll", "HTTP", "JSON", "Libdl", "LinearAlgebra", "Pkg", "Printf", "Random", "RelocatableFolders", "Serialization", "Sockets", "Test", "UUIDs"]
git-tree-sha1 = "b316fd18f5bc025fedcb708332aecb3e13b9b453"