-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsolveInverseProblem.jl
More file actions
1973 lines (1829 loc) · 139 KB
/
solveInverseProblem.jl
File metadata and controls
1973 lines (1829 loc) · 139 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
######################################### solveInverseProblem.jl #########################################
#### Description:
# This script solves an inverse problem on the form S=W*F where 'S' is a vector of length m containing all the
# measurements/data points, 'F' is a vector of length n containing the fast-ion distribution in vectorized form
# and 'W' is an m x n matrix containing all the weight functions (as rows) relating 'S' to 'F'. The script can
# solve standard fast-ion tomography inverse problems in any dimension D, where 1 <= D <= 6, including orbit
# tomography problems. Several forms of prior information, constraints and regularization can be included when
# solving the inverse problem. These are described in the template file OWCF/templates/start_solveInverseProblem_template.jl.
# Any number of diagnostics can be included in the inverse problem, as long as they are included together with
# an equal number of weight matrices.
#
# Please see the OWCF/templates/start_solveInverseProblem_template.jl file for further input information.
#### Inputs (Units given when defined in script)
# Given via input file start_solveInverseProblem_template.jl, for example. 'template' should be replaced by whatever.
#### Outputs
# -
#### Saved files
# The results output file will have the filename format:
# solInvProb_[date and time]_[tokamak]_S_[numODiag]_[numOMeasure]_W_[numOW]_[recSpaceGridSize]_[hyperParamGridSize].jld2
# (or instead with a .hdf5 file extension if the 'saveInHDF5format' input variable was set to true in the start file)
# where
# - 'date and time' is the date and time at which the inverse problem was solved
# - 'tokamak' is the tokamak in which the inverse problem is solved. Specified in input file
# - 'numODiag' is the number of diagnostics used to solve the inverse problem
# - 'numOMeasure' is the total number of measurements used to solve the inverse problem
# - 'numOW' is the number of weight matrices used to solve the inverse problem (equal to numODiag)
# - 'recSpaceGridSize' is the size of the reconstruction space grid on which the fast-ion distribution is reconstructed
# - 'hyperParamGridSize' is the size of the hyper-parameter value(s) grid used to regularize the inverse problem
# This results output file will have the keys:
# sols - The solutions to the inverse problem in inflated format, i.e. recSpaceGridSize x hyperParamGridSize - Array{Float64}
# sols_abscissa_i - where i is an integer >=1. The reconstruction space abscissas of the sols output data. E.g. sols_abscissa_i
# contains the grid points corresponding to the i:th dimension of sols - Vector{Float64}
# sols_abscissa_units_i - where i is an integer >=1. The units of measurement of sols_abscissa_i - String
# sols_hyper_abscissa_j - where j is an integer >=1. The hyper-parameter values of the j:th regularization dimension of sols.
#                              For example, if the reconstruction space is 2D and two hyper-parameters were needed to solve the inverse
# problem (e.g. the regularization strength for 0th order Tikhonov and 1st order Tikhonov, respectively)
# sols[:,:,j1,j2] corresponds to the solution with hyper-parameter values sols_hyper_abscissa_1[j1] and
# sols_hyper_abscissa_2[j2]. - Vector{Float64}
# FI_species - The particle species of the reconstructed fast-ion distribution - String
# S_k - where k is an integer >=1. The measurements of the k:th diagnostic used to solve the inverse problem - Vector{Float64}
# S_units_k - where k is an integer >=1. The units of S_k - String
# S_err_k - where k is an integer >=1. The uncertainties of the S_k measurements. S_err_k[l] corresponds to the uncertainty
# of measurement S_k[l] - Vector{Float64}
# S_err_units_k - where k is an integer >=1. The units of S_err_k - String
# S_abscissa_k - where k is an integer >=1. The measurement bin centers of S_k. S_abscissa_k[l] corresponds to the measurement
# bin center of S_k[l] - Vector{Float64}
# S_abscissa_units_k - where k is an integer >=1. The units of S_abscissa_k - String
# l_curve_x - The x-axis values of the L-curve of the solutions to the inverse problem, corresponding to ||S-WF|| - Vector{Float64}
# l_curve_y - The y-axis-values of the L-curve of the solutions to the inverse problem, corresponding to ||F|| - Vector{Float64}
# l_curve_opt_index - The index of the L-curve point with the largest curvature. (l_curve_x[l_curve_opt_index], l_curve_y[l_curve_opt_index])
#                        will then correspond to the L-curve point with the largest curvature - Int64
# l_curve_opt_hyper_point - The hyper-parameter point corresponding to the L-curve point with the largest curvature. I.e. the values of
# λ_h where h is an integer >=1 and λ_h is the regularization strength for regularization type h - Vector{Float64}
# l_curve_opt_sol - The solution to the inverse problem obtained using the l_curve_opt_hyper_point regularization strength(s). l_curve_opt_sol
# will be an array of size recSpaceGridSize. l_curve_opt_sol is equivalent to sols[:,...,:,j1,j2,...,jH] where :,...,: denotes
# all reconstruction space dimensions and jh = findfirst(x-> x==l_curve_opt_hyper_point[h],sols_hyper_abscissa_h) and h>=1 - Array{Float64}
# filepath_start - The filepath to the start file used to execute the solveInverseProblem.jl script - String
# If rescale_W was set to true
# rescale_W_factors - The factors used to rescale the weight functions to have WF_synth match the experimental data - Vector{Float64}
# If :COLLISIONS and/or :ICRF was included in the 'regularization' input variable
# regularization_equil_filepath - The file path to the magnetic equilibrium data used to compute :COLLISIONS and/or :ICRF prior - String
# If :COLLISIONS was included in the 'regularization' input variable
# coll_phys_basis - The collision physics basis functions used to solve the inverse problem. The basis functions are saved as a multi-dimensional
# array with size recSpaceGridSize x reduce(*, recSpaceGridSize) - Array{Float64}
# coll_phys_thermal_species - The list of thermal ion species used to compute the collision physics basis functions - Vector{String}
# coll_phys_Ti - The list of thermal ion temperatures used to compute the collision physics basis functions - Vector{Float64}
# coll_phys_ni - The list of thermal ion densities used to compute the collision physics basis functions - Vector{Float64}
# coll_phys_Te - The electron temperature used to compute the collision physics basis functions - Float64
# coll_phys_ne - The electron density used to compute the collision physics basis functions - Float64
# coll_phys_rho - The normalized poloidal flux coordinate(s) used to compute the collision physics basis functions - Vector{Float64}
# If :ICRF was included in the 'regularization' input variable
# L_ICRF - The matrix used to regularize the inverse problem using the physics of electromagnetic wave heating in the ion
# cyclotron range of frequencies (ICRF) - Matrix{Float64}
# If the 'plot_solutions' input variable was set to true in the start file, .png files will be saved showing plots of the solutions.
# The filename format of those .png files will be:
# solInvProb_[date and time]_resultsPlot_[p]_of_[P].png
# where
# - 'date and time' is the date and time at which the inverse problem was solved
# - 'p' is an integer 1<=p<=P
# - 'P' is an integer equal to the reduce(*,hyperParamGridSize) (the total number of hyper-parameter points for all regularization combinations)
# If the 'gif_solutions' input variable was set to true in the start file, a .gif file will be saved showing all .png files together as an animation.
# The filename format of that .gif file will be:
# solInvProb_[date and time].gif
# where
# - 'date and time' is the date and time at which the inverse problem was solved
# ALL OUTPUT FILES WILL BE SAVED IN THE 'folderpath_out' INPUT VARIABLE FOLDER, SPECIFIED IN THE START FILE.
### Other
#
# Script written by Henrik Järleblad. Last maintained 2025-09-01.
###########################################################################################################
# A dictionary to keep a record of the names of all sections and their respective execution times.
# Keys are section numbers (Int), values are (section name::String, elapsed seconds::Float64) tuples.
dictionary_of_sections = Dict()
# println section number tracker (incremented after each section banner is printed)
prnt = 0
# Timestamp of script execution start. Each section appends its completion time to this vector,
# so diff(timestamps)[end] always yields the duration of the most recently completed section.
timestamps = [time()]
# SECTION 0
# NOTE(review): 'verbose', 'regularization' and 'rescale_W' are expected to be defined by the
# start file (see OWCF/templates/start_solveInverseProblem_template.jl) before this script runs.
println("------------------------------------------------ SECTION $(prnt) ------------------------------------------------"); prnt += 1
verbose && println("Loading Julia packages... ")
using Base.Iterators
using Dates
using FileIO
using HDF5
using Interpolations
using JLD2
using LinearAlgebra
using Plots # If macros (@animate) did not expand at parse time, this would only need to be loaded if plot_solutions || gif_solutions
using SparseArrays
using Statistics
using SCS, Convex
include("extra/constants.jl")
include("misc/convert_units.jl")
# Conditionally include extra OWCF dependencies, depending on which regularization types were requested.
# Note: 'regularization' holds Symbols (e.g. :COLLISIONS), hence the String./lowercase. normalization.
# dependencies.jl may be included more than once if several conditions below are true; presumably
# re-inclusion is harmless (redefinitions only) — TODO confirm.
if "collisions" in lowercase.(String.(regularization))
    verbose && println("Collision physics included in list of regularization!")
    verbose && print("---> ")
    include("extra/dependencies.jl")
    include("misc/temp_n_dens.jl")
    include("misc/species_func.jl")
end
if ("icrf" in lowercase.(String.(regularization)))
    verbose && println("ICRF physics included in list of regularization!")
    verbose && print("---> ")
    include("extra/dependencies.jl")
end
if ("firsttikhonov" in lowercase.(String.(regularization)))
    verbose && println("1st order Tikhonov in list of regularization!")
    verbose && print("---> ")
    include("extra/dependencies.jl")
end
if rescale_W
    verbose && println("rescale_W set to true!")
    verbose && print("---> ")
    include("extra/dependencies.jl")
end
append!(timestamps,time()) # The timestamp when all necessary packages have been loaded
dictionary_of_sections[prnt-1] = ("Loading Julia packages",diff(timestamps)[end])
###########################################################################################################
# SECTION: PERFORM SAFETY CHECKS
# There must be exactly one weight matrix file per measurement file (one per diagnostic),
# otherwise the inverse problem S=W*F cannot be assembled.
println("------------------------------------------------ SECTION $(prnt) ------------------------------------------------"); prnt += 1
verbose && println("Performing safety checks... ")
length(filepaths_S)==length(filepaths_W) || error("Number of measurement files: $(length(filepaths_S)). Number of weight matrix files: $(length(filepaths_W)). Number of measurement files and number of weight matrix files must match. Please correct and re-try.")
push!(timestamps,time()) # Timestamp: foundational safety check completed
dictionary_of_sections[prnt-1] = ("Performing safety checks",diff(timestamps)[end])
###########################################################################################################
# SECTION: LOAD MEASUREMENTS
# For each diagnostic i, load the measurements S[i], their uncertainties S_errs[i] and the measurement
# bin centers S_abscissas[i] (plus units for each) from the i:th file in filepaths_S. Both .jld2 and
# .hdf5 (.h5) file formats are supported. Afterwards, if the units of any uncertainty vector differ
# from the units of its measurement vector, convert the uncertainties to the measurement units.
println("------------------------------------------------ SECTION $(prnt) ------------------------------------------------"); prnt += 1
verbose && print("Loading measurements... ")
global S; S = Vector{Vector{Float64}}(undef,length(filepaths_S)) # Pre-allocate measurements for all diagnostics
global S_units; S_units = Vector{String}(undef,length(filepaths_S)) # Pre-allocate units of measurements for all diagnostics
global S_errs; S_errs = Vector{Vector{Float64}}(undef,length(filepaths_S)) # Pre-allocate errors (uncertainties) for all diagnostics
global S_errs_units; S_errs_units = Vector{String}(undef,length(filepaths_S)) # Pre-allocate units of errors (uncertainties) for all diagnostics
global S_abscissas; S_abscissas = Vector{Vector{Float64}}(undef,length(filepaths_S)) # Pre-allocate measurement bin centers for all diagnostics
global S_abscissas_units; S_abscissas_units = Vector{String}(undef,length(filepaths_S)) # Pre-allocate units of measurement bin centers for all diagnostics
for (i,f) in enumerate(filepaths_S)
    local myfile # Declare local scope. Just for clarity
    f_suffix = lowercase(split(f,".")[end]) # Assume the substring after the last "." is the file extension
    if f_suffix=="jld2"
        myfile = jldopen(f,false,false,false,IOStream) # jldopen(path, wr, create, truncate, iotype)
        read_func = x -> x # Identity function. For .jld2 files (indexing a JLD2 file already returns the data)
    elseif f_suffix=="h5" || f_suffix=="hdf5"
        myfile = h5open(f,"r")
        read_func = read # HDF5 read function. For .hdf5 (.h5) files (indexing returns a dataset handle that must be read)
    else
        error("Measurement files should be in either .jld2 or .hdf5 (.h5) file format. Measurement file "*f*" is not. Please correct and re-try.")
    end
    S[i] = read_func(myfile["S"])
    S_units[i] = read_func(myfile["S_units"])
    S_errs[i] = read_func(myfile["err"])
    S_errs_units[i] = read_func(myfile["err_units"])
    S_abscissas[i] = read_func(myfile["Ed_array"])
    S_abscissas_units[i] = read_func(myfile["Ed_array_units"])
    close(myfile)
end
verbose && println("Done!")
global ok # Declare global scope
ok = true # Simplifying print bool dummy variable (true as long as no unit conversion was needed)
verbose && print("Ensuring measurements units consistency between S and err... ")
for (i,S_unit) in enumerate(S_units)
    if !(S_unit==S_errs_units[i])
        global ok; ok = false
        verbose && println("")
        verbose && print("Units of signal $(i): $(S_unit). Units of err $(i): $(S_errs_units[i]). Attempting to convert err to match S units... ")
        # units_conversion_factor() is provided by misc/convert_units.jl (included in SECTION 0)
        S_errs[i] = units_conversion_factor(S_errs_units[i],S_unit) .*S_errs[i]
        S_errs_units[i] = S_unit
        verbose && println("Success!")
    end
end
ok && verbose && println("Done!")
append!(timestamps,time()) # The timestamp when the measurements have been loaded completely
dictionary_of_sections[prnt-1] = ("Loading measurements",diff(timestamps)[end])
###########################################################################################################
# SECTION: LOAD WEIGHT FUNCTIONS
# First, determine the total problem dimensionality DIM (1 measurement dimension + all reconstruction
# space dimensions), either from n_array (if specified) or by inspecting the first weight function file.
# Then pre-allocate the containers that the loading loop in the next section fills in.
println("------------------------------------------------ SECTION $(prnt) ------------------------------------------------"); prnt += 1
verbose && print("Loading weight functions... ")
if isempty(min_array) || isempty(max_array) || isempty(n_array) # If any of them are empty...
    # Use filepaths_W[1] to determine dimensionality
    verbose && println("")
    verbose && println("min_array, max_array or n_array was not specified (empty). Determining problem dimensionality via the first weight function matrix in filepaths_W... ")
    if !isfile(filepaths_W[1])
        error("$(filepaths_W[1]) is not a valid filepath. Please correct and re-try.")
    end
    file_ext = lowercase(split(filepaths_W[1],".")[end]) # Assume final part of filepath after "." is the file extension
    if file_ext=="jld2"
        myfile = jldopen(filepaths_W[1],false,false,false,IOStream) # jldopen(path, wr, create, truncate, iotype)
        if !(lowercase(scriptSources_W[1])=="calc4dweights" || lowercase(scriptSources_W[1])=="calc4dweights.jl")
            W = myfile["W"] # The W file key should be present in all types of weight function files, except...
            DIM = length(size(W))
        else # When we are loading a calc4DWeights.jl output file. Then, we know the dimension to be 5 (one measurement dimension, plus 4 reconstruction space dimensions)
            DIM = 5
        end
        close(myfile)
    elseif file_ext=="hdf5" || file_ext=="h5"
        myfile = h5open(filepaths_W[1],"r")
        W = read(myfile["W"])
        DIM = length(size(W))
        close(myfile)
    else
        error("$(filepaths_W[1]) was not in .jld2, .hdf5 or .h5 file format. Please correct and re-try.")
    end
else # If none of them are empty...
    DIM = 1+length(n_array) # Simply use n_array. One measurement dimension, plus all reconstruction space dimensions
end
global W_inflated; W_inflated = Vector{Array{Float64,DIM}}(undef,length(filepaths_W)) # The (inflated) weight functions, for each diagnostic. PLEASE NOTE! All weight functions need to have the same number of dimensions!
global W_abscissas; W_abscissas = Vector{Vector{Vector{Real}}}(undef,length(filepaths_W)) # The abscissas (measurement bin centers + phase-space grid points), for each set of weight functions
global W_abscissas_units; W_abscissas_units = Vector{Vector{String}}(undef,length(filepaths_W)) # The units of measurement, for each abscissa, for each set of weight functions
# Load every weight function file in filepaths_W, dispatching on the producing script declared in
# scriptSources_W[i] (calc2DWeights, orbWeights_2Dto4D/calcOrbWeights, calc4DWeights, or a generic
# format with 'Ed_array' + 'D1_array'..'D6_array' file keys). Fills W_inflated, W_abscissas and
# W_abscissas_units (pre-allocated above) for each diagnostic i.
for (i,f) in enumerate(filepaths_W)
    local myfile # Declare local scope. Just for clarity
    f_suffix = lowercase(split(f,".")[end]) # Check file extension for .jld2 or .hdf5 (.h5)
    if f_suffix=="jld2"
        myfile = jldopen(f,false,false,false,IOStream)
        read_func = x -> x # Identity function. For .jld2 files
    elseif f_suffix=="h5" || f_suffix=="hdf5"
        myfile = h5open(f,"r")
        read_func = read # HDF5 read function. For .hdf5 (.h5) files
    else
        error("Weight function files should be in either .jld2 or .hdf5 (.h5) file format. Weight function file "*f*" is not. Please correct and re-try.")
    end
    sswi = lowercase(scriptSources_W[i])
    if sswi=="calc2dweights" || sswi=="calc2dweights.jl" # Account for misinterpretation by user (include .jl file extension)
        if !haskey(myfile,"W")
            error("In the filepaths_W input array, the $(f) file does not have a 'W' file key, even though '$(scriptSources_W[i])' was specified in the scriptSources_W input array. Please correct and re-try.")
        end
        Ed_array = read_func(myfile["Ed_array"])
        Ed_array_units = read_func(myfile["Ed_array_units"])
        if lowercase(coordinate_system)=="(vpara,vperp)"
            W_inflated[i] = read_func(myfile["W_vel"])
            D1_array = read_func(myfile["vpara_array"])
            D1_array_units = "m_s^-1" # from calc2DWeights.jl, the vpara grid points will always be in m_s^-1
            D2_array = read_func(myfile["vperp_array"])
            D2_array_units = "m_s^-1" # from calc2DWeights.jl, the vperp grid points will always be in m_s^-1
        elseif lowercase(coordinate_system)=="(e,p)"
            W_inflated[i] = read_func(myfile["W"])
            D1_array = read_func(myfile["E_array"])
            D1_array_units = "keV" # from calc2DWeights.jl, the energy grid points will always be in keV
            D2_array = read_func(myfile["p_array"])
            D2_array_units = "-" # from calc2DWeights.jl, the pitch grid points will always be dimensionless
        else
            # Unknown coordinate_system: fall back to (E,p) with a warning rather than aborting
            @warn "Element number $(i) of input variable Vector 'scriptSources_W' was specified as $(scriptSources_W[i]), but input variable 'coordinate_system' was incorrectly specified ($(coordinate_system)). Therefore, a coordinate system of (E,p) will be automatically assumed."
            W_inflated[i] = read_func(myfile["W"])
            D1_array = read_func(myfile["E_array"])
            D1_array_units = "keV" # from calc2DWeights.jl, the energy grid points will always be in keV
            D2_array = read_func(myfile["p_array"])
            D2_array_units = "-" # from calc2DWeights.jl, the pitch grid points will always be dimensionless
        end
        close(myfile)
        W_abscissas[i] = [Ed_array, D1_array, D2_array]
        W_abscissas_units[i] = [Ed_array_units, D1_array_units, D2_array_units]
    elseif sswi=="orbweights_2dto4d" || sswi=="orbweights_2dto4d.jl" || sswi=="calcorbweights" || sswi=="calcorbweights.jl" # Account for misinterpretation by user (include .jl file extension). PLEASE NOTE! THESE FILES SHOULD REALLY BE OUTPUT FILES FROM THE orbWeights_2Dto4D.jl SCRIPT. THE CODE BELOW JUST TAKES INTO ACCOUNT THAT USERS MIGHT BE CONFUSED
        if haskey(myfile,"W2D")
            error("In the filepaths_W input array, the $(f) file is an output file of the calcOrbWeights.jl script. This is not a file that you can specify as input to the solveInverseProblem.jl script. Even though one might think so. Please specify an output file of the OWCF/helper/orbWeights_2Dto4D.jl script instead.")
        end
        if !haskey(myfile,"W")
            error("In the filepaths_W input array, the $(f) file does not have a 'W' file key, even though '$(scriptSources_W[i])' was specified in the scriptSources_W input array. Please correct and re-try.")
        end
        W_inflated[i] = read_func(myfile["W"])
        Ed_array = read_func(myfile["Ed_array"])
        Ed_array_units = read_func(myfile["Ed_array_units"])
        E_array = read_func(myfile["E_array"])
        E_array_units = "keV" # from calcOrbWeights.jl, the energy grid points will always be in keV
        pm_array = read_func(myfile["pm_array"])
        pm_array_units = "-" # from calcOrbWeights.jl, the pitch maximum grid points will always be dimensionless
        Rm_array = read_func(myfile["Rm_array"])
        Rm_array_units = "m" # from calcOrbWeights.jl, the major radius maximum grid points will always be in meters
        close(myfile)
        W_abscissas[i] = [Ed_array, E_array, pm_array, Rm_array]
        W_abscissas_units[i] = [Ed_array_units, E_array_units, pm_array_units, Rm_array_units]
    elseif sswi=="calc4dweights" || sswi=="calc4dweights.jl"
        # Due to its size, the calc4DWeights.jl case needs special treatment
        # This code can most likely only be run on clusters with a HUGE amount of RAM
        VAL = read_func(myfile["VAL"]) # Vector containing the non-zero values of the (E,p,R,z) weight matrix
        ROW = read_func(myfile["ROW"]) # Vector containing the corresponding row elements
        COL = read_func(myfile["COL"]) # Vector containing the corresponding column elements
        m_W = read_func(myfile["m_W"]) # Total number of rows (including zero elements not included in R and C) of the (E,p,R,z) weight matrix
        n_W = read_func(myfile["n_W"]) # Total number of columns (including zero elements not included in R and C) of the (E,p,R,z) weight matrix
        Ed_array = read_func(myfile["Ed_array"])
        Ed_array_units = read_func(myfile["Ed_array_units"])
        E_array = read_func(myfile["E_array"])
        E_array_units = "keV" # from calc4DWeights.jl, the energy grid points will always be in keV
        p_array = read_func(myfile["p_array"])
        p_array_units = "-" # from calc4DWeights.jl, the pitch grid points will always be dimensionless
        R_array = read_func(myfile["R_array"])
        R_array_units = "m" # from calc4DWeights.jl, the major radius grid points will always be in meters
        z_array = read_func(myfile["z_array"])
        z_array_units = "m" # from calc4DWeights.jl, the vertical coordinate grid points will always be in meters
        EpRz_coords = read_func(myfile["EpRz_coords"]) # The indices and (E,p,R,z) coordinates for all the columns of the (E,p,R,z) weight matrix (W_2D, see below)
        close(myfile)
        # Appending a (possibly zero) element at (m_W,n_W) forces the sparse matrix to have the full
        # m_W x n_W size even if the bottom-right corner holds no data; dropzeros() then removes any
        # explicitly stored zeros again
        W_2D = dropzeros(sparse(append!(ROW,m_W),append!(COL,n_W),append!(VAL,0.0)))
        W_5D = zeros(length(Ed_array),length(E_array),length(p_array),length(R_array),length(z_array))
        verbose && println("Re-creating the 5D (Ed,E,p,R,z) weight function matrix... ")
        for iEd in 1:size(W_2D,1)
            # 'icol' (previously 'i') avoids shadowing the outer diagnostic index 'i', which is still
            # needed below to store the results
            for (icol,c) in enumerate(EpRz_coords[:])
                W_5D[iEd,c[1][1],c[2][1],c[3][1],c[4][1]] = W_2D[iEd,icol]
            end
        end
        W_inflated[i] = W_5D
        W_abscissas[i] = [Ed_array, E_array, p_array, R_array, z_array]
        W_abscissas_units[i] = [Ed_array_units, E_array_units, p_array_units, R_array_units, z_array_units]
    else
        # The general weight functions loading case
        W_inflated[i] = read_func(myfile["W"])
        abscissas = []
        units = []
        append!(abscissas,[read_func(myfile["Ed_array"])]) # Must have 'Ed_array' file key
        append!(units,[read_func(myfile["Ed_array_units"])]) # Must have 'Ed_array_units' file key
        append!(abscissas,[read_func(myfile["D1_array"])]) # Must have at least 'D1_array' file key
        append!(units,[read_func(myfile["D1_array_units"])]) # Must have at least 'D1_array_units' file key
        if haskey(myfile,"D2_array")
            append!(abscissas,[read_func(myfile["D2_array"])])
            append!(units,[read_func(myfile["D2_array_units"])])
        end
        if haskey(myfile,"D3_array")
            append!(abscissas,[read_func(myfile["D3_array"])])
            append!(units,[read_func(myfile["D3_array_units"])])
        end
        if haskey(myfile,"D4_array")
            append!(abscissas,[read_func(myfile["D4_array"])])
            append!(units,[read_func(myfile["D4_array_units"])])
        end
        if haskey(myfile,"D5_array")
            append!(abscissas,[read_func(myfile["D5_array"])])
            append!(units,[read_func(myfile["D5_array_units"])])
        end
        if haskey(myfile,"D6_array")
            append!(abscissas,[read_func(myfile["D6_array"])])
            append!(units,[read_func(myfile["D6_array_units"])])
        end
        close(myfile)
        W_abscissas[i] = [Float64.(ab) for ab in abscissas] # Transform to Vector{Float64} from Vector{Any}
        W_abscissas_units[i] = map(x-> "$(x)",units) # Transform to Vector{String} from Vector{Any}
    end
end
verbose && println("Done!")
append!(timestamps,time()) # The timestamp when the weight functions have been loaded completely
dictionary_of_sections[prnt-1] = ("Loading weight functions",diff(timestamps)[end])
###########################################################################################################
# SECTION: CHECK THAT ALL ABSCISSAS MATCH IN TERMS OF DIMENSIONS AND UNITS.
# OTHERWISE, CORRECT THEM SO THAT ALL MATCH.
# For every weight function file: (1) its number of abscissas must equal that of the first file,
# (2) its measurement-bin-center units must match the corresponding signal's abscissa units (converted
# if not, together with the weight function values, since W has units signal/ion), and (3) its
# reconstruction space abscissa units must match those of the previous weight function file
# (converted if not).
println("------------------------------------------------ SECTION $(prnt) ------------------------------------------------"); prnt += 1
verbose && println("Performing dimension and units checks for the weight functions... ")
for i in eachindex(filepaths_W)
    if !(length(W_abscissas[i])==length(W_abscissas[1]))
        error("Number of abscissas found in $(filepaths_W[1]): $(length(W_abscissas[1])). Number of abscissas found in $(filepaths_W[i]): $(length(W_abscissas[i])). Number of abscissas must match for all weight functions files. Please correct and re-try.")
    end
    # Check if the units of the measurement bin centers of the weight functions match the units of the measurement bin centers of the signal
    # If not, convert both the abscissa units and weight functions units (since weight functions have units signal/ion)
    if !(W_abscissas_units[i][1]==S_abscissas_units[i])
        verbose && println("---> Units of abscissa (measurement bin centers) of $(filepaths_S[i]) is $(S_abscissas_units[i]).")
        verbose && println("---> Units of FIRST abscissa (measurement bin centers) of $(filepaths_W[i]) is $(W_abscissas_units[i][1]).")
        verbose && println("------> Converting weight function and its first abscissa to match units of signal abscissa... ")
        ucf = units_conversion_factor(W_abscissas_units[i][1],S_abscissas_units[i])
        W_inflated[i] = (1/ucf) .*W_inflated[i] # Since weight functions have units signal/ion and the signal will have 1/units of the abscissa, multiply by the inverse of the units conversion factor
        W_abscissas[i][1] = ucf .*W_abscissas[i][1]
        W_abscissas_units[i][1] = S_abscissas_units[i]
    end
    # Check that the units of the reconstruction space abscissas of all weight functions match
    if i>1 # Only compare with previous file if i>1. Otherwise, will cause out-of-bounds error
        for j in eachindex(W_abscissas[i]) # For each reconstruction space dimension (j>1)
            if (j>1) && !(W_abscissas_units[i][j]==W_abscissas_units[i-1][j]) # If the units of the same dimension for different weight function files don't match (except for the measurement bin center units of course (they will almost always differ), hence the j>1 condition)...
                verbose && println("Units of abscissa $(j) in $(filepaths_W[i]) and $(filepaths_W[i-1]) do not match. Converting abscissa $(j) in $(filepaths_W[i]) to match the units of abscissa $(j) in $(filepaths_W[i-1])... ")
                ucf = units_conversion_factor(W_abscissas_units[i][j],W_abscissas_units[i-1][j])
                W_abscissas[i][j] = ucf .*W_abscissas[i][j]
                # BUGFIX: was a no-op self-assignment (W_abscissas_units[i][j] = W_abscissas_units[i][j]),
                # which left the stored units out of sync with the just-converted abscissa values.
                # Record the units the abscissa was converted TO, i.e. those of the previous file.
                W_abscissas_units[i][j] = W_abscissas_units[i-1][j]
            end
        end
    end
end
append!(timestamps,time()) # The timestamp when the abscissas units checks have been performed
dictionary_of_sections[prnt-1] = ("Performing abscissa units checks",diff(timestamps)[end])
###########################################################################################################
# SECTION: INTERPOLATE ONTO THE GRID SPECIFIED BY min_array, max_array and n_array
# IF THEY ARE NOT SPECIFIED, USE THE ABSCISSAS OF THE FIRST WEIGHT MATRIX IN filepaths_W
# Build the common reconstruction space query grid as a tuple of (grid point value, grid point index)
# vectors, one vector per reconstruction space dimension.
println("------------------------------------------------ SECTION $(prnt) ------------------------------------------------"); prnt += 1
verbose && println("Attempting to interpolate all weight functions onto a common reconstruction space grid... ")
verbose && println("---> Creating interpolation grid (query points)")
global query_vecs_n_inds; query_vecs_n_inds = () # A tuple to hold all query point vectors and their indices. Structure: ((vector,indices),(vector,indices),...) with length equal to reconstruction grid dimension
if isempty(min_array) || isempty(max_array) || isempty(n_array) # If any of them are empty...
    verbose && println("------> min_array, max_array and/or n_array were not specified. Data grid of $(filepaths_W[1]) will be used")
    for abscissa in W_abscissas[1][2:end] # Use all reconstruction space abscissas of the first weight function file as reconstruction grid... (index 1 is the measurement dimension, hence 2:end)
        global query_vecs_n_inds # Use global scope variable
        query_vecs_n_inds = tuple(query_vecs_n_inds[:]...,collect(zip(abscissa,1:length(abscissa)))) # Add the (vector,indices) pairs one by one into a big tuple (tuples are immutable, hence the cumbersome code)
    end
else
    verbose && println("------> min_array, max_array and n_array were specified. Utilizing... ")
    for i in eachindex(n_array) # For all reconstruction grid dimensions...
        global query_vecs_n_inds # Use global scope variable
        query_vecs_n_inds = tuple(query_vecs_n_inds[:]...,collect(zip(collect(range(min_array[i],stop=max_array[i],length=n_array[i])),1:n_array[i]))) # Add the (vector,indices) pairs one by one into a big tuple (tuples are immutable, hence the cumbersome code)
    end
end
query_points_n_coords = Iterators.product(query_vecs_n_inds...) # Create a long list of all reconstruction space grid points and their coordinates by computing a product between all query point-index vectors. Example structure (if 3 dimensions): [((x1_1,1),(x2_1,1),(x3_1,1)),((x1_2,2),(x2_1,1),(x3_1,1)),...]
for (i,w_inflated) in enumerate(W_inflated)
w_inflated_interpolated = zeros(tuple(size(w_inflated,1),(length.(query_vecs_n_inds))...)) # Pre-allocate a new inflated weight function, to store the interpolated values
nodess = () # The nodes of the interpolation object (nodess name only due to Scope warnings)
for abscissa in W_abscissas[i][2:end] # Use the reconstruction space abscissas of the weight functions as the nodes of the interpolation object...
nodess = tuple(nodess[:]...,abscissa) # Put them all in a tuple (tuples are immutable, hence the cumbersome code)
end
node_coords = CartesianIndices(length.(nodess)) # A trick to specify all node coordinates of an array of general size
verbose && println("---> Interpolating weight matrix $(i) of $(length(W_inflated))... ")
for j=1:size(w_inflated,1)
local itp; itp = Interpolations.interpolate(nodess,w_inflated[j,node_coords],Gridded(Linear()))
local etp; etp = Interpolations.extrapolate(itp,Interpolations.Flat()) # If outside of interpolation region, use edge values to extrapolate
for query_point_n_coord in query_points_n_coords
point = map(x-> x[1],query_point_n_coord) # The point to interpolate at. E.g. (100.0,0.3) in energy (keV),pitch
coord = map(x-> x[2],query_point_n_coord) # The coordinate of that point. E.g. (53,14)
w_inflated_interpolated[j,coord...] = etp(point...)
end
end
W_inflated[i] = w_inflated_interpolated # Replace the non-interpolated with the interpolated
W_abscissas[i] = [W_abscissas[i][1], map(x-> map(xx-> xx[1],x), query_vecs_n_inds)...] # Replace the original grid points with the query points
end
append!(timestamps,time()) # The timestamp when the weight functions have been interpolated onto a common reconstruction space grid
dictionary_of_sections[prnt-1] = ("Interpolating weight functions onto common reconstruction space grid",diff(timestamps)[end])
###########################################################################################################
# SECTION: RESHAPE ALL INFLATED WEIGHT MATRICES INTO THEIR 2D SHAPE, TO BE USED IN INVERSE PROBLEMS
# THEN, INTERPOLATE EACH WEIGHT MATRIX W[i] TO MATCH THE MEASUREMENT BIN CENTER GRID OF THE CORRESPONDING SIGNAL S[i]
println("------------------------------------------------ SECTION $(prnt) ------------------------------------------------"); prnt += 1
verbose && println("For each diagnostic, restructuring weight functions into 2D matrix... ")
global W; W = Vector{Array{Float64,2}}(undef,length(filepaths_W)) # The weight matrices, for each diagnostic
for (i,w_inflated) in enumerate(W_inflated)
verbose && println("---> Restructuring weight functions, creating weight matrix $(i) of $(length(W_inflated))... ")
ws = size(w_inflated)
# Flatten all reconstruction-space dimensions into one: rows = measurement bins, columns = reconstruction grid points
W[i] = reshape(w_inflated,(ws[1],reduce(*,ws[2:end])))
end
W_inflated = nothing # Clear memory. To minimize memory usage
verbose && println("For each diagnostic, interpolating the weight matrix to match measurement data grid... ")
# Interpolate each weight matrix column-by-column (i.e. per reconstruction grid point) along the
# measurement axis, so its rows line up with the measurement bin centers of the signal S[i].
for (i,w) in enumerate(W)
verbose && println("---> Interpolating weight matrix $(i) of $(length(W))... ")
verbose && println("------> extrema(S abscissa grid): $(extrema(S_abscissas[i])) $(S_abscissas_units[i])")
verbose && println("------> extrema(W abscissa grid): $(extrema(W_abscissas[i][1])) $(W_abscissas_units[i][1])")
if !(W_abscissas_units[i][1]==S_abscissas_units[i])
@warn "Units for measurement data grid ($(S_abscissas_units[i])) do not match units of weight matrix measurement bin centers ($(W_abscissas_units[i][1])). This should be impossible. Please contact the admins of the OWCF and attach the start file, to have the error investigated."
end
query_points = S_abscissas[i]
w_new = zeros(length(query_points),size(w,2))
for (ic,col) in enumerate(eachcol(w))
local nodes; nodes = (W_abscissas[i][1],)
local itp; itp = Interpolations.interpolate(nodes,col,Gridded(Linear()))
local etp; etp = Interpolations.extrapolate(itp,0.0) # Assume zero sensitivity (no knowledge) outside of known spectrum range
w_new[:,ic] = [etp(qp) for qp in query_points]
end
W[i] = w_new
W_abscissas[i][1] = query_points
W_abscissas_units[i][1] = S_abscissas_units[i] # This should already be the case
end
append!(timestamps,time()) # The timestamp when the weight functions have been reshaped into 2D and interpolated onto the corresponding measurements grid
dictionary_of_sections[prnt-1] = ("Constructing 2D weight matrices from weight functions, and interpolating to match diagnostics measurement bin centers",diff(timestamps)[end])
###########################################################################################################
# SECTION: IF rescale_W WAS SET TO true IN THE START FILE, LOAD OR COMPUTE FAST-ION DISTRIBUTION(S).
# USE THIS/THESE DISTRIBUTION(S) TOGETHER WITH THE WEIGHT MATRICES TO COMPUTE REFERENCE MEASUREMENTS.
# COMPUTE RESCALING FACTORS TO BE ABLE TO RESCALE WEIGHT FUNCTIONS TO HAVE THE REFERENCE MEASUREMENTS MATCH
# THE EXPERIMENTAL MEASUREMENTS.
println("------------------------------------------------ SECTION $(prnt) ------------------------------------------------"); prnt += 1
if rescale_W
verbose && println("Computing rescale factors for weight functions... ")
# rescale_func reduces a measurement vector (S[i] or W*F_ref) to a scalar; the rescale factor
# for diagnostic i is rescale_func(S[i])/rescale_func(W[i]*F_ref).
if lowercase(String(rescale_W_type))=="mean"
verbose && println("---> mean(S)/mean(W*F_ref) will be used to rescale weight functions... ")
rescale_func = Statistics.mean # Set the mean() function as the rescale_func() function
elseif lowercase(String(rescale_W_type))=="maximum" || lowercase(String(rescale_W_type))=="max"
verbose && println("---> max(S)/max(W*F_ref) will be used to rescale weight functions... ")
rescale_func = Base.maximum # Set the maximum() function as the rescale_func() function
else
error("rescale_W_type=$(rescale_W_type). Currently supported options include :MEAN and :MAXIMUM. Please correct and re-try.")
end
if lowercase(String(rescale_W_F_ref_source))=="file"
currently_supported_abscissas = "(E,p), (vpara, vperp), (E,p,R,z)" # UPDATE THIS WHEN NEEDED!
standard_rescale_W_error = "rescale_W_F_ref_source=$(rescale_W_F_ref_source) was specified, but weight function abscissas did not match any currently supported groups. Currently supported abscissas for weight functions include $(currently_supported_abscissas). Please change rescale_W_F_ref_source to :GAUSSIAN, or specify filepaths_W to be filepath(s) to file(s) containing weight functions with supported abscissas."
w_abscissas_units = W_abscissas_units[1][2:end] # Units of reconstruction space (2:end) abscissas of first weight function matrix are the units of the abscissas of all weight functions matrices, because of section 5
w_abscissas = W_abscissas[1][2:end] # Reconstruction space (2:end) abscissas of first weight function matrix are the abscissas of all weight functions matrices, because of section 5
verbose && println("---> rescale_W_F_ref_source=$(rescale_W_F_ref_source) was specified... ")
verbose && println("------> Checking weight function reconstruction space compatibility. Currently supported coordinate systems are $(currently_supported_abscissas)... ")
# Determine which supported reconstruction space the weight functions are defined in
Ep_bool = is_energy_pitch(w_abscissas, w_abscissas_units)
VEL_bool = is_vpara_vperp(w_abscissas, w_abscissas_units)
EpRz_bool = is_EpRz(w_abscissas, w_abscissas_units)
# ADD MORE CHECKS HERE IN FUTURE VERSIONS, IF NEEDED <---------------------------------------------------------------------
if Ep_bool
w_rec_space = "(E,p)"
dummy_bool, w_energy_ind, w_pitch_ind = is_energy_pitch(w_abscissas, w_abscissas_units; returnExtra=true)
R_of_interests = vcat(units_conversion_factor(R_of_interest_units,"m")*R_of_interest) # R_of_interests will be the same, regardless of 2D coordinates
z_of_interests = vcat(units_conversion_factor(z_of_interest_units,"m")*z_of_interest) # z_of_interests will be the same, regardless of 2D coordinates
elseif VEL_bool
w_rec_space = "(vpara,vperp)"
dummy_bool, w_vpara_ind, w_vperp_ind = is_vpara_vperp(w_abscissas, w_abscissas_units; returnExtra=true)
R_of_interests = vcat(units_conversion_factor(R_of_interest_units,"m")*R_of_interest) # R_of_interests will be the same, regardless of 2D coordinates
z_of_interests = vcat(units_conversion_factor(z_of_interest_units,"m")*z_of_interest) # z_of_interests will be the same, regardless of 2D coordinates
elseif EpRz_bool
w_rec_space = "(E,p,R,z)"
dummy_bool, w_energy_ind, w_pitch_ind, w_R_ind, w_z_ind, R_of_interests, z_of_interests = is_EpRz(w_abscissas, w_abscissas_units; returnExtra=true)
R_of_interests = units_conversion_factor(w_abscissas_units[w_R_ind],"m") .*R_of_interests # Need unit of measurement meter for CDFto4D function (if that will be used)
z_of_interests = units_conversion_factor(w_abscissas_units[w_z_ind],"m") .*z_of_interests # Need unit of measurement meter for CDFto4D function (if that will be used)
else
error(standard_rescale_W_error)
end
verbose && println("------> Loading F_ref from file $(rescale_W_F_file_path)... ")
file_ext = lowercase(split(rescale_W_F_file_path,".")[end]) # Assume final part of filepath after "." is the file extension
# The loaders below all return f(E,p,R,z) with E in keV and R,z in m (converted below if needed)
if file_ext=="jld2"
F_ref, E_ref, p_ref, R_ref, z_ref = JLD2to4D(rescale_W_F_file_path)
elseif file_ext=="hdf5" || file_ext=="h5"
F_ref, E_ref, p_ref, R_ref, z_ref = h5to4D(rescale_W_F_file_path;rowmajor=h5_is_rowmajor)
elseif file_ext=="cdf"
F_ref, E_ref, p_ref, R_ref, z_ref = CDFto4D(rescale_W_F_file_path, R_of_interests, z_of_interests; btipsign=btipsign)
else
error("Input variable 'rescale_W_F_file_path' has unknown file extension ($(file_ext)). Accepted file extensions are .jld2, .hdf5, .h5 and .cdf. Please correct and re-try.")
end
# Heuristic unit repairs for the loaded distribution (cm vs m, eV vs keV)
if maximum(R_ref)>100.0 # Assume no tokamak has a major radius as large as 100 m...
verbose && println("------> Loaded F_ref from file $(rescale_W_F_file_path) is assumed to be given in cm^-3. Converting to m^-3... ")
R_ref = units_conversion_factor("cm","m") .* R_ref
z_ref = units_conversion_factor("cm","m") .* z_ref
F_ref = units_conversion_factor("cm^-3","m^-3") .* F_ref
end
if maximum(E_ref)>1.0e6 # Assume no tokamak has a fast-ion distribution with a maximum energy as great as 1.0e6 keV
verbose && println("------> Loaded F_ref from file $(rescale_W_F_file_path) is assumed to be given in eV^-1. Converting to keV^-1... ")
E_ref = units_conversion_factor("eV","keV") .* E_ref
F_ref = units_conversion_factor("eV^-1","keV^-1") .* F_ref
end
if w_rec_space=="(E,p)"
verbose && println("------> Computing W*F_ref assuming weight functions are given in (E,p) coordinates (or equivalent)... ")
w_energy_units = w_abscissas_units[w_energy_ind]
w_pitch_units = w_abscissas_units[w_pitch_ind]
w_energy_abscissa = w_abscissas[w_energy_ind]; nwE = length(w_energy_abscissa)
w_pitch_abscissa = w_abscissas[w_pitch_ind]; nwp = length(w_pitch_abscissa)
query_points_n_coords = Iterators.product(zip(w_energy_abscissa,1:nwE),zip(w_pitch_abscissa,1:nwp),zip(R_of_interests,1:1),zip(z_of_interests,1:1)) # R and z should already be 1-element Vectors with unit of measurement 'm'
F_ref_interpolated = zeros(nwE,nwp,1,1) # Pre-allocate the interpolated f(E,p) distribution
E_ref = units_conversion_factor("keV",w_energy_units) .*E_ref # Convert the energy units from keV (which we know the JLD2to4D, h5to4D and CDFto4D functions output) to w_energy_units
F_ref = units_conversion_factor("keV^-1",units_inverse(w_energy_units)) .*F_ref # Also, convert the fast-ion distribution using the inverse of the w_energy_units
nodes = (E_ref,p_ref,R_ref,z_ref)
itp = Interpolations.interpolate(nodes,F_ref,Gridded(Linear()))
etp = Interpolations.extrapolate(itp,Interpolations.Flat()) # If outside of interpolation region, use edge values to extrapolate
for query_point_n_coord in query_points_n_coords
point = map(x-> x[1],query_point_n_coord) # The point to interpolate at. E.g. (100.0,0.3) in energy (keV),pitch
coord = map(x-> x[2],query_point_n_coord) # The coordinate of that point. E.g. (53,14)
F_ref_interpolated[coord...] = etp(point...)
end
F_ref_interpolated = dropdims(F_ref_interpolated,dims=(3,4)) # From shape (nwE,nwp,1,1) to (nwE,nwp)
F_ref_interp_1D = reshape(F_ref_interpolated,(nwE*nwp,))
rescale_W_factors = ones(length(filepaths_W)) # Pre-allocate weight re-scale factors
for i in eachindex(filepaths_W)
WF = W[i]*F_ref_interp_1D # The weight function matrix (2D) multiplied with the (1D/vectorized) reference fast-ion distribution
rescale_W_factors[i] = rescale_func(S[i])/rescale_func(WF) # S[i] is the experimental data vector of (fast-ion) diagnostic 'i'
end
elseif w_rec_space=="(vpara,vperp)"
# Weight functions live in (vpara,vperp): first interpolate f(E,p,R,z) at the (R,z) point of
# interest, then transform f(E,p) -> f(vpara,vperp) (with Jacobian) and interpolate that onto
# the weight-function velocity grid before computing the rescale factors.
verbose && println("------> Using reference fast-ion distribution assuming weight functions are given in (vpara,vperp) coordinates (or equivalent)... ")
w_vpara_unit = w_abscissas_units[w_vpara_ind]
w_vperp_unit = w_abscissas_units[w_vperp_ind]
w_vpara_abscissa = w_abscissas[w_vpara_ind]; nwvpa = length(w_vpara_abscissa)
w_vperp_abscissa = w_abscissas[w_vperp_ind]; nwvpe = length(w_vperp_abscissa) # BUGFIX: was length(w_vpara_abscissa) (copy-paste error), giving a wrong grid size whenever nwvpa != nwvpe
nfE = length(E_ref); nfp = length(p_ref)
nodes = (E_ref,p_ref,R_ref,z_ref)
itp = Interpolations.interpolate(nodes,F_ref,Gridded(Linear()))
etp = Interpolations.extrapolate(itp,Interpolations.Flat()) # If outside of interpolation region, use edge values to extrapolate
query_points_n_coords = Iterators.product(zip(E_ref,1:nfE),zip(p_ref,1:nfp),zip(R_of_interests,1:1),zip(z_of_interests,1:1))
F_ref_interpolated = zeros(nfE,nfp,1,1) # Pre-allocate the interpolated f(E,p) distribution
for query_point_n_coord in query_points_n_coords
point = map(x-> x[1],query_point_n_coord) # The point to interpolate at. E.g. (100.0,0.3) in energy (keV),pitch
coord = map(x-> x[2],query_point_n_coord) # The coordinate of that point. E.g. (53,14)
F_ref_interpolated[coord...] = etp(point...)
end
F_ref_interpolated = dropdims(F_ref_interpolated,dims=(3,4)) # From shape (nfE,nfp,1,1) to (nfE,nfp)
F_ref_VEL, vpara_ref, vperp_ref = Ep2VparaVperp(E_ref, p_ref, F_ref_interpolated; my_gcp=FI_species, needJac=true, returnAbscissas=true)
vpara_ref = units_conversion_factor("m_s^-1",w_vpara_unit) .*vpara_ref # Match the units of w_abscissas
vperp_ref = units_conversion_factor("m_s^-1",w_vperp_unit) .*vperp_ref # Match the units of w_abscissas
F_ref_VEL = units_conversion_factor("m^-2_s^2",units_inverse(w_vpara_unit)*"_"*units_inverse(w_vperp_unit)) .* F_ref_VEL
nodes = (vpara_ref,vperp_ref)
itp = Interpolations.interpolate(nodes,F_ref_VEL,Gridded(Linear()))
etp = Interpolations.extrapolate(itp,Interpolations.Flat())
query_points_n_coords = Iterators.product(zip(w_vpara_abscissa,1:nwvpa),zip(w_vperp_abscissa,1:nwvpe))
F_ref_VEL_interp = zeros(nwvpa,nwvpe)
for query_point_n_coord in query_points_n_coords
point = map(x-> x[1],query_point_n_coord) # The point to interpolate at. E.g. (-0.6e6,0.2e6) in vpara (m/s), vperp (m/s)
coord = map(x-> x[2],query_point_n_coord) # The coordinate of that point. E.g. (53,14)
F_ref_VEL_interp[coord...] = etp(point...) # BUGFIX: splat the coordinate tuple, as in all sibling branches (indexing a 2D array with a Tuple, F_ref_VEL_interp[coord], is a MethodError)
end
F_ref_VEL_interp_1D = reshape(F_ref_VEL_interp,(nwvpa*nwvpe,))
rescale_W_factors = ones(length(filepaths_W)) # Pre-allocate weight re-scale factors
for i in eachindex(filepaths_W)
WF = W[i]*F_ref_VEL_interp_1D # The weight function matrix (2D) multiplied with the (1D/vectorized) reference fast-ion distribution
rescale_W_factors[i] = rescale_func(S[i])/rescale_func(WF) # S[i] is the experimental data vector of (fast-ion) diagnostic 'i'
end
elseif w_rec_space=="(E,p,R,z)"
# Weight functions live in full 4D (E,p,R,z): interpolate the loaded f(E,p,R,z) directly
# onto the 4D weight-function grid (no transform needed), then compute the rescale factors.
verbose && println("------> Using reference fast-ion distribution assuming weight functions are given in (E,p,R,z) coordinates (or equivalent)... ")
w_energy_units = w_abscissas_units[w_energy_ind]
w_pitch_units = w_abscissas_units[w_pitch_ind]
w_R_units = w_abscissas_units[w_R_ind]
w_z_units = w_abscissas_units[w_z_ind]
w_energy_abscissa = w_abscissas[w_energy_ind]; nwE = length(w_energy_abscissa)
w_pitch_abscissa = w_abscissas[w_pitch_ind]; nwp = length(w_pitch_abscissa)
w_R_abscissa = w_abscissas[w_R_ind]; nwR = length(w_R_abscissa)
w_z_abscissa = w_abscissas[w_z_ind]; nwz = length(w_z_abscissa)
E_ref = units_conversion_factor("keV",w_energy_units) .*E_ref # Convert the energy units from keV (which we know the JLD2to4D, h5to4D and CDFto4D functions output) to w_energy_units
R_ref = units_conversion_factor("m",w_R_units) .*R_ref # Convert the R units from m (which we know the JLD2to4D, h5to4D and CDFto4D functions output) to w_R_units
z_ref = units_conversion_factor("m",w_z_units) .*z_ref # Convert the z units from m (which we know the JLD2to4D, h5to4D and CDFto4D functions output) to w_z_units
F_ref = units_conversion_factor("keV^-1_m^-3",units_inverse(w_energy_units)*"_"*units_inverse(w_R_units)*"_"*units_inverse(w_z_units)) .*F_ref # Also, convert the fast-ion distribution from keV^-1_m^-3 to the inverse of w_energy_units, w_R_units and w_z_units
query_points_n_coords = Iterators.product(zip(w_energy_abscissa,1:nwE),zip(w_pitch_abscissa,1:nwp),zip(w_R_abscissa,1:nwR),zip(w_z_abscissa,1:nwz))
F_ref_interpolated = zeros(nwE,nwp,nwR,nwz) # Pre-allocate the interpolated f(E,p,R,z) distribution
nodes=(E_ref,p_ref,R_ref,z_ref)
itp = Interpolations.interpolate(nodes,F_ref,Gridded(Linear()))
etp = Interpolations.extrapolate(itp,Interpolations.Flat())
for query_point_n_coord in query_points_n_coords
point = map(x-> x[1],query_point_n_coord) # The point to interpolate at. E.g. (120.0,0.64,3.1,0.2) in E [keV], p [-], R [m], z [m]
coord = map(x-> x[2],query_point_n_coord) # The coordinate of that point. E.g. (53,34,15,16)
F_ref_interpolated[coord...] = etp(point...)
end
F_ref_interp_1D = reshape(F_ref_interpolated,(nwE*nwp*nwR*nwz,))
rescale_W_factors = zeros(length(filepaths_W)) # Pre-allocated; every element is overwritten in the loop below
for i in eachindex(filepaths_W)
WF = W[i]*F_ref_interp_1D # The weight function matrix (2D) multiplied with the (1D/vectorized) reference fast-ion distribution
rescale_W_factors[i] = rescale_func(S[i])/rescale_func(WF) # S[i] is the experimental data vector of (fast-ion) diagnostic 'i'
end
elseif false # ADD MORE CHECKS HERE IN FUTURE VERSIONS, IF NEEDED <---------------------------------------------------------------------
else
error(standard_rescale_W_error)
end
elseif lowercase(String(rescale_W_F_ref_source))=="gaussian"
# No reference distribution file: average the WF-products over a family of Gaussian
# reference distributions (10 mean values and 5 standard deviations per reconstruction
# dimension) and rescale so the averaged synthetic signal matches the measured one.
w_abscissas_units = W_abscissas_units[1] # Units of abscissas of first weight function matrix are the units of the abscissas of all weight functions matrices, because of section 5
w_abscissas = W_abscissas[1] # Abscissas of first weight function matrix are the abscissas of all weight functions matrices, because of section 5
verbose && println("---> rescale_W_F_ref_source=$(rescale_W_F_ref_source) was specified... ")
verbose && println("---> The average of the WF-products for $(50^(length(w_abscissas)-1)) Gaussian reference FI distributions will be used to rescale the weight functions...") # BUGFIX: 10 means x 5 standard deviations per dimension -> (10*5)^(number of reconstruction dimensions) Gaussians; the old message printed 5*10^(dims)
rec_space_lengths = length.(w_abscissas[2:end])
rec_space_diffs = [x[1] for x in diff.(w_abscissas[2:end])] # Assume equidistant grids in reconstruction space
rec_space_extrema = [[x[1],x[2]] for x in extrema.(w_abscissas[2:end])]
F_ref_abscissas = [collect(range(x[1],stop=x[2],length=10)) for x in rec_space_extrema] # Same abscissas as reconstruction space, but a 10x10x...x10 grid
std_abscissas = [collect(range(0.05*x[1],stop=0.25*x[1],length=5)) for x in diff.(rec_space_extrema)] # For each reconstruction space dimension, a set of standard deviations from 0.05 to 0.25 times the width of the dimension
gauss_abscissas = vcat(F_ref_abscissas,std_abscissas) # Put together rec space abscissas and their standard deviations
gauss_coords = Iterators.product(gauss_abscissas...) # Create an iterator of the product between all elements
rec_space_minima = [x[1] for x in rec_space_extrema]
rec_space_maxima = [x[2] for x in rec_space_extrema]
WF_avg = [zeros(size(W[i],1)) for i in 1:length(filepaths_W)] # Pre-allocate WF_avg. The number of diagnostic measurement bins for each diagnostic
for gauss_coord in gauss_coords # For all the Gaussians...
l = length(gauss_coord)
mu = [m for m in gauss_coord[1:div(l,2)]] # The first half will be from F_ref_abscissas (Tuple to Vector). BUGFIX: integer division; l/2 is a Float64 and a float range cannot index a Tuple
std = [s for s in gauss_coord[div(l,2)+1:end]] # The second half will be from std_abscissas (Tuple to Vector). BUGFIX: integer division here too
F_gauss = gaussian(mu, std; mn=rec_space_minima, mx=rec_space_maxima, n=rec_space_lengths, floor_level=0.001) # Set all values below 0.001*maximum(F_gauss) to zero
F_gauss = (rescale_W_F_gaus_N_FI_tot/sum(reduce(*,rec_space_diffs) .*F_gauss)) .*F_gauss # Rescale F_gauss to integrate to rescale_W_F_gaus_N_FI_tot
F_gauss_1D = reshape(F_gauss,(reduce(*,rec_space_lengths),))
for i in 1:length(filepaths_W) # For all diagnostics...
WF_avg[i] += W[i]*F_gauss_1D # The weight function matrix (2D) multiplied with the (1D/vectorized) reference fast-ion distribution
end
end
WF_avg ./= length(gauss_coords) # Turn the accumulated sums into averages
rescale_W_factors = [rescale_func(S[i])/rescale_func(wf) for (i,wf) in enumerate(WF_avg)] # WF_avg will have same length as S and W (and filepaths_S and filepaths_W)
else
error("'rescale_W' was set to true but 'rescale_W_F_ref_source' was not specified correctly. Currently supported options include :FILE and :GAUSSIAN. Please correct and re-try.")
end
else
verbose && println("Weight functions will NOT be rescaled... ")
rescale_W_factors = ones(length(filepaths_W)) # Unit factors: downstream multiplication leaves the weight matrices unchanged
end
append!(timestamps,time()) # The timestamp when the weight function rescale factors have been computed
dictionary_of_sections[prnt-1] = ("Computing weight functions rescale factors (if requested)",diff(timestamps)[end])
###########################################################################################################
# SECTION: IF :COLLISIONS WAS INCLUDED IN THE regularization INPUT VARIABLE, AND THE RECONSTRUCTION
# SPACE IS A SPACE IN WHICH COLLISIONS REGULARIZATION IS SUPPORTED, COMPUTE SLOWING-DOWN BASIS FUNCTIONS
println("------------------------------------------------ SECTION $(prnt) ------------------------------------------------"); prnt += 1
if "collisions" in lowercase.(String.(regularization))
COMPATIBLE_OPTIONS = [is_energy_pitch, is_vpara_vperp] # ADD MORE CHECKS HERE IN FUTURE VERSIONS, IF NEEDED <---------------------------------------------------------------------
verbose && print(":COLLISIONS included in 'regularization' input variable. Performing safety checks... ")
if sum([f(W_abscissas[1][2:end], W_abscissas_units[1][2:end]) for f in COMPATIBLE_OPTIONS])==0 # Is the reconstruction space NOT in any of the COMPATIBLE_OPTIONS?
error(":COLLISIONS was specified in 'regularization' input variable, but reconstruction space is not compatible. Current compatible options include (E,p) and (vpara,vperp). Please correct and re-try.")
end
# The three thermal-ion input vectors must describe the same set of species, element-by-element
if !(length(regularization_thermal_ion_temp)==length(regularization_thermal_ion_dens)==length(regularization_thermal_ion_species))
error("The lengths of the 'regularization_thermal_ion_temp', 'regularization_thermal_ion_dens' and 'regularization_thermal_ion_species' input variables were not equal. Please correct and re-try.")
end
verbose && println("ok!")
verbose && print("---> Loading magnetic equilibrium from $(regularization_equil_filepath)... ")
M, wall = nothing, nothing # Initialize global magnetic equilibrium variables
try
global M; global wall # Declare global scope
M, wall = read_geqdsk(regularization_equil_filepath,clockwise_phi=false) # Assume the phi-direction is pointing counter-clockwise when tokamak is viewed from above. This is true for almost all coordinate systems used in the field of plasma physics
catch # Otherwise, assume magnetic equilibrium is a saved .jld2 file
global M; global wall; local myfile
myfile = jldopen(regularization_equil_filepath,false,false,false,IOStream)
M = myfile["S"] # NOTE(review): equilibrium object is assumed to be stored under key "S" in OWCF .jld2 equilibrium files — confirm key name
wall = myfile["wall"]
close(myfile)
end
psi_axis, psi_bdry = psi_limits(M) # Poloidal flux at the magnetic axis and at the plasma boundary
verbose && println("ok!")
verbose && println("---> Computing normalized poloidal flux coordinate (rho_pol) for (R_of_interest, z_of_interest) input variables... ")
if length(W_abscissas_units[1])==3
R_of_interests = vcat(units_conversion_factor(R_of_interest_units,"m") .* R_of_interest) # Converting to meters to match Equilibrium.jl package standard
z_of_interests = vcat(units_conversion_factor(z_of_interest_units,"m") .* z_of_interest) # Converting to meters to match Equilibrium.jl package standard
rho_of_interests = vcat(sqrt((M(R_of_interests[1],z_of_interests[1])-psi_axis)/(psi_bdry-psi_axis))) # BUGFIX: evaluate the equilibrium at the meter-converted coordinates (previously used the raw R_of_interest/z_of_interest, which is wrong whenever R_of_interest_units/z_of_interest_units are not "m")
else
# CODE FUTURE CASES HERE (3D, 4D ETC)
end
verbose && println("---> Preparing thermal ion data... ")
thermal_ion_temperatures = zeros(length(regularization_thermal_ion_species),length(rho_of_interests)) # Ready for future versions with many rho_of_interest values
thermal_ion_densities = zeros(length(regularization_thermal_ion_species),length(rho_of_interests)) # Ready for future versions with many rho_of_interest values
# For each thermal ion species, the temperature/density input may be (1) a scalar value used at
# all rho_pol points of interest, (2) a .jld2 file with "rho_pol" and profile arrays, or
# (3) a TRANSP .cdf file evaluated at the timepoint given by regularization_timepoint.
for (i,ion) in enumerate(regularization_thermal_ion_species)
verbose && println("------> Preparing thermal ion temperature data for species $(ion)... ")
if (typeof(regularization_thermal_ion_temp[i]) <: Real)
verbose && println("---------> Found an ion temperature of $(regularization_thermal_ion_temp[i]) keV. Incorporating... ")
thermal_ion_temperatures[i,:] = repeat(vcat(regularization_thermal_ion_temp[i]),length(rho_of_interests))
elseif (typeof(regularization_thermal_ion_temp[i]) <: String)
if !isfile(regularization_thermal_ion_temp[i])
error("regularization_thermal_ion_temp element with index $(i) was not specified correctly. $(regularization_thermal_ion_temp[i]) is not a valid filepath. Please correct and re-try.")
end
local file_ext; file_ext = split(regularization_thermal_ion_temp[i],".")[end] # Assume last part of String after "." is the file extension
if lowercase(file_ext)=="jld2"
local myfile; myfile = jldopen(regularization_thermal_ion_temp[i],false,false,false,IOStream)
local rho_array; rho_array = myfile["rho_pol"]
local temp_array; temp_array = myfile["thermal_temp"]
close(myfile)
local itp; itp = Interpolations.interpolate((rho_array,),temp_array,Gridded(Linear()))
local etp; etp = Interpolations.extrapolate(itp,0.0) # Assume vacuum scrape-off layer
T_is = etp.(rho_of_interests) # T_i values at the rho_pol values of interest
verbose && println("---------> Computed ion temperature(s) of $(T_is) keV")
verbose && println("---------> at ρ_{pol}=$(round.(rho_of_interests,sigdigits=4))")
verbose && println("---------> i.e. R = $(round.(R_of_interests,sigdigits=4)) meters")
verbose && println("---------> i.e. z = $(round.(z_of_interests,sigdigits=4)) meters")
verbose && println("---------> by loading $(regularization_thermal_ion_temp[i]) . Incorporating temperature(s)... ")
thermal_ion_temperatures[i,:] = T_is
elseif lowercase(file_ext)=="cdf"
# Resolve the timepoint: a Real, a "XX.XXXX"/"XX,XXXX" String, or a TRANSP-NUBEAM output file containing "TIME"
if (typeof(regularization_timepoint) <: Real)
local t_mid; t_mid = Float64.(regularization_timepoint)
elseif (typeof(regularization_timepoint) <: String)
if !isfile(regularization_timepoint)
local t_mid; t_mid = regularization_timepoint # Assume format "XX.XXXX" or "XX,XXXX"
else
local t_mid; t_mid = (read_ncdf(regularization_timepoint,wanted_keys=["TIME"]))["TIME"] # Assume TRANSP-NUBEAM output file
end
else
error("regularization_timepoint input variable has invalid type. Expected Float64, Int64 or String. regularization_timepoint input variable needed to load ion temperature data from $(regularization_thermal_ion_temp[i]). Please correct and re-try.")
end
if typeof(t_mid)==String
t_mid = parse(Float64,replace(t_mid, "," => ".")) # Accept both decimal point and decimal comma
end
local etp; etp = getTempProfileFromTRANSP(t_mid,regularization_thermal_ion_temp[i],regularization_thermal_ion_species[i])
T_is = etp.(rho_of_interests)
verbose && println("---------> Computed ion temperature(s) of $(T_is) keV")
verbose && println("---------> at ρ_{pol}=$(round.(rho_of_interests,sigdigits=4))")
verbose && println("---------> i.e. R = $(round.(R_of_interests,sigdigits=4)) meters")
verbose && println("---------> i.e. z = $(round.(z_of_interests,sigdigits=4)) meters")
verbose && println("---------> by loading $(regularization_thermal_ion_temp[i]) . Incorporating temperature(s)... ")
thermal_ion_temperatures[i,:] = T_is
else
error("regularization_thermal_ion_temp element with index $(i) was not specified correctly. $(regularization_thermal_ion_temp[i]) has an invalid file extension. Expected .jld2 or .cdf. Please correct and re-try.")
end
else
error("regularization_thermal_ion_temp element with index $(i) was not specified correctly. It is an invalid type $(regularization_thermal_ion_temp[i]). Please correct and re-try.")
end
# Density handling mirrors the temperature handling above (scalar, .jld2 profile or TRANSP .cdf)
verbose && println("------> Preparing thermal ion density data for species $(ion)... ")
if (typeof(regularization_thermal_ion_dens[i]) <: Real)
verbose && println("---------> Found an ion density of $(regularization_thermal_ion_dens[i]) m^-3. Incorporating... ")
thermal_ion_densities[i,:] = repeat(vcat(regularization_thermal_ion_dens[i]),length(rho_of_interests))
elseif (typeof(regularization_thermal_ion_dens[i]) <: String)
if !isfile(regularization_thermal_ion_dens[i])
error("regularization_thermal_ion_dens element with index $(i) was not specified correctly. $(regularization_thermal_ion_dens[i]) is not a valid filepath. Please correct and re-try.")
end
local file_ext; file_ext = split(regularization_thermal_ion_dens[i],".")[end] # Assume last part of String after "." is the file extension
if lowercase(file_ext)=="jld2"
local myfile; myfile = jldopen(regularization_thermal_ion_dens[i],false,false,false,IOStream)
local rho_array; rho_array = myfile["rho_pol"]
local dens_array; dens_array = myfile["thermal_dens"]
close(myfile)
local itp; itp = Interpolations.interpolate((rho_array,),dens_array,Gridded(Linear()))
local etp; etp = Interpolations.extrapolate(itp,0.0) # Assume vacuum scrape-off layer
n_is = etp.(rho_of_interests) # n_i values at the rho_pol values of interest
verbose && println("---------> Computed ion densitie(s) of $(n_is) m^-3")
verbose && println("---------> at ρ_{pol}=$(round.(rho_of_interests,sigdigits=4))")
verbose && println("---------> i.e. R = $(round.(R_of_interests,sigdigits=4)) meters")
verbose && println("---------> i.e. z = $(round.(z_of_interests,sigdigits=4)) meters")
verbose && println("---------> by loading $(regularization_thermal_ion_dens[i]) . Incorporating densitie(s)... ")
thermal_ion_densities[i,:] = n_is
elseif lowercase(file_ext)=="cdf"
if (typeof(regularization_timepoint) <: Real)
local t_mid; t_mid = Float64.(regularization_timepoint)
elseif (typeof(regularization_timepoint) <: String)
if !isfile(regularization_timepoint)
local t_mid; t_mid = regularization_timepoint # Assume format "XX.XXXX" or "XX,XXXX"
else
local t_mid; t_mid = (read_ncdf(regularization_timepoint,wanted_keys=["TIME"]))["TIME"] # Assume TRANSP-NUBEAM output file
end
else
error("regularization_timepoint input variable has invalid type. Expected Float64, Int64 or String. regularization_timepoint input variable needed to load ion density data from $(regularization_thermal_ion_dens[i]). Please correct and re-try.")
end
if typeof(t_mid)==String
t_mid = parse(Float64,replace(t_mid, "," => ".")) # Accept both decimal point and decimal comma
end
local etp; etp = getDensProfileFromTRANSP(t_mid,regularization_thermal_ion_dens[i],regularization_thermal_ion_species[i])
n_is = etp.(rho_of_interests)
verbose && println("---------> Computed ion densities(s) of $(n_is) m^-3")
verbose && println("---------> at ρ_{pol}=$(round.(rho_of_interests,sigdigits=4))")
verbose && println("---------> i.e. R = $(round.(R_of_interests,sigdigits=4)) meters")
verbose && println("---------> i.e. z = $(round.(z_of_interests,sigdigits=4)) meters")
verbose && println("---------> by loading $(regularization_thermal_ion_dens[i]) . Incorporating densitie(s)... ")
thermal_ion_densities[i,:] = n_is
else
error("regularization_thermal_ion_dens element with index $(i) was not specified correctly. $(regularization_thermal_ion_dens[i]) has an invalid file extension. Expected .jld2 or .cdf. Please correct and re-try.")
end
else
error("regularization_thermal_ion_dens element with index $(i) was not specified correctly. It is an invalid type $(regularization_thermal_ion_dens[i]). Please correct and re-try.")
end
end
# CODE ELECTRON TEMPERATURE AND DENSITY
# Load/compute the thermal electron temperature at the rho_pol values of interest.
# Accepted inputs for regularization_thermal_electron_temp:
#   - a Real scalar (keV), applied uniformly at all rho_pol values of interest
#   - a .jld2 filepath containing "rho_pol" and "thermal_temp" arrays (linearly
#     interpolated; a vacuum scrape-off layer, i.e. 0 outside the grid, is assumed)
#   - a .cdf TRANSP output filepath (profile extracted at timepoint regularization_timepoint)
verbose && println("---> Preparing thermal electron data... ")
thermal_electron_temperatures = zeros(length(rho_of_interests)) # Ready for future versions with many rho_of_interest values
thermal_electron_densities = zeros(length(rho_of_interests)) # Ready for future versions with many rho_of_interest values
verbose && println("------> Preparing thermal electron temperature data... ")
if (typeof(regularization_thermal_electron_temp) <: Real)
    verbose && println("---------> Found an electron temperature of $(regularization_thermal_electron_temp) keV. Incorporating... ")
    # A scalar temperature is assumed to hold at every rho_pol value of interest
    thermal_electron_temperatures = fill(regularization_thermal_electron_temp, length(rho_of_interests))
elseif (typeof(regularization_thermal_electron_temp) <: String)
    if !isfile(regularization_thermal_electron_temp)
        error("regularization_thermal_electron_temp was not specified correctly. $(regularization_thermal_electron_temp) is not a valid filepath. Please correct and re-try.")
    end
    file_ext = split(regularization_thermal_electron_temp,".")[end] # Assume last part of String after "." is the file extension
    if lowercase(file_ext)=="jld2"
        myfile = jldopen(regularization_thermal_electron_temp,false,false,false,IOStream)
        rho_array = myfile["rho_pol"]
        temp_array = myfile["thermal_temp"]
        close(myfile)
        # Linearly interpolate the loaded profile onto the rho_pol values of interest
        itp = Interpolations.interpolate((rho_array,),temp_array,Gridded(Linear()))
        etp = Interpolations.extrapolate(itp,0.0) # Assume vacuum scrape-off layer
        T_es = etp.(rho_of_interests) # T_e values at the rho_pol values of interest
        verbose && println("---------> Computed electron temperature(s) of $(T_es) keV")
        verbose && println("---------> at ρ_{pol}=$(round.(rho_of_interests,sigdigits=4))")
        verbose && println("---------> i.e. R = $(round.(R_of_interests,sigdigits=4)) meters")
        verbose && println("---------> i.e. z = $(round.(z_of_interests,sigdigits=4)) meters")
        verbose && println("---------> by loading $(regularization_thermal_electron_temp) . Incorporating temperature(s)... ")
        thermal_electron_temperatures = T_es
    elseif lowercase(file_ext)=="cdf"
        # Resolve the timepoint at which to extract the TRANSP profile
        if (typeof(regularization_timepoint) <: Real)
            t_mid = Float64.(regularization_timepoint)
        elseif (typeof(regularization_timepoint) <: String)
            if !isfile(regularization_timepoint)
                t_mid = regularization_timepoint # Assume format "XX.XXXX" or "XX,XXXX"
            else
                t_mid = (read_ncdf(regularization_timepoint,wanted_keys=["TIME"]))["TIME"] # Assume TRANSP-NUBEAM output file
            end
        else
            error("regularization_timepoint input variable has invalid type. Expected Float64, Int64 or String. regularization_timepoint input variable needed to load electron temperature data from $(regularization_thermal_electron_temp). Please correct and re-try.")
        end
        # BUGFIX: parse a String timepoint ("XX.XXXX" or "XX,XXXX") into a Float64, exactly
        # as the ion-density and electron-density .cdf branches do. Previously this branch
        # passed the raw String on to getTempProfileFromTRANSP().
        if typeof(t_mid)==String
            t_mid = parse(Float64,replace(t_mid, "," => "."))
        end
        etp = getTempProfileFromTRANSP(t_mid,regularization_thermal_electron_temp,"e") # "e" selects the electron profile
        T_es = etp.(rho_of_interests) # T_e values at the rho_pol values of interest
        verbose && println("---------> Computed electron temperature(s) of $(T_es) keV")
        verbose && println("---------> at ρ_{pol}=$(round.(rho_of_interests,sigdigits=4))")
        verbose && println("---------> i.e. R = $(round.(R_of_interests,sigdigits=4)) meters")
        verbose && println("---------> i.e. z = $(round.(z_of_interests,sigdigits=4)) meters")
        verbose && println("---------> by loading $(regularization_thermal_electron_temp) . Incorporating temperature(s)... ")
        thermal_electron_temperatures = T_es
    else
        error("regularization_thermal_electron_temp was not specified correctly. $(regularization_thermal_electron_temp) has an invalid file extension. Expected .jld2 or .cdf. Please correct and re-try.")
    end
else
    error("regularization_thermal_electron_temp was not specified correctly. It is an invalid type $(regularization_thermal_electron_temp). Please correct and re-try.")
end
# Load/compute the thermal electron density at the rho_pol values of interest.
# Same input conventions as the electron temperature above: Real scalar (m^-3),
# .jld2 file with "rho_pol"/"thermal_dens" arrays, or .cdf TRANSP output file.
verbose && println("------> Preparing thermal electron density data... ")
if (typeof(regularization_thermal_electron_dens) <: Real)
verbose && println("---------> Found an electron density of $(regularization_thermal_electron_dens) m^-3. Incorporating... ")
# A scalar density is assumed to hold at every rho_pol value of interest
thermal_electron_densities = repeat(vcat(regularization_thermal_electron_dens),length(rho_of_interests))
elseif (typeof(regularization_thermal_electron_dens) <: String)
if !isfile(regularization_thermal_electron_dens)
error("regularization_thermal_electron_dens was not specified correctly. $(regularization_thermal_electron_dens) is not a valid filepath. Please correct and re-try.")
end
file_ext = split(regularization_thermal_electron_dens,".")[end] # Assume last part of String after "." is the file extension
if lowercase(file_ext)=="jld2"
myfile = jldopen(regularization_thermal_electron_dens,false,false,false,IOStream)
rho_array = myfile["rho_pol"]
dens_array = myfile["thermal_dens"]
close(myfile)
# Linearly interpolate the loaded profile onto the rho_pol values of interest
itp = Interpolations.interpolate((rho_array,),dens_array,Gridded(Linear()))
etp = Interpolations.extrapolate(itp,0.0) # Assume vacuum scrape-off layer
n_es = etp.(rho_of_interests) # n_e values at the rho_pol values of interest
verbose && println("---------> Computed electron densitie(s) of $(n_es) m^-3")
verbose && println("---------> at ρ_{pol}=$(round.(rho_of_interests,sigdigits=4))")
verbose && println("---------> i.e. R = $(round.(R_of_interests,sigdigits=4)) meters")
verbose && println("---------> i.e. z = $(round.(z_of_interests,sigdigits=4)) meters")
verbose && println("---------> by loading $(regularization_thermal_electron_dens) . Incorporating densitie(s)... ")
thermal_electron_densities = n_es
elseif lowercase(file_ext)=="cdf"
# Resolve the timepoint at which to extract the TRANSP profile
if (typeof(regularization_timepoint) <: Real)
t_mid = Float64.(regularization_timepoint)
elseif (typeof(regularization_timepoint) <: String)
if !isfile(regularization_timepoint)
t_mid = regularization_timepoint # Assume format "XX.XXXX" or "XX,XXXX"
else
t_mid = (read_ncdf(regularization_timepoint,wanted_keys=["TIME"]))["TIME"] # Assume TRANSP-NUBEAM output file
end
else
error("regularization_timepoint input variable has invalid type. Expected Float64, Int64 or String. regularization_timepoint input variable needed to load electron density data from $(regularization_thermal_electron_dens). Please correct and re-try.")
end
# Normalize a String timepoint ("XX.XXXX" or "XX,XXXX") into a Float64 before use
if typeof(t_mid)==String
t_mid = parse(Float64,replace(t_mid, "," => "."))
end
etp = getDensProfileFromTRANSP(t_mid,regularization_thermal_electron_dens,"e") # "e" selects the electron profile
n_es = etp.(rho_of_interests) # n_e values at the rho_pol values of interest
verbose && println("---------> Computed electron densities(s) of $(n_es) m^-3")
verbose && println("---------> at ρ_{pol}=$(round.(rho_of_interests,sigdigits=4))")
verbose && println("---------> i.e. R = $(round.(R_of_interests,sigdigits=4)) meters")
verbose && println("---------> i.e. z = $(round.(z_of_interests,sigdigits=4)) meters")
verbose && println("---------> by loading $(regularization_thermal_electron_dens) . Incorporating densitie(s)... ")
thermal_electron_densities = n_es
else
error("regularization_thermal_electron_dens was not specified correctly. $(regularization_thermal_electron_dens) has an invalid file extension. Expected .jld2 or .cdf. Please correct and re-try.")
end
else
error("regularization_thermal_electron_dens was not specified correctly. It is an invalid type $(regularization_thermal_electron_dens). Please correct and re-try.")
end
verbose && print("---> Computing slowing-down (collision physics) basis functions...")
# If the reconstruction space is (E,p) (energy, pitch), build the grid on which the
# slowing-down basis functions will be evaluated. NOTE(review): the first element of
# W_abscissas[1] appears to be the measurement bin centers (per the comments below),
# hence the [2:end] slicing throughout — confirm against the definition of W_abscissas.
if is_energy_pitch(W_abscissas[1][2:end],W_abscissas_units[1][2:end])
n_e = thermal_electron_densities[1] # We know it must be a 1-element Vector because reconstruction space is (E,p)
T_e = thermal_electron_temperatures[1] # -||-
thermal_ion_densities_at_rho = thermal_ion_densities[:,1] # -||-
thermal_ion_temperatures_at_rho = thermal_ion_temperatures[:,1] # -||-
verbose && println("for (E,p) space with grid point extrema:")
w_E_abscissa, w_E_ind = get_energy_abscissa(W_abscissas[1][2:end],W_abscissas_units[1][2:end]; returnExtra=true) # First dimension is always the measurement bin centers
verbose && println("------> (E_min,E_max)=$(round.(extrema(w_E_abscissa),sigdigits=4)) $(W_abscissas_units[1][w_E_ind+1])") # +1 because get_energy_abscissa() returns index shifted by 1, due to not including measurement bin centers abscissa as function input
w_p_abscissa = get_pitch_abscissa(W_abscissas[1][2:end],W_abscissas_units[1][2:end]) # First dimension is always the measurement bin centers
verbose && println("------> (p_min,p_max)=$(round.(extrema(w_p_abscissa),sigdigits=4))")
dE = diff(w_E_abscissa)[1]; dp = diff(w_p_abscissa)[1] # Assume equidistant (E,p) grid
# Injection-point grids: cell midpoints of the (E,p) weight-function grid
E_inj_array = w_E_abscissa[1:1:end-1] .+ diff(w_E_abscissa)[1]/2 # Assume uniform energy grid
p_inj_array = w_p_abscissa[1:1:end-1] .+ diff(w_p_abscissa)[1]/2 # Assume uniform pitch grid
verbose && println("------> Computing... ")
# One column per (E_inj,p_inj) injection point; one row per (E,p) grid point
F_SD = zeros(length(w_E_abscissa)*length(w_p_abscissa),length(E_inj_array)*length(p_inj_array))